text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
@author: AlexBourassa
This is based on old code. It works, but may not be optimal or simple...
"""
from A_Lab.Widgets.GraphWidget.Graph_Widget_Plugin import *
from PyQt4 import QtGui as _gui
from PyQt4 import QtCore as _core
from PyQt4 import uic as _uic
import os as _os
import numpy as _np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as _plt
from A_Lab.Widgets.GraphWidget import Fitter_Eq as fit
class Fitter(Graph_Widget_Plugin):
    """
    Graph plugin that adds a 'Tools > New Fitter...' menu action to the parent
    graph.  Triggering the action spawns a Fitter_Control_Widget bound to the
    graph, docked if possible, otherwise shown as a free window.
    """

    def __init__(self, parent_graph, **kwargs):
        """
        This plugin allows the graph to spawn a control panel for fitting a trace.

        parent_graph: the Graph_Widget this plugin attaches to.
        kwargs: forwarded to every Fitter_Control_Widget the action creates.
        """
        Graph_Widget_Plugin.__init__(self, parent_graph)
        # NOTE(review): self.graph is presumably set by Graph_Widget_Plugin's
        # initializer (it is read on the next line before being reassigned).
        self.menu = self.graph.menu
        self.graph = parent_graph
        self.kwargs = kwargs

        # Build the Tools menu if it does not exist yet.
        if 'Tools' not in self.menu:
            self.menu['Tools'] = dict()
            self.menu['Tools']['_QMenu'] = self.menu['_QMenuBar'].addMenu('Tools')

        # Add the action that opens a new fitter control panel.
        self.menu['Tools']['New Fitter...'] = _gui.QAction('New Fitter...', self.menu['Tools']['_QMenu'])
        self.menu['Tools']['_QMenu'].addAction(self.menu['Tools']['New Fitter...'])
        self.menu['Tools']['New Fitter...'].triggered.connect(lambda: self.createNewFitter(**kwargs))

    def createNewFitter(self, **kwargs):
        """Create a new fit control panel and dock (or show) it."""
        control_widget = Fitter_Control_Widget(self.graph, **kwargs)
        # Try adding it as a docked widget; this works if the parent of the
        # graph widget was given and is a Module_Container.  If it fails,
        # simply show the widget (still works, but less pretty).
        try:
            self.graph.parent().requestNewModule.emit("Fitter", control_widget, None)
        except Exception:  # no suitable parent container -- fall back to a window
            control_widget.show()
class Fitter_Control_Widget(_gui.QWidget):
    """
    Control panel for fitting a trace of a Graph_Widget.

    The layout is loaded from Fitter.ui.  The user picks a trace, a fit
    function, initial parameters (with optional per-parameter "constant"
    flags) and an error method; the fit result is plotted as an extra
    '<trace> Fit' trace on the graph and derived output values are shown
    in a table.  A QTimer supports continuous re-fitting.
    """

    def __init__(self, parent_graph, **kwargs):
        """
        This widget is a control panel for the fitter.

        parent_graph: Graph_Widget whose traces can be fitted.
        kwargs: forwarded to graph.addTrace when a fit trace is created.
        """
        _gui.QWidget.__init__(self)
        _uic.loadUi(_os.path.join(_os.path.dirname(__file__), 'Fitter.ui'), self)
        self.graph = parent_graph
        self.kwargs = kwargs
        self.fitTraces = dict()                 # fit trace name -> trace object
        self._fitted_name_association = dict()  # source trace name -> fit trace name
        self.prepareLatex()
        self.generateCtrls()

        # Build a quick function to remove an item by name from the signal list.
        def removeItemByName(name):
            for i in range(self.signalSelect.count()):
                if self.signalSelect.itemText(i) == name:
                    self.signalSelect.removeItem(i)
        self.signalSelect.removeItemByName = removeItemByName

        # Keep the trace selector in sync with the graph's traces.
        self.graph.traceAdded[str].connect(lambda x: self.signalSelect.addItem(x))
        self.graph.traceRemoved[str].connect(lambda x: self.signalSelect.removeItemByName(x))

    def prepareLatex(self):
        """
        Create the matplotlib canvas used to render the fit function's LaTeX
        equation, colored to blend in with the widget background.
        """
        # Get window background color.
        bg = self.palette().window().color()
        cl = (bg.redF(), bg.greenF(), bg.blueF())
        # Get a figure and make it the same color as the background.
        self.fig = _plt.figure()
        self.latexHolder = FigureCanvas(self.fig)
        self.latexHolder.setFixedHeight(60)
        self.matplotlib_container.addWidget(self.latexHolder)
        self.fig.set_edgecolor(cl)
        self.fig.set_facecolor(cl)

    def generateCtrls(self):
        """Wire up all controls of the panel and fill their initial content."""
        # Fill in the signal select input with the graph's current traces.
        items = list(self.graph)
        self.signalSelect.addItems(items)
        self.signalSelect.setCurrentIndex(len(items) - 1)
        # Link fit buttons.
        self.fitBtn.clicked.connect(self.fitData)
        self.guessBtn.clicked.connect(self.guessP)
        # Fill the combobox for fit function; default to Gaussian.
        self.fitFct = fit.getAllFitFct()
        self.fitFctSelect.addItems(list(self.fitFct.keys()))
        self.fitFctSelect.currentIndexChanged.connect(self.generateFitTable)
        self.fitFctSelect.setCurrentIndex(list(self.fitFct.keys()).index('Gaussian'))
        # Begin/Stop continuous fit timer.
        self.timer = _core.QTimer(self)
        self.timer.timeout.connect(self.fitData)
        self.contFitActive = False
        self.start_stop_btn.clicked.connect(self.toogleContFit)
        # Fill in the combobox for error method; default to 'subtract'.
        self.errorMethodSelect.addItems(fit.Generic_Fct.allowedErrorMethods)
        self.errorMethodSelect.setCurrentIndex(fit.Generic_Fct.allowedErrorMethods.index('subtract'))
        # Link the output buttons.
        self.calcOutBtn.clicked.connect(lambda: self.generateOutputTable())
        self.exportOutputAllBtn.clicked.connect(lambda: self.exportOutputToClipboard(All=True))
        self.exportOutputValBtn.clicked.connect(lambda: self.exportOutputToClipboard(All=False))
        # Dynamically generate the fit variables table.
        self.generateFitTable()

    def generateFitTable(self):
        """
        Delete the current table and regenerate a new one based on the current
        function's variables.
        """
        # Fetch function name and variables.
        fctName = str(self.fitFctSelect.currentText())
        fctVars = self.fitFct[fctName].getParamList()
        # Update the rendered LaTeX equation.
        self.generateLatex()
        # Set the size of the table.
        self.fitVariableTable.setRowCount(len(fctVars))
        # Fill in the table, remembering the input widgets per variable.
        self.fitVarInputs = dict()
        ri = 0
        for var in fctVars:
            # Variable name in column 0.
            self.fitVariableTable.setCellWidget(ri, 0, _gui.QLabel(str(var)))
            # Variable value spinbox in column 2.
            varValue = _gui.QDoubleSpinBox()
            varValue.setDecimals(16)
            varValue.setMaximum(1000000000000000)
            varValue.setMinimum(-1000000000000000)
            varValue.setValue(1.0)
            self.fitVariableTable.setCellWidget(ri, 2, varValue)
            # "Hold constant" checkbox in column 1.
            varConst = _gui.QCheckBox()
            self.fitVariableTable.setCellWidget(ri, 1, varConst)
            # Remember the widgets.
            self.fitVarInputs[var] = dict()
            self.fitVarInputs[var]['value'] = varValue
            self.fitVarInputs[var]['const'] = varConst
            # Go to next row.
            ri += 1
        return

    def generateOutputTable(self, p=None):
        """
        Delete the current output table and regenerate it from the current
        function's derived output values.

        p: optional dict of parameter values; if None, values are read from
           the fit-variable table.
        """
        # Fetch function name.
        fctName = str(self.fitFctSelect.currentText())
        if p is None:
            # Read p from the table widgets.
            p = dict()
            for var in list(self.fitVarInputs.keys()):
                p[var] = self.fitVarInputs[var]['value'].value()
        # Calculate output.
        outputVal = self.fitFct[fctName].calcOutput(p)
        if outputVal is None:
            # No output variables defined for this function.
            self.outputTable.setRowCount(0)
            return
        else:
            self.outputTable.setRowCount(len(outputVal))
        # Fill in the table.
        ri = 0
        for val in list(outputVal.keys()):
            # Output name in column 0.
            self.outputTable.setCellWidget(ri, 0, _gui.QLabel(str(val)))
            # Output value in column 1; selectable so it can be copied.
            outputLabel = _gui.QLabel(str(outputVal[val]))
            outputLabel.setTextInteractionFlags(_core.Qt.TextSelectableByMouse)
            self.outputTable.setCellWidget(ri, 1, outputLabel)
            # Go to next row.
            ri += 1
        return

    #------------------------------------------------------------------------------
    # Fit parameters and value
    #------------------------------------------------------------------------------
    def fitData(self):
        """
        Get all the variables and perform the desired fit, then plot the
        result as a '<trace> Fit' trace and refresh the tables.
        """
        # Grab the free variables and the held-constant values.
        fctVars = dict()
        fctConst = dict()
        for var in list(self.fitVarInputs.keys()):
            if self.fitVarInputs[var]['const'].isChecked():
                fctConst[var] = self.fitVarInputs[var]['value'].value()
            else:
                fctVars[var] = self.fitVarInputs[var]['value'].value()
        # Get the name of the function.
        fctName = str(self.fitFctSelect.currentText())
        # Get the error method.
        errorMethod = str(self.errorMethodSelect.currentText())
        # Get the trace name.
        trc = str(self.signalSelect.currentText())
        # Fit the data over the selected region.
        xData, yData = self.graph.getRegionData(trc, transformed=self.transformed_Check.isChecked())
        if len(fctVars) != 0:
            fitData, p = self.fitFct[fctName].performFit(fctVars, _np.array([xData, yData]), const=fctConst, errorMethod=errorMethod)
        else:
            # Everything held constant: just evaluate at the given values.
            p = fctConst
        # Evaluate the fit over the trace's full x range.
        x = self.graph[trc].getData(transformed=self.transformed_Check.isChecked())[0]
        y = self.fitFct[fctName].evaluateFct(p, x)
        # Fill the tables.
        self.setTableValue(p)
        self.generateOutputTable(p)
        # Add the fit trace if not present; update it if present.
        if trc not in self._fitted_name_association:
            trace = self.graph.addTrace(trc + ' Fit', **self.kwargs)
            self._fitted_name_association[trc] = fit_trc_name = trace.name
            self.fitTraces[fit_trc_name] = trace
        else:
            fit_trc_name = self._fitted_name_association[trc]
        self.fitTraces[fit_trc_name].setData(x, y)
        # Change transform if necessary: fitted (transformed) data must not be
        # transformed again, so install an identity transform in that case.
        if self.transformed_Check.isChecked():
            def f(x, y):
                return x, y
            self.fitTraces[fit_trc_name].setTransform(f, transform_name='NoTransform')
        else:
            self.fitTraces[fit_trc_name].setTransform(self.graph[trc].transform, self.graph[trc].transform_name)

    def setTableValue(self, p):
        """
        Set all variables in the table to their associated dictionary value.
        """
        for var in list(p.keys()):
            if var in self.fitVarInputs:
                self.fitVarInputs[var]['value'].setValue(p[var])

    def generateLatex(self):
        """
        Generate and show the LaTeX equation of the current fit function.
        """
        # Clear figure.
        self.fig.clf()
        # Fetch function name.
        fctName = str(self.fitFctSelect.currentText())
        # Fetch the LaTeX string and render it centered as the figure title.
        latex = self.fitFct[fctName].getLatex()
        self.fig.suptitle(latex,
                          y=0.5,
                          x=0.5,
                          horizontalalignment='center',
                          verticalalignment='center',
                          size=18)
        self.latexHolder.draw()

    def guessP(self):
        """
        Guess initial parameters from the selected data and fill the table.
        """
        # Fetch function name.
        fctName = str(self.fitFctSelect.currentText())
        # Data from the selected region.
        xData, yData = self.graph.getRegionData(self.signalSelect.currentText(), transformed=self.transformed_Check.isChecked())
        # Guess the parameters.
        p = self.fitFct[fctName].guessP(xData, yData)
        if p is not None:
            # Set the parameters.
            self.setTableValue(p)
        else:
            print("No Guessing algorithm defined for this function!")

    #------------------------------------------------------------------------------
    # Other HELPER methods
    #------------------------------------------------------------------------------
    def exportOutputToClipboard(self, All=True):
        """
        Copy the output table to the clipboard, tab/newline separated.

        All: when True include the output names; when False values only.
        """
        app = _core.QCoreApplication.instance()
        exportStr = ''
        for i in range(self.outputTable.rowCount()):
            if All:
                exportStr += str(self.outputTable.cellWidget(i, 0).text())
                exportStr += '\t'
            exportStr += str(self.outputTable.cellWidget(i, 1).text())
            exportStr += '\n'
        app.clipboard().setText(exportStr)

    def toogleContFit(self):
        """Start or stop the continuous-fit timer (button keeps its label in sync)."""
        if self.contFitActive:
            self.timer.stop()
            self.start_stop_btn.setText("Start")
        else:
            # QTimer.start expects an integer interval in milliseconds.
            self.timer.start(int(self.contFitTime.value() * 1000))
            self.start_stop_btn.setText("Stop")
        self.contFitActive = not self.contFitActive

    def _closeEvent(self, event):
        """Stop fitting, remove all fit traces from the graph and self-destroy."""
        self.timer.stop()
        for trc in self.fitTraces:
            if trc in self.graph:
                self.graph.removeTrace(trc)
        self.parent().requestSelfDestroy.emit()

    def loadSettings(self, settingsObj=None, **kwargs):
        """Restore geometry and last-used fit function from a QSettings object."""
        print('Loading Fitter...')
        if not isinstance(settingsObj, _core.QSettings):
            print("No QSetting object was provided")
        else:
            self.restoreGeometry(settingsObj.value('Geometry'))
            lastFunctionUsed = str(settingsObj.value('FunctionName'))
            self.fitFctSelect.setCurrentIndex(list(self.fitFct.keys()).index(lastFunctionUsed))
        return

    def saveSettings(self, settingsObj=None, **kwargs):
        """Save geometry and current fit function to a QSettings object."""
        print('Saving Fitter...')
        if not isinstance(settingsObj, _core.QSettings):
            print("No QSetting object was provided")
        else:
            settingsObj.setValue('Geometry', self.saveGeometry())
            settingsObj.setValue('FunctionName', str(self.fitFctSelect.currentText()))
        return
| AlexBourassa/A-Lab | A_Lab/Widgets/GraphWidget/Fitter.py | Python | gpl-2.0 | 14,054 | [
"Gaussian"
] | 61dba5114ce43af0520700da49178dfd1df7f6354045bced83af1f46a467447a |
"""
View for Courseware Index
"""
# pylint: disable=attribute-defined-outside-init
import logging
import urllib
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import View
from opaque_keys.edx.keys import CourseKey
from web_fragments.fragment import Fragment
from edxmako.shortcuts import render_to_response, render_to_string
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.gating.api import get_entrance_exam_score_ratio, get_entrance_exam_usage_key
from lms.djangoapps.grades.new.course_grade_factory import CourseGradeFactory
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.monitoring_utils import set_custom_metrics_for_course_key
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from openedx.features.course_experience import COURSE_OUTLINE_PAGE_FLAG, default_course_url_name
from openedx.features.course_experience.views.course_sock import CourseSockFragmentView
from openedx.features.enterprise_support.api import data_sharing_consent_required
from shoppingcart.models import CourseRegistrationCode
from student.views import is_course_blocked
from student.models import CourseEnrollment
from util.views import ensure_valid_course_key
from xmodule.modulestore.django import modulestore
from xmodule.x_module import STUDENT_VIEW
from ..access import has_access
from ..access_utils import in_preview_mode, check_course_open_for_learner
from ..courses import get_course_with_access, get_current_child, get_studio_url
from ..entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam
)
from ..masquerade import setup_masquerade
from ..model_data import FieldDataCache
from ..module_render import get_module_for_descriptor, toc_for_course
from .views import (
CourseTabView,
)
log = logging.getLogger("edx.courseware.views.index")
TEMPLATE_IMPORTS = {'urllib': urllib}
CONTENT_DEPTH = 2
class CoursewareIndex(View):
"""
View class for the Courseware page.

Per-request state (course_key, course, chapter, section, position, the
effective/real user, etc.) is stored on ``self`` by ``get``; this is why
the module disables pylint's attribute-defined-outside-init check.
"""
@method_decorator(login_required)
@method_decorator(ensure_csrf_cookie)
@method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True))
@method_decorator(ensure_valid_course_key)
@method_decorator(data_sharing_consent_required)
def get(self, request, course_id, chapter=None, section=None, position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right
chapter.
If neither chapter or section are specified, displays the user's most
recent chapter, or the first chapter if this is the user's first visit.
Arguments:
request: HTTP request
course_id (unicode): course id
chapter (unicode): chapter url_name
section (unicode): section url_name
position (unicode): position in module, eg of <sequential> module
"""
self.course_key = CourseKey.from_string(course_id)
self.request = request
# Remember the originally requested names: if the resolved chapter/section
# differ, _redirect_if_not_requested_section issues a redirect so analytics
# and error logs carry the canonical URL.
self.original_chapter_url_name = chapter
self.original_section_url_name = section
self.chapter_url_name = chapter
self.section_url_name = section
self.position = position
self.chapter, self.section = None, None
self.course = None
self.url = request.path
try:
set_custom_metrics_for_course_key(self.course_key)
self._clean_position()
with modulestore().bulk_operations(self.course_key):
self.course = get_course_with_access(
request.user, 'load', self.course_key,
depth=CONTENT_DEPTH,
check_if_enrolled=True,
)
self.is_staff = has_access(request.user, 'staff', self.course)
self._setup_masquerade_for_effective_user()
return self._get(request)
except Exception as exception: # pylint: disable=broad-except
# All errors (including CourseAccessRedirect raised by helpers) are
# funneled through CourseTabView's shared exception handling.
return CourseTabView.handle_exceptions(request, self.course, exception)
def _setup_masquerade_for_effective_user(self):
"""
Setup the masquerade information to allow the request to
be processed for the requested effective user.
"""
self.real_user = self.request.user
self.masquerade, self.effective_user = setup_masquerade(
self.request,
self.course_key,
self.is_staff,
reset_masquerade_data=True
)
# Set the user in the request to the effective user.
self.request.user = self.effective_user
def _get(self, request):
"""
Render the index page.
"""
self._redirect_if_needed_to_pay_for_course()
self._prefetch_and_bind_course(request)
if self.course.has_children_at_depth(CONTENT_DEPTH):
self._reset_section_to_exam_if_required()
self.chapter = self._find_chapter()
self.section = self._find_section()
if self.chapter and self.section:
self._redirect_if_not_requested_section()
self._save_positions()
self._prefetch_and_bind_section()
return render_to_response('courseware/courseware.html', self._create_courseware_context(request))
def _redirect_if_not_requested_section(self):
"""
If the resulting section and chapter are different from what was initially
requested, redirect back to the index page, but with an updated URL that includes
the correct section and chapter values. We do this so that our analytics events
and error logs have the appropriate URLs.
"""
if (
self.chapter.url_name != self.original_chapter_url_name or
(self.original_section_url_name and self.section.url_name != self.original_section_url_name)
):
# CourseAccessRedirect is caught by get()'s handler and turned into
# an HTTP redirect.
raise CourseAccessRedirect(
reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course_key),
'chapter': self.chapter.url_name,
'section': self.section.url_name,
},
)
)
def _clean_position(self):
"""
Verify that the given position is an integer. If it is not positive, set it to 1.
"""
if self.position is not None:
try:
self.position = max(int(self.position), 1)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(self.position))
def _redirect_if_needed_to_pay_for_course(self):
"""
Redirect to dashboard if the course is blocked due to non-payment.
"""
# Re-fetch with groups prefetched; is_course_blocked inspects group
# membership for the redeeming user.
self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=self.course_key,
registrationcoderedemption__redeemed_by=self.real_user
)
if is_course_blocked(self.request, redeemed_registration_codes, self.course_key):
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
self.real_user,
unicode(self.course_key),
)
raise CourseAccessRedirect(reverse('dashboard'))
def _reset_section_to_exam_if_required(self):
"""
Check to see if an Entrance Exam is required for the user.

If so, force the requested chapter/section to the exam's first section.
"""
if not user_can_skip_entrance_exam(self.effective_user, self.course):
exam_chapter = get_entrance_exam_content(self.effective_user, self.course)
if exam_chapter and exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
self.chapter_url_name = exam_chapter.url_name
self.section_url_name = exam_section.url_name
def _get_language_preference(self):
"""
Returns the preferred language for the actual user making the request.
"""
language_preference = get_user_preference(self.real_user, LANGUAGE_KEY)
if not language_preference:
# Fall back to the platform default language.
language_preference = settings.LANGUAGE_CODE
return language_preference
def _is_masquerading_as_student(self):
"""
Returns whether the current request is masquerading as a student.
"""
return self.masquerade and self.masquerade.role == 'student'
def _is_masquerading_as_specific_student(self):
"""
Returns whether the current request is masquerading as a specific student.
"""
return self._is_masquerading_as_student() and self.masquerade.user_name
def _find_block(self, parent, url_name, block_type, min_depth=None):
"""
Finds the block in the parent with the specified url_name.
If not found, calls get_current_child on the parent.

min_depth: when given, a located child must itself have children at
depth min_depth - 1 or it is discarded and the default child is used.
"""
child = None
if url_name:
child = parent.get_child_by(lambda m: m.location.name == url_name)
if not child:
# User may be trying to access a child that isn't live yet
if not self._is_masquerading_as_student():
raise Http404('No {block_type} found with name {url_name}'.format(
block_type=block_type,
url_name=url_name,
))
elif min_depth and not child.has_children_at_depth(min_depth - 1):
child = None
if not child:
child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child"))
return child
def _find_chapter(self):
"""
Finds the requested chapter.
"""
return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1)
def _find_section(self):
"""
Finds the requested section (returns None when no chapter was resolved).
"""
if self.chapter:
return self._find_block(self.chapter, self.section_url_name, 'section')
def _prefetch_and_bind_course(self, request):
"""
Prefetches all descendant data for the course (to CONTENT_DEPTH) and
sets up the runtime, which binds the request user to the course module.
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key,
self.effective_user,
self.course,
depth=CONTENT_DEPTH,
read_only=CrawlersConfig.is_crawler(request),
)
self.course = get_module_for_descriptor(
self.effective_user,
self.request,
self.course,
self.field_data_cache,
self.course_key,
course=self.course,
)
def _prefetch_and_bind_section(self):
"""
Prefetches all descendant data for the requested section and
sets up the runtime, which binds the request user to the section.
"""
# Pre-fetch all descendant data
self.section = modulestore().get_item(self.section.location, depth=None, lazy=False)
self.field_data_cache.add_descriptor_descendents(self.section, depth=None)
# Bind section to user
self.section = get_module_for_descriptor(
self.effective_user,
self.request,
self.section,
self.field_data_cache,
self.course_key,
self.position,
course=self.course,
)
def _save_positions(self):
"""
Save where we are in the course and chapter.
"""
save_child_position(self.course, self.chapter_url_name)
save_child_position(self.chapter, self.section_url_name)
def _create_courseware_context(self, request):
"""
Returns and creates the rendering context for the courseware.
Also returns the table of contents for the courseware.

NOTE: also renders the active section (STUDENT_VIEW fragment) as a
side effect when self.section is set.
"""
course_url_name = default_course_url_name(self.course.id)
course_url = reverse(course_url_name, kwargs={'course_id': unicode(self.course.id)})
courseware_context = {
'csrf': csrf(self.request)['csrf_token'],
'course': self.course,
'course_url': course_url,
'chapter': self.chapter,
'section': self.section,
'init': '',
'fragment': Fragment(),
'staff_access': self.is_staff,
'masquerade': self.masquerade,
'supports_preview_menu': True,
'studio_url': get_studio_url(self.course, 'course'),
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'bookmarks_api_url': reverse('bookmarks'),
'language_preference': self._get_language_preference(),
'disable_optimizely': not WaffleSwitchNamespace('RET').is_enabled('enable_optimizely_in_courseware'),
'section_title': None,
'sequence_title': None,
'disable_accordion': COURSE_OUTLINE_PAGE_FLAG.is_enabled(self.course.id),
}
courseware_context.update(
get_experiment_user_metadata_context(
self.course,
self.effective_user,
)
)
table_of_contents = toc_for_course(
self.effective_user,
self.request,
self.course,
self.chapter_url_name,
self.section_url_name,
self.field_data_cache,
)
courseware_context['accordion'] = render_accordion(
self.request,
self.course,
table_of_contents['chapters'],
)
courseware_context['course_sock_fragment'] = CourseSockFragmentView().render_to_fragment(
request, course=self.course)
# entrance exam data
self._add_entrance_exam_to_context(courseware_context)
# staff masquerading data
if not check_course_open_for_learner(self.effective_user, self.course):
# Disable student view button if user is staff and
# course is not yet visible to students.
courseware_context['disable_student_access'] = True
courseware_context['supports_preview_menu'] = False
if self.section:
# chromeless data: the section's 'chrome' field can suppress the
# accordion and/or the tab bar.
if self.section.chrome:
chrome = [s.strip() for s in self.section.chrome.lower().split(",")]
if 'accordion' not in chrome:
courseware_context['disable_accordion'] = True
if 'tabs' not in chrome:
courseware_context['disable_tabs'] = True
# default tab
if self.section.default_tab:
courseware_context['default_tab'] = self.section.default_tab
# section data
courseware_context['section_title'] = self.section.display_name_with_default
section_context = self._create_section_context(
table_of_contents['previous_of_active_section'],
table_of_contents['next_of_active_section'],
)
courseware_context['fragment'] = self.section.render(STUDENT_VIEW, section_context)
if self.section.position and self.section.has_children:
display_items = self.section.get_display_items()
if display_items:
try:
# position is 1-based; IndexError here indicates stale state.
courseware_context['sequence_title'] = display_items[self.section.position - 1] \
.display_name_with_default
except IndexError:
log.exception(
"IndexError loading courseware for user %s, course %s, section %s, position %d. Total items: %d. URL: %s",
self.real_user.username,
self.course.id,
self.section.display_name_with_default,
self.section.position,
len(display_items),
self.url,
)
raise
return courseware_context
def _add_entrance_exam_to_context(self, courseware_context):
"""
Adds entrance exam related information to the given context.
"""
if course_has_entrance_exam(self.course) and getattr(self.chapter, 'is_entrance_exam', False):
courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.effective_user, self.course)
courseware_context['entrance_exam_current_score'] = get_entrance_exam_score_ratio(
CourseGradeFactory().create(self.effective_user, self.course),
get_entrance_exam_usage_key(self.course),
)
def _create_section_context(self, previous_of_active_section, next_of_active_section):
"""
Returns and creates the rendering context for the section.
"""
def _compute_section_url(section_info, requested_child):
"""
Returns the section URL for the given section_info with the given child parameter.
"""
return "{url}?child={requested_child}".format(
url=reverse(
'courseware_section',
args=[unicode(self.course_key), section_info['chapter_url_name'], section_info['url_name']],
),
requested_child=requested_child,
)
section_context = {
'activate_block_id': self.request.GET.get('activate_block_id'),
'requested_child': self.request.GET.get("child"),
'progress_url': reverse('progress', kwargs={'course_id': unicode(self.course_key)}),
}
if previous_of_active_section:
section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last')
if next_of_active_section:
section_context['next_url'] = _compute_section_url(next_of_active_section, 'first')
# sections can hide data that masquerading staff should see when debugging issues with specific students
section_context['specific_masquerade'] = self._is_masquerading_as_specific_student()
return section_context
def render_accordion(request, course, table_of_contents):
    """
    Return the rendered HTML navigation accordion for the given course.

    Expects table_of_contents to carry data on each chapter and section,
    including which entries are currently active.
    """
    context = {
        'toc': table_of_contents,
        'course_id': unicode(course.id),
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    }
    # Expose the template helper modules (e.g. urllib) to the template as well.
    context.update(TEMPLATE_IMPORTS)
    return render_to_string('courseware/accordion.html', context)
def save_child_position(seq_module, child_name):
    """
    Record which child of ``seq_module`` the user is on.

    child_name: url_name of the child whose 1-based position is stored.
    """
    for index, child in enumerate(seq_module.get_display_items(), start=1):
        if child.location.name == child_name:
            # Only update the field when the stored position is stale.
            if index != seq_module.position:
                seq_module.position = index
    # Persist the (possibly updated) position to the underlying KeyValueStore.
    seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
    """
    Walk up the course tree starting from a leaf, saving each node's
    position in its parent as it goes.
    """
    current_module = xmodule
    while current_module:
        parent = None
        parent_location = modulestore().get_parent_location(current_module.location)
        if parent_location:
            # Bind the parent descriptor to this user so its state can be saved.
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user,
                request,
                parent_descriptor,
                field_data_cache,
                current_module.location.course_key,
                course=course,
            )
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, current_module.location.name)
        current_module = parent
| Lektorium-LLC/edx-platform | lms/djangoapps/courseware/views/index.py | Python | agpl-3.0 | 21,509 | [
"VisIt"
] | 5bd3fc9ad0d2eea43ba2432e1a87f52aecea928f89b3bb1a435aed2badd2d92d |
#!/usr/bin/env python
'''
Parse logged data from BEDs .EVT or .WAT file to compute translations in x, y, z and rotations
about these axes. Output as NetCDF with the intention of of loading into a STOQS database.
The resulting Netcdf file will be of featureType timeSeries for stationary events (the default).
If the --trajectory option is given then a NetCDF file with featureType of trajectory will be
created using the path in the file specified with the --trajectory option.
--
Mike McCann
18 July 2013
$Id: bed2netcdf.py 13838 2019-08-12 22:59:15Z mccann $
'''
import os
import sys
import csv
import math
import coards
import datetime
import numpy as np
import numpy.ma as ma
from BEDS import BEDS, bcal, NoPressureData
from scipy.interpolate import interp1d
from seawater import eos80
from netCDF4 import Dataset
class BEDS_NetCDF(BEDS):
def __init__(self):
    """
    Initialize with options (all initialization is delegated to the
    BEDS base class).
    """
    # __init__ must return None; call the base initializer instead of
    # returning its (None) result, which was misleading.
    super(BEDS_NetCDF, self).__init__()
def createNetCDFfromFile(self):
    '''
    Read data from EVT or WAT file and apply operations to convert it to data with units that
    are then written to a NetCDF file.

    Depending on ``self.featureType`` the output is written either as a
    stationary timeSeries (singleton latitude/longitude/depth coordinates)
    or as a trajectory (per-sample coordinates interpolated from the
    thalweg trace file given with --trajectory).

    Raises:
        Exception: if an input file cannot be opened, the sensor type is
            unhandled, no --output is given for multiple inputs, or a
            trajectory cannot be extracted.
    '''
    # Suppress 'util.py:70: RuntimeWarning: invalid value encountered in sqrt'
    np.seterr(all='ignore')

    if self.args.trajectory and self.args.beg_depth and self.args.end_depth:
        print("Extracting thalweg data between command line specified depths {} and {}".format(self.args.beg_depth, self.args.end_depth))
        self.readTrajectory(self.args.beg_depth, self.args.end_depth)

    for fileName in self.inputFileNames:
        # Make sure input file is openable
        print('Input fileName = ', fileName)
        try:
            with open(fileName):
                pass
        except IOError:
            raise Exception('Cannot open input file %s' % fileName)

        if self.sensorType == 'Invensense':
            try:
                if self.args.read_csv:
                    self.readBEDs_csv_File(fileName)
                else:
                    self.readBEDsFile(fileName)
                self.processAccelerations()
                self.processRotations(useMatlabCode=False)
            except NoPressureData as e:
                # Best-effort: skip files without pressure data, keep going
                print(str(e))
                continue
        else:
            raise Exception("No handler for sensorType = %s" % self.sensorType)

    if not hasattr(self, 's2013'):
        print('Could not read time (s2013) from input file(s)')
        exit(-1)

    if self.args.seconds_offset:
        # Shift both IMU (s2013) and pressure (ps2013) clocks, e.g. to
        # correct a BED whose clock was set to local time instead of GMT
        self.s2013 = self.s2013 + self.args.seconds_offset
        self.ps2013 = self.ps2013 + self.args.seconds_offset

    # Derive output file name from input when --output is not given
    if self.args.output:
        self.outFile = self.args.output
    elif len(self.inputFileNames) == 1:
        self.outFile = self.inputFileNames[0].split('.')[0]
        if '.EVT' in self.inputFileNames[0]:
            self.outFile += '_full'
        elif '.E00' in self.inputFileNames[0]:
            self.outFile += '_decim'
        if self.args.trajectory:
            self.outFile += '_traj'
        self.outFile += '.nc'
    else:
        raise Exception("Must specify --output if more than one input file.")

    if self.args.trajectory and self.args.bed_name:
        # Expect we have well-calibrated and tide-corrected depths in bed_depth[] array
        print("Extracting thalweg data between depths {} and {}".format(self.bed_depth[0], self.bed_depth[-1]))
        self.readTrajectory(self.bed_depth[0], self.bed_depth[-1])

    # Check self.args.trajectory first so that self.traj_lat/traj_lon are
    # only referenced after trajectory processing has had a chance to set
    # them (the original order could raise AttributeError for stationary runs)
    if self.args.trajectory and (not self.traj_lat or not self.traj_lon):
        raise Exception('Could not extract trajectory between {} and {}.'
                        ' Consider processing as a stationary event.'.format(
                            self.bed_depth[0], self.bed_depth[-1]))

    if self.args.trajectory:
        self.featureType = 'trajectory'

    # Interpolate data to regularly spaced time values - may need to do this to improve accuracy
    # (See http://www.freescale.com/files/sensors/doc/app_note/AN3397.pdf)
    ##si = linspace(self.s2013[0], self.s2013[-1], len(self.s2013))
    ##axi = interp(si, self.s2013, self.ax)

    # TODO: Review this calculation - may need to rotate these to the absolute (not rotating) frame
    # Double integrate accelerations to get position and construct X3D position values string
    # (May need to high-pass filter the data to remove noise that can give unreasonably large positions.)
    ##x = self.cumtrapz(self.s2013, self.cumtrapz(self.s2013, self.ax))
    ##y = self.cumtrapz(self.s2013, self.cumtrapz(self.s2013, self.ay))
    ##z = self.cumtrapz(self.s2013, self.cumtrapz(self.s2013, self.az))

    # Create the NetCDF file
    self.ncFile = Dataset(self.outFile, 'w')

    # Time dimensions for both trajectory and timeSeries datasets - IMU and pressure have different times
    self.ncFile.createDimension('time', len(self.s2013))
    self.time = self.ncFile.createVariable('time', 'float64', ('time',))
    self.time.standard_name = 'time'
    self.time.long_name = 'Time(GMT)'
    self.time.units = 'seconds since 2013-01-01 00:00:00'
    self.time[:] = self.s2013

    if self.featureType == 'timeseries':
        # Pressure samples are on their own (slower) clock: ptime
        self.ncFile.createDimension('ptime', len(self.ps2013))
        self.ptime = self.ncFile.createVariable('ptime', 'float64', ('ptime',))
        self.ptime.standard_name = 'time'
        self.ptime.long_name = 'Time(GMT)'
        self.ptime.units = 'seconds since 2013-01-01 00:00:00'
        self.ptime[:] = self.ps2013

        # Save with COARDS compliant station (singleton) coordinates
        self.ncFile.createDimension('latitude', 1)
        self.latitude = self.ncFile.createVariable('latitude', 'float64', ('latitude',))
        self.latitude.long_name = 'LATITUDE'
        self.latitude.standard_name = 'latitude'
        self.latitude.units = 'degree_north'
        self.latitude[0] = self.lat

        self.ncFile.createDimension('longitude', 1)
        self.longitude = self.ncFile.createVariable('longitude', 'float64', ('longitude',))
        self.longitude.long_name = 'LONGITUDE'
        self.longitude.standard_name = 'longitude'
        self.longitude.units = 'degree_east'
        self.longitude[0] = self.lon

        self.ncFile.createDimension('depth', 1)
        self.depth = self.ncFile.createVariable('depth', 'float64', ('depth',))
        self.depth.long_name = 'depth'
        self.depth.standard_name = 'depth'
        self.depth.comment = 'Value provided on bed2netcdf.py command line'
        self.depth.units = 'm'
        self.depth[0] = self.dpth

        # Record Variable - Pressure and Depth
        pr = self.ncFile.createVariable('PRESS', 'float64', ('ptime', 'depth', 'latitude', 'longitude'))
        pr.long_name = 'External Instrument Pressure'
        pr.coordinates = 'ptime depth latitude longitude'
        pr.units = 'bar'
        pr[:] = self.pr.reshape(len(self.pr), 1, 1, 1)

        bd = self.ncFile.createVariable('BED_DEPTH', 'float64', ('ptime', 'depth', 'latitude', 'longitude'))
        bd.long_name = 'Depth of BED'
        bd.coordinates = 'ptime depth latitude longitude'
        bd.comment = self.bed_depth_comment
        bd.units = 'm'
        bd[:] = self.bed_depth.reshape(len(self.bed_depth), 1, 1, 1)

        bdi = self.ncFile.createVariable('BED_DEPTH_LI', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        bdi.long_name = 'Depth of BED - Linearly Interpolated to IMU Samples'
        bdi.coordinates = 'time depth latitude longitude'
        bdi.comment = self.bed_depth_comment
        bdi.units = 'm'
        bdi[:] = np.interp(self.s2013, self.ps2013, bd[:].reshape(len(self.pr))).reshape(len(self.s2013), 1, 1, 1)

        # Record Variables - Accelerations
        xa = self.ncFile.createVariable('XA', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        xa.long_name = 'Acceleration along X-axis'
        xa.coordinates = 'time depth latitude longitude'
        xa.units = 'g'
        xa[:] = self.ax.reshape(len(self.ax), 1, 1, 1)

        ya = self.ncFile.createVariable('YA', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        ya.long_name = 'Acceleration along Y-axis'
        ya.coordinates = 'time depth latitude longitude'
        ya.units = 'g'
        ya[:] = self.ay.reshape(len(self.ay), 1, 1, 1)

        za = self.ncFile.createVariable('ZA', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        # Fixed copy-paste: this is the Z-axis, not the X-axis
        za.long_name = 'Acceleration along Z-axis'
        za.coordinates = 'time depth latitude longitude'
        za.units = 'g'
        za[:] = self.az.reshape(len(self.az), 1, 1, 1)

        a = self.ncFile.createVariable('A', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        a.long_name = 'Acceleration Magnitude'
        a.coordinates = 'time depth latitude longitude'
        a.units = 'g'
        a[:] = self.a.reshape(len(self.a), 1, 1, 1)

        # Record Variables - Rotations
        # Nose of model points to -Z (north) and Up is +Y
        xr = self.ncFile.createVariable('XR', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        xr.long_name = 'Rotation about X-axis'
        xr.coordinates = 'time depth latitude longitude'
        xr.units = 'degree'
        xr.standard_name = 'platform_pitch_angle'
        xr[:] = (self.rx * 180 / np.pi).reshape(len(self.rx), 1, 1, 1)

        yr = self.ncFile.createVariable('YR', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        yr.long_name = 'Rotation about Y-axis'
        yr.coordinates = 'time depth latitude longitude'
        yr.units = 'degree'
        yr.standard_name = 'platform_yaw_angle'
        yr[:] = (self.ry * 180 / np.pi).reshape(len(self.ry), 1, 1, 1)

        zr = self.ncFile.createVariable('ZR', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        zr.long_name = 'Rotation about Z-axis'
        zr.coordinates = 'time depth latitude longitude'
        zr.units = 'degree'
        zr.standard_name = 'platform_roll_angle'
        zr[:] = (self.rz * 180 / np.pi).reshape(len(self.rz), 1, 1, 1)

        axis_x = self.ncFile.createVariable('AXIS_X', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        axis_x.long_name = 'X-component of axis in axis-angle form of quaternion measurement'
        axis_x.comment = self.p_angle_axis_comment
        axis_x.coordinates = 'time depth latitude longitude'
        axis_x.units = ''
        axis_x[:] = self.px.reshape(len(self.px), 1, 1, 1)

        axis_y = self.ncFile.createVariable('AXIS_Y', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        axis_y.long_name = 'Y-component of axis in axis-angle form of quaternion measurement'
        axis_y.comment = self.p_angle_axis_comment
        axis_y.coordinates = 'time depth latitude longitude'
        axis_y.units = ''
        axis_y[:] = self.py.reshape(len(self.py), 1, 1, 1)

        axis_z = self.ncFile.createVariable('AXIS_Z', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        axis_z.long_name = 'Z-component of axis in axis-angle form of quaternion measurement'
        axis_z.comment = self.p_angle_axis_comment
        axis_z.coordinates = 'time depth latitude longitude'
        axis_z.units = ''
        axis_z[:] = self.pz.reshape(len(self.pz), 1, 1, 1)

        angle = self.ncFile.createVariable('ANGLE', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        angle.long_name = 'Angle rotated about axis in axis-angle form of quaternion measurement'
        angle.comment = self.p_angle_axis_comment
        angle.coordinates = 'time depth latitude longitude'
        angle.units = 'radian'
        angle[:] = self.angle.reshape(len(self.angle), 1, 1, 1)

        angle_rate = self.ncFile.createVariable('ANGLE_RATE', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        angle_rate.long_name = 'Absolute rate of ANGLE change'
        angle_rate.comment = self.angle_rate_comment
        angle_rate.coordinates = 'time depth latitude longitude'
        angle_rate.units = 'degree/second'
        angle_rate[:] = self.angle_rate.reshape(len(self.angle_rate), 1, 1, 1)

        angle_count = self.ncFile.createVariable('ANGLE_COUNT', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        angle_count.long_name = 'Absolute complete rotation count from ANGLE data'
        angle_count.comment = self.angle_count_comment
        angle_count.coordinates = 'time depth latitude longitude'
        angle_count.units = ''
        angle_count[:] = self.angle_count.reshape(len(self.angle_count), 1, 1, 1)

        # Axis about which platform is rotating - derived from dividing quaternions
        rot_x = self.ncFile.createVariable('ROT_X', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        rot_x.long_name = 'X-component of axis about which the BED is rotating from one time step to the next'
        rot_x.comment = self.m_angle_axis_comment
        rot_x.coordinates = 'time depth latitude longitude'
        rot_x.units = ''
        rot_x[:] = self.mx.reshape(len(self.mx), 1, 1, 1)

        rot_y = self.ncFile.createVariable('ROT_Y', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        rot_y.long_name = 'Y-component of axis about which the BED is rotating from one time step to the next'
        rot_y.comment = self.m_angle_axis_comment
        rot_y.coordinates = 'time depth latitude longitude'
        rot_y.units = ''
        rot_y[:] = self.my.reshape(len(self.my), 1, 1, 1)

        rot_z = self.ncFile.createVariable('ROT_Z', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        rot_z.long_name = 'Z-component of axis about which the BED is rotating from one time step to the next'
        rot_z.comment = self.m_angle_axis_comment
        rot_z.coordinates = 'time depth latitude longitude'
        rot_z.units = ''
        rot_z[:] = self.mz.reshape(len(self.mz), 1, 1, 1)

        rot_rate = self.ncFile.createVariable('ROT_RATE', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        rot_rate.long_name = 'Instantaneous rotation rate around axis about which the BED is rotating'
        rot_rate.comment = self.m_angle_axis_comment + ' and then angle / dt'
        rot_rate.coordinates = 'time depth latitude longitude'
        rot_rate.units = 'degree/second'
        rot_rate[:] = self.rotrate.reshape(len(self.rotrate), 1, 1, 1)

        rot_count = self.ncFile.createVariable('ROT_COUNT', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        rot_count.long_name = 'Rotation Count - Cumulative Sum of ROT_RATE * dt / 360 deg'
        rot_count.coordinates = 'time depth latitude longitude'
        rot_count.units = ''
        rot_count[:] = self.rotcount.reshape(len(self.rotcount), 1, 1, 1)

        # Pressure sensor data interpolated to IMU samples
        p = self.ncFile.createVariable('P', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        p.long_name = 'Pressure'
        p.coordinates = 'time depth latitude longitude'
        p.units = 'dbar'
        pres = np.interp(self.s2013, self.ps2013, self.pr)
        p[:] = pres.reshape(len(pres), 1, 1, 1)

        # Tumble rate & count
        tumble_rate = self.ncFile.createVariable('TUMBLE_RATE', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        tumble_rate.long_name = "Angular rate of change of BED's axis of rotation"
        tumble_rate.comment = 'Computed with: abs(last_vec.angle(vec)), where vec is the division of 2 successive quaternion measurements and last_vec is the previous vec'
        tumble_rate.coordinates = 'time depth latitude longitude'
        tumble_rate.units = 'degree/second'
        tumble_rate[:] = self.tumblerate.reshape(len(self.tumblerate), 1, 1, 1)

        tumble_count = self.ncFile.createVariable('TUMBLE_COUNT', 'float64', ('time', 'depth', 'latitude', 'longitude'))
        tumble_count.long_name = 'Tumble Count - Cumulative Sum of TUMBLE_RATE * dt / 360 deg'
        tumble_count.comment = 'Computed with: np.cumsum(np.absolute(self.difftumble)) / 2. / np.pi'
        tumble_count.coordinates = 'time depth latitude longitude'
        tumble_count[:] = self.tumblecount.reshape(len(self.tumblecount), 1, 1, 1)

        if hasattr(self, 'bed_depth_csi_comment'):
            # Spline interpolated bed_depth
            bed_depth_csi = self.ncFile.createVariable('BED_DEPTH_CSI', 'float64', ('time', 'depth', 'latitude', 'longitude'), fill_value=1.e20)
            bed_depth_csi.long_name = 'Depth of BED - Cubic Spline Interpolated to IMU Samples'
            bed_depth_csi.units = 'm'
            bed_depth_csi.coordinates = 'time depth latitude longitude'
            bed_depth_csi.comment = self.bed_depth_csi_comment
            bed_depth_csi[ma.clump_unmasked(self.p_mask)] = self.bed_depth_inside_spline

        if not self.args.no_tide_removal:
            # Tide data from OSTP Software calculation
            tide = self.ncFile.createVariable('TIDE', 'float64', ('ptime', 'depth', 'latitude', 'longitude'))
            tide.long_name = 'OSTP2 Tide model height'
            tide.coordinates = 'ptime depth latitude longitude'
            tide.comment = self.tide_comment
            tide.units = 'm'
            tide[:] = self.tide.reshape(len(self.tide), 1, 1, 1)

    elif self.featureType == 'trajectory':
        ifmt = '{var} linearly interpolated onto thalweg data from file {traj_file} using formula {formula}'
        print("Writing trajectory data")

        # Coordinate variables for trajectory
        # Interpolate trajectory lat and lon onto the times of the data
        self.latitude = self.ncFile.createVariable('latitude', 'float64', ('time',))
        self.latitude.long_name = 'LATITUDE'
        self.latitude.standard_name = 'latitude'
        self.latitude.units = 'degree_north'
        self.latitude.comment = ifmt.format(var='Latitude', traj_file=self.args.trajectory, formula=
            'np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_lat)), self.traj_lat)')
        self.latitude[:] = np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_lat)), self.traj_lat)

        self.longitude = self.ncFile.createVariable('longitude', 'float64', ('time',))
        self.longitude.long_name = 'LONGITUDE'
        self.longitude.standard_name = 'longitude'
        self.longitude.units = 'degree_east'
        self.longitude.comment = ifmt.format(var='Longitude', traj_file=self.args.trajectory, formula=
            'np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_lon)), self.traj_lon)')
        self.longitude[:] = np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_lon)), self.traj_lon)

        self.depth = self.ncFile.createVariable('depth', 'float64', ('time',))
        self.depth.long_name = 'DEPTH'
        self.depth.standard_name = 'depth'
        self.depth.units = 'm'
        self.depth.comment = "{} Linearly interpolated to IMU samples.".format(self.bed_depth_comment)
        self.depth[:] = np.interp(self.s2013, self.ps2013, self.bed_depth)

        # Record Variables - Accelerations
        xa = self.ncFile.createVariable('XA', 'float64', ('time',))
        xa.long_name = 'Acceleration along X-axis'
        xa.comment = 'Recorded by instrument'
        xa.coordinates = 'time depth latitude longitude'
        xa.units = 'g'
        xa[:] = self.ax

        ya = self.ncFile.createVariable('YA', 'float64', ('time',))
        ya.long_name = 'Acceleration along Y-axis'
        ya.comment = 'Recorded by instrument'
        ya.coordinates = 'time depth latitude longitude'
        ya.units = 'g'
        ya[:] = self.ay

        za = self.ncFile.createVariable('ZA', 'float64', ('time',))
        # Fixed copy-paste: this is the Z-axis, not the X-axis
        za.long_name = 'Acceleration along Z-axis'
        za.comment = 'Recorded by instrument'
        za.coordinates = 'time depth latitude longitude'
        za.units = 'g'
        za[:] = self.az

        a = self.ncFile.createVariable('A', 'float64', ('time',))
        a.long_name = 'Acceleration Magnitude'
        a.comment = 'Computed with: np.sqrt(self.ax**2 + self.ay**2 + self.az**2)'
        a.coordinates = 'time depth latitude longitude'
        a.units = 'g'
        a[:] = self.a

        # Record Variables - Rotations
        # Nose of model points to -Z (north) and Up is +Y
        xr = self.ncFile.createVariable('XR', 'float64', ('time',))
        xr.long_name = 'Rotation about X-axis'
        xr.standard_name = 'platform_pitch_angle'
        xr.comment = self.euler_comment
        xr.coordinates = 'time depth latitude longitude'
        xr.units = 'degree'
        xr[:] = (self.rx * 180 / np.pi)

        yr = self.ncFile.createVariable('YR', 'float64', ('time',))
        yr.long_name = 'Rotation about Y-axis'
        yr.standard_name = 'platform_yaw_angle'
        yr.comment = self.euler_comment
        yr.coordinates = 'time depth latitude longitude'
        yr.units = 'degree'
        if self.args.yaw_offset:
            # Apply heading correction, wrapping values back into [0, 360]
            yr.comment = yr.comment + '. Added {} degrees to original values.'.format(self.args.yaw_offset)
            yawl = []
            for y in (self.ry * 180 / np.pi) + self.args.yaw_offset:
                if y > 360.0:
                    yawl.append(y - 360.0)
                else:
                    yawl.append(y)
            yaw = np.array(yawl)
        else:
            yaw = (self.ry * 180 / np.pi)
        yr[:] = yaw

        zr = self.ncFile.createVariable('ZR', 'float64', ('time',))
        zr.long_name = 'Rotation about Z-axis'
        zr.standard_name = 'platform_roll_angle'
        zr.comment = self.euler_comment
        zr.coordinates = 'time depth latitude longitude'
        zr.units = 'degree'
        zr[:] = (self.rz * 180 / np.pi)

        # Axis coordinates & angle for angle_axis form of the quaternion
        # Note: STOQS UI has preference for AXIS_X, AXIS_Y, AXIS_Z, ANGLE over roll, pitch, and yaw
        axis_x = self.ncFile.createVariable('AXIS_X', 'float64', ('time',))
        axis_x.long_name = 'X-component of rotation vector'
        axis_x.comment = self.p_angle_axis_comment
        axis_x.coordinates = 'time depth latitude longitude'
        axis_x.units = ''
        axis_x[:] = self.px

        axis_y = self.ncFile.createVariable('AXIS_Y', 'float64', ('time',))
        axis_y.long_name = 'Y-component of rotation vector'
        axis_y.comment = self.p_angle_axis_comment
        axis_y.coordinates = 'time depth latitude longitude'
        axis_y.units = ''
        axis_y[:] = self.py

        axis_z = self.ncFile.createVariable('AXIS_Z', 'float64', ('time',))
        axis_z.long_name = 'Z-component of rotation vector'
        axis_z.comment = self.p_angle_axis_comment
        axis_z.coordinates = 'time depth latitude longitude'
        axis_z.units = ''
        axis_z[:] = self.pz

        angle = self.ncFile.createVariable('ANGLE', 'float64', ('time',))
        angle.long_name = 'Angle rotated about rotation vector'
        angle.comment = self.p_angle_axis_comment
        angle.coordinates = 'time depth latitude longitude'
        angle.units = 'radian'
        angle[:] = self.angle

        # Axis about which platform is rotating - derived from dividing quaternions
        rot_x = self.ncFile.createVariable('ROT_X', 'float64', ('time',))
        rot_x.long_name = 'X-component of platform rotation vector'
        rot_x.comment = self.m_angle_axis_comment
        rot_x.coordinates = 'time depth latitude longitude'
        rot_x.units = ''
        rot_x[:] = self.mx

        rot_y = self.ncFile.createVariable('ROT_Y', 'float64', ('time',))
        rot_y.long_name = 'Y-component of platform rotation vector'
        rot_y.comment = self.m_angle_axis_comment
        rot_y.coordinates = 'time depth latitude longitude'
        rot_y.units = ''
        rot_y[:] = self.my

        rot_z = self.ncFile.createVariable('ROT_Z', 'float64', ('time',))
        rot_z.long_name = 'Z-component of platform rotation vector'
        rot_z.comment = self.m_angle_axis_comment
        rot_z.coordinates = 'time depth latitude longitude'
        rot_z.units = ''
        rot_z[:] = self.mz

        # Rotation rate & count
        rot_rate = self.ncFile.createVariable('ROT_RATE', 'float64', ('time',))
        rot_rate.long_name = 'Absolute rotation rate about rotation vector'
        rot_rate.comment = 'Computed from angle output from Quaternion.get_euler() and the angle difference from one time step to the next'
        rot_rate.coordinates = 'time depth latitude longitude'
        rot_rate.units = 'degree/second'
        rot_rate[:] = self.rotrate

        rot_count = self.ncFile.createVariable('ROT_COUNT', 'float64', ('time', ))
        rot_count.long_name = 'Rotation Count - Cumulative Sum of ROT_RATE * dt / 360 deg'
        rot_count.comment = 'Computed with: np.cumsum(np.absolute(self.diffrot)) / 2. / np.pi'
        rot_count.coordinates = 'time depth latitude longitude'
        rot_count[:] = self.rotcount

        # Pressure sensor data linearly interpolated to IMU samples
        p = self.ncFile.createVariable('P', 'float64', ('time',))
        p.long_name = 'Pressure'
        p.comment = 'Recorded pressure linearly interpolated to IMU samples with np.interp(self.s2013, self.ps2013, self.pr)'
        p.coordinates = 'time depth latitude longitude'
        p.units = 'dbar'
        p[:] = np.interp(self.s2013, self.ps2013, self.pr)

        p_adj = self.ncFile.createVariable('P_ADJUSTED', 'float64', ('time',))
        p_adj.long_name = 'Adjusted Pressure'
        p_adj.coordinates = 'time depth latitude longitude'
        p_adj.units = 'dbar'
        p_adj.comment = self.pr_adj_comment
        p_adj[:] = np.interp(self.s2013, self.ps2013, self.pr_adj)

        # bed depth at pressure sample intervals
        bed_depth_li = self.ncFile.createVariable('BED_DEPTH_LI', 'float64', ('time',))
        bed_depth_li.long_name = 'Depth of BED - Linearly Interpolated to IMU samples'
        bed_depth_li.units = 'm'
        bed_depth_li.coordinates = 'time depth latitude longitude'
        bed_depth_li.comment = self.bed_depth_comment
        bed_depth_li[:] = np.interp(self.s2013, self.ps2013, self.bed_depth)

        # Avoid memory problems, see http://stackoverflow.com/questions/21435648/cubic-spline-memory-error
        if len(self.ps2013) < 6000:
            # Pressure sensor data linearly interpolated to IMU samples
            p_spline = self.ncFile.createVariable('P_SPLINE', 'float64', ('time',), fill_value=1.e20)
            p_spline.long_name = 'Pressure'
            p_spline.comment = ("Recorded pressure cubic spline interpolated to IMU samples with"
                                " spline_func = scipy.interpolate.interp1d(self.ps2013, self.pr, kind='cubic');"
                                " p_mask = ma.masked_less(ma.masked_greater(self.s2013, np.max(self.ps2013)), np.min(self.ps2013));"
                                " inside_spline = spline_func(ma.compressed(p_mask));"
                                " p_spline = spline_func(self.s2013); p_spline[ma.clump_unmasked(p_mask)] = inside_spline")
            p_spline.coordinates = 'time depth latitude longitude'
            p_spline.units = 'dbar'
            spline_func = interp1d(self.ps2013, self.pr_adj, kind='cubic')
            # Mask IMU points outside of pressure time, interpolate, then put back into filled array
            p_mask = ma.masked_less(ma.masked_greater(self.s2013, np.max(self.ps2013)), np.min(self.ps2013))
            inside_spline = spline_func(ma.compressed(p_mask))
            p_spline[ma.clump_unmasked(p_mask)] = inside_spline

            # First difference of splined pressure sensor data interpolated to IMU samples
            p_spline_rate = self.ncFile.createVariable('P_SPLINE_RATE', 'float64', ('time',), fill_value=1.e20)
            p_spline_rate.long_name = 'Rate of change of spline fit of pressure'
            p_spline_rate.comment = 'Pressure rate of change interpolated to IMU samples with p_spline_rate[ma.clump_unmasked(p_mask)] = np.append([0], np.diff(inside_spline)) * self.rateHz'
            p_spline_rate.coordinates = 'time depth latitude longitude'
            p_spline_rate.units = 'dbar/s'
            p_spline_rate[ma.clump_unmasked(p_mask)] = np.append([0], np.diff(inside_spline)) * self.rateHz

            # Spline interpolated bed depth
            bed_depth_csi = self.ncFile.createVariable('BED_DEPTH_CSI', 'float64', ('time',), fill_value=1.e20)
            bed_depth_csi.long_name = 'Depth of BED - Cubic Spline Interpolated to IMU Samples'
            bed_depth_csi.units = 'm'
            bed_depth_csi.coordinates = 'time depth latitude longitude'
            bed_depth_csi.comment = self.bed_depth_csi_comment
            bed_depth_csi[ma.clump_unmasked(self.p_mask)] = self.bed_depth_inside_spline
        else:
            print("Not creating cubic-spline interpolated variables, time series too long: {} points".format(len(self.ps2013)))

        # First difference of pressure sensor data interpolated to IMU samples
        p_rate = self.ncFile.createVariable('P_RATE', 'float64', ('time',))
        p_rate.long_name = 'Rate of change of pressure'
        p_rate.comment = 'Pressure rate of change interpolated to IMU samples with np.append([0], np.diff(np.interp(self.s2013, self.ps2013, self.pr))) * self.rateHz'
        p_rate.coordinates = 'time depth latitude longitude'
        p_rate.units = 'dbar/s'
        p_rate[:] = np.append([0], np.diff(np.interp(self.s2013, self.ps2013, self.pr_adj))) * self.rateHz

        # Compute implied distance and velocity based on 147 cm BED housing circumference
        rot_dist = self.ncFile.createVariable('ROT_DIST', 'float64', ('time', ))
        rot_dist.long_name = 'Implied distance traveled assuming pure rolling motion'
        rot_dist.comment = 'Computed with: ROT_COUNT * 1.47 m'
        rot_dist.coordinates = 'time depth latitude longitude'
        rot_dist.units = 'm'
        rot_dist[:] = rot_count[:] * 1.47

        implied_velocity = self.ncFile.createVariable('IMPLIED_VELOCITY', 'float64', ('time', ))
        implied_velocity.long_name = 'Implied BED velocity assuming pure rolling motion'
        implied_velocity.comment = 'Computed with: ROT_RATE * 1.47 / 360.0'
        implied_velocity.coordinates = 'time depth latitude longitude'
        implied_velocity.units = 'm/s'
        implied_velocity[:] = rot_rate[:] * 1.47 / 360.0

        if self.traj_dist_topo:
            # Distance over topo from mbgrdviz generated trajectory thalweg trace file
            self.dist_topo = self.ncFile.createVariable('DIST_TOPO', 'float64', ('time',))
            self.dist_topo.long_name = 'Distance over topography along thalweg'
            self.dist_topo.units = 'm'
            self.dist_topo.comment = ifmt.format(var='dist_topo', traj_file=self.args.trajectory, formula=
                'np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_dist_topo)), self.traj_dist_topo)')
            self.dist_topo.coordinates = 'time depth latitude longitude'
            self.dist_topo[:] = np.interp(np.linspace(0,1,len(self.s2013)), np.linspace(0,1,len(self.traj_dist_topo)), self.traj_dist_topo)

        # Tumble rate & count
        tumble_rate = self.ncFile.createVariable('TUMBLE_RATE', 'float64', ('time', ))
        tumble_rate.long_name = 'Angle change of axis (vec) in axis-angle representation of BED rotation'
        tumble_rate.comment = 'Computed with: abs(last_vec.angle(vec))'
        tumble_rate.coordinates = 'time depth latitude longitude'
        tumble_rate.units = 'degree/second'
        # Fixed copy-paste from the timeseries branch: this variable is 1-D
        # on 'time', so assign the 1-D array without the 4-D reshape
        tumble_rate[:] = self.tumblerate

        tumble_count = self.ncFile.createVariable('TUMBLE_COUNT', 'float64', ('time', ))
        tumble_count.long_name = 'Tumble Count - Cumulative Sum of TUMBLE_RATE * dt / 360 deg'
        tumble_count.comment = 'Computed with: np.cumsum(np.absolute(self.difftumble)) / 2. / np.pi'
        tumble_count.coordinates = 'time depth latitude longitude'
        tumble_count[:] = self.tumblecount

        # Compute tumble distance
        tumble_dist = self.ncFile.createVariable('TUMBLE_DIST', 'float64', ('time', ))
        tumble_dist.long_name = 'Implied distance traveled assuming tumbling translates to horizontal motion'
        tumble_dist.comment = 'Computed with: TUMBLE_COUNT * 1.47 m'
        tumble_dist.coordinates = 'time depth latitude longitude'
        tumble_dist.units = 'm'
        tumble_dist[:] = tumble_count[:] * 1.47

        # Sum of rotation and tumbling distances
        rot_plus_tumble_dist = self.ncFile.createVariable('ROT_PLUS_TUMBLE_DIST', 'float64', ('time', ))
        rot_plus_tumble_dist.long_name = 'Implied distance traveled assuming pure rolling motion'
        rot_plus_tumble_dist.comment = 'Computed with: ROT_DIST + TUMBLE_DIST'
        rot_plus_tumble_dist.coordinates = 'time depth latitude longitude'
        rot_plus_tumble_dist.units = 'm'
        rot_plus_tumble_dist[:] = rot_dist[:] + tumble_dist[:]

        # Tide data from OSTP Software calculation
        tide = self.ncFile.createVariable('TIDE', 'float64', ('time',))
        tide.long_name = 'OSTP2 Tide model height'
        tide.coordinates = 'time depth latitude longitude'
        tide.comment = self.tide_comment
        tide.units = 'm'
        tide[:] = np.interp(self.s2013, self.ps2013, self.tide)

    # Add the global metadata, overriding with command line options provided
    self.add_global_metadata()
    self.ncFile.title = 'Orientation and acceleration data from Benthic Event Detector'
    if self.args.title:
        self.ncFile.title = self.args.title
    if self.args.summary:
        self.ncFile.summary = self.args.summary

    self.ncFile.close()
def process_command_line(self):
    """Parse command-line options into self.args and derive instance attributes.

    Sets self.commandline, self.inputFileNames and self.sensorType, plus
    self.lat/self.lon/self.dpth and self.featureType for fixed-position
    deployments.  Calls parser.error() (which exits) on invalid option
    combinations and raises Exception for unrecognized input files.
    """
    import argparse
    from argparse import RawTextHelpFormatter
    examples = 'Examples:' + '\n\n'
    examples += ' For 12 April 2013 BED01 deployment:\n'
    examples += ' ' + sys.argv[0] + " --input BED00048.EVT --output BED00048.nc --lat 36.793458 --lon -121.845703 --depth 295 --decode\n"
    examples += ' For 1 June 2013 BED01 Canyon Event:\n'
    examples += ' ' + sys.argv[0] + " --input BED00038.EVT --lat 36.793458 --lon -121.845703 --depth 340 --decode\n"
    examples += ' ' + sys.argv[0] + " --input BED00039.EVT --lat 36.785428 --lon -121.903602 --depth 530 --decode\n"
    examples += ' ' + sys.argv[0] + " --input BED00038.EVT BED00039.EVT --output BED01_1_June_2013.nc --trajectory BEDSLocation1_ThalwegTrace.csv --decode\n"
    examples += ' For 18 February 2014 BED03 Canyon Event:\n'
    examples += ' ' + sys.argv[0] + " --input 30100046_partial_decimated10.EVT --lat 36.793367 --lon -121.8456035 --depth 292 --decode\n"
    examples += ' For 15 January 2016 BED03 Canyon Event with clock set to PDT:\n'
    examples += ' ' + sys.argv[0] + " -i 30200101.EVT.OUT -o 30200101.nc --lat 36.795040 --lon -121.869912 --depth 390 --seconds_offset 28800 -t 'BED03 Deployment in Monterey Canyon in October 2015 for the CCE project'\n"
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
                                     description='Convert BED event file(s) to a NetCDF file',
                                     epilog=examples)
    parser.add_argument('-i', '--input', action='store', nargs='*', required=True, help="Specify input event file name(s)")
    parser.add_argument('-o', '--output', action='store', help="Specify output NetCDF file name, if different from <base>.nc of input file name")
    parser.add_argument('-t', '--trajectory', action='store', help="csv file with columns of latitude and longitude where first and lat row corresponds to first and last records of input event files")
    parser.add_argument('--lat', type=float, action='store', help="latitude of BED device")
    parser.add_argument('--lon', type=float, action='store', help="longitude of BED device")
    parser.add_argument('--depth', type=float, action='store', help="depth of BED device")
    parser.add_argument('--seconds_offset', type=float, action='store', help="Add seconds to time in source file to make accurate GMT time", default=0.0)
    parser.add_argument('--seconds_slope', type=float, action='store', help="Adjust time in source file for drift, per second", default=0.0)
    parser.add_argument('--bar_offset', type=float, action='store', help="Add value to pressure in source file", default=0.0)
    parser.add_argument('--bar_slope', type=float, action='store', help="Adjust pressure in source file for drift", default=0.0)
    parser.add_argument('--bed_name', action='store', help='Name of the BED, e.g. BED06, BED10')
    parser.add_argument('--yaw_offset', type=float, action='store', help="Add value to yaw rotation angle", default=0.0)
    parser.add_argument('--decode', action='store_true', help="Pass the file contents through Bob's decode program")
    parser.add_argument('--title', action='store', help='A short description of the dataset')
    parser.add_argument('--summary', action='store', help='Additional information about the dataset')
    parser.add_argument('--beg_depth', type=float, action='store', help='Begining depth for lookup from trajectory file')
    parser.add_argument('--end_depth', type=float, action='store', help='Ending depth for lookup from trajectory file')
    parser.add_argument('--stride_imu', type=int, action='store', help='Records of IMU data to skip', default=1)
    parser.add_argument('--no_tide_removal', action='store_true', help='Default is to remove tides using OSTP2')
    parser.add_argument('--compare_euler', action='store_true', help='Report differences between Quaternion.get_euler() and transforms3d.euler.quat2euler()')
    parser.add_argument('--read_csv', action='store_true', help='Read from the csv format produced by decodeBEDS.py')
    parser.add_argument('-v', '--verbose', type=int, choices=list(range(3)), action='store', default=0, help="Specify verbosity level, values greater than 1 give more details ")
    self.args = parser.parse_args()
    if not self.args.input:
        parser.error("Must specify --input\n")
    # Use explicit "is not None" tests so legitimate zero values
    # (e.g. lat == 0.0 at the equator, depth == 0.0) are not
    # mistaken for missing options by Python truthiness.
    if self.args.trajectory:
        pass
    else:
        if (self.args.lat is not None and self.args.lon is not None
                and self.args.depth is not None):
            pass
        else:
            parser.error("If no --trajectory specified then must specify --lat, --lon, and --depth")
    self.commandline = ' '.join(sys.argv)
    self.inputFileNames = self.args.input
    if (self.args.lat is not None and self.args.lon is not None
            and self.args.depth is not None):
        # Fixed-position deployment: a single point time series.
        self.lat = self.args.lat
        self.lon = self.args.lon
        self.dpth = self.args.depth
        self.featureType = 'timeseries'
        if self.args.verbose > 0:
            print("self.lat = %f, self.lon = %f, self.dpth = %f" % (self.lat, self.lon, self.dpth))
    elif self.args.trajectory:
        self.featureType = 'trajectory'
    else:
        raise Exception("Unknown featureType - must be timeseries or trajectory")
    for fileName in self.inputFileNames:
        # NOTE(review): 'invensense_extenstions' (sic) is defined elsewhere
        # in the class; the misspelling is kept for compatibility.
        if fileName[-3:] in self.invensense_extenstions:
            self.sensorType = 'Invensense'
        else:
            raise Exception("Unknown file: %s. Input file must end in %s." % (fileName,
                            self.invensense_extenstions))
# Script entry point: parse CLI options, convert the BED event file(s)
# to NetCDF, and report the path of the file that was written.
if __name__ == '__main__':
beds_netcdf = BEDS_NetCDF()
beds_netcdf.process_command_line()
beds_netcdf.createNetCDFfromFile()
print("Wrote file %s\n" % beds_netcdf.outFile)
| stoqs/stoqs | stoqs/loaders/CCE/bed2netcdf/bed2netcdf.py | Python | gpl-3.0 | 41,983 | [
"NetCDF"
] | 36b1067237efe4420b5f5239755b079c6f55261772526a34638bba52444d150f |
# -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,random
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
debridstatus = control.setting('debridsources')
# Scraper provider for perfecthdmovies.pw used by the add-on's source
# framework.  NOTE: Python 2 code (urllib.quote_plus, urlparse module);
# every method swallows exceptions and returns None on failure.
class source:
def __init__(self):
# Domains served by this provider and URL templates for searching.
self.domains = ['perfecthdmovies.pw']
self.base_link = ''
self.search_link = '/?s=%s'
# Search the site for a movie; caches and returns a list of
# [result_page_url, cleaned_title] pairs on self.zen_url, or None
# when debrid sources are disabled or on any error.
def movie(self, imdb, title, year):
self.zen_url = []
try:
# Provider yields debrid-only links; bail out unless enabled.
if not debridstatus == 'true': raise Exception()
self.zen_url = []
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
query = "http://www.perfecthdmovies.pw/?s=%s+%s" % (urllib.quote_plus(title),year)
link = client.request(query)
r = client.parseDOM(link, 'div', attrs = {'class': 'image'})
for links in r:
url = client.parseDOM(links, 'a', ret='href')[0]
title = client.parseDOM(links, 'img', ret='alt')[0]
title = cleantitle.get(title)
# Keep only results whose cleaned title contains both the
# release year and the cleaned movie name.
if year in title:
if cleanmovie in title:
self.zen_url.append([url,title])
# print "PERFECT HD MOVIES %s %s" % (title , url)
return self.zen_url
except:
return
# TV shows: just URL-encode the show identity; episode() does the search.
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
# Search the site for a specific SxxEyy episode of the show whose
# identity was encoded by tvshow(); same caching/return convention
# as movie().
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
self.zen_url = []
try:
if not debridstatus == 'true': raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
title = cleantitle.getsearch(title)
cleanmovie = cleantitle.get(title)
data['season'], data['episode'] = season, episode
self.zen_url = []
episodecheck = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
episodecheck = str(episodecheck).lower()
query = '%s+S%02dE%02d' % (urllib.quote_plus(title), int(data['season']), int(data['episode']))
link = "http://www.perfecthdmovies.pw/?s=" + str(query)
slink = client.request(link)
r = client.parseDOM(slink, 'div', attrs = {'class': 'image'})
for links in r:
url = client.parseDOM(links, 'a', ret='href')[0]
title = client.parseDOM(links, 'img', ret='alt')[0]
title = cleantitle.get(title)
if cleanmovie in title:
if episodecheck in title:
self.zen_url.append([url,title])
return self.zen_url
except:
return
# Fetch each cached result page and extract host links, filtering out
# blacklisted hosts/extensions and keeping only debrid-capable hosts.
def sources(self, url, hostDict, hostprDict):
try:
sources = []
for movielink,title in self.zen_url:
# Infer video quality and extra info from tokens in the title.
if "1080" in title: quality = "1080p"
elif "720" in title: quality = "HD"
else: quality = "SD"
info = ''
if "3d" in title.lower(): info = "3D"
if "hevc" in title.lower(): info = "HEVC"
mylink = client.request(movielink)
r = client.parseDOM(mylink, 'div', attrs = {'class': 'separator'})
for links in r:
movielinks = client.parseDOM(links, 'a', ret='href')
for url in movielinks:
# Drop known junk hosts, archives and image links.
if not any(value in url for value in ['uploadkadeh','wordpress','crazy4tv','imdb.com','youtube','userboard','kumpulbagi','mexashare','myvideolink.xyz', 'myvideolinks.xyz' , 'costaction', 'crazydl','.rar', '.RAR', 'safelinking','linx.2ddl.ag','upload.so','.zip', 'go4up', 'adf.ly','.jpg','.jpeg']):
if any(value in url for value in hostprDict):
if not "google" in url:
# Host label is the last two dotted components of the netloc.
try:host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
except: host = 'Videomega'
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'provider': 'Phdmovies', 'url': url, 'info': info,'direct': False, 'debridonly': True})
return sources
except:
return sources
# Links are already final; no additional resolution step needed.
def resolve(self, url):
return url
# Extract the inner content of the tag instance whose opening tag text is
# *match* inside *html*; when *ret* is truthy, the opening and closing
# tags are included in the result.
def _getDOMContent(html, name, match, ret):
end_str = "</%s" % (name)
start_str = '<%s' % (name)
start = html.find(match)
end = html.find(end_str, start)
pos = html.find(start_str, start + 1)
# Each nested <name ...> found before the current end pushes the end
# forward one more closing tag, so we stop at the matching close tag.
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(end_str, end + len(end_str))
if tend != -1:
end = tend
pos = html.find(start_str, pos + 1)
# Degrade gracefully when either delimiter was not found (-1).
if start == -1 and end == -1:
result = ''
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
else:
result = ''
if ret:
# Re-attach the opening tag and the complete closing tag.
endstr = html[end:html.find(">", html.find(end_str)) + 1]
result = match + result + endstr
return result
def _getDOMAttributes(match, name, ret):
pattern = '''<%s[^>]* %s\s*=\s*(?:(['"])(.*?)\\1|([^'"].*?)(?:>|\s))''' % (name, ret)
results = re.findall(pattern, match, re.I | re.M | re.S)
return [result[1] if result[1] else result[2] for result in results]
def _getDOMElements(item, name, attrs):
if not attrs:
pattern = '(<%s(?: [^>]*>|/?>))' % (name)
this_list = re.findall(pattern, item, re.M | re.S | re.I)
else:
last_list = None
for key in attrs:
pattern = '''(<%s [^>]*%s=['"]%s['"][^>]*>)''' % (name, key, attrs[key])
this_list = re.findall(pattern, item, re.M | re. S | re.I)
if not this_list and ' ' not in attrs[key]:
pattern = '''(<%s [^>]*%s=%s[^>]*>)''' % (name, key, attrs[key])
this_list = re.findall(pattern, item, re.M | re. S | re.I)
if last_list is None:
last_list = this_list
else:
last_list = [item for item in this_list if item in last_list]
this_list = last_list
return this_list
# Minimal regex-based HTML "DOM" query: return the contents (or, when
# *ret* is an attribute name, the attribute values) of all <*name*> tags
# matching *attrs* in an HTML string or list of strings.
# Python 2 only: uses the print statement, unicode, and str.decode.
def parse_dom(html, name='', attrs=None, ret=False):
if attrs is None: attrs = {}
# Normalize input to a list of unicode strings.
if isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
print "none"
try:
html = [html.decode("utf-8", "replace")]
except:
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
return ''
if not name.strip():
return ''
if not isinstance(attrs, dict):
return ''
ret_lst = []
for item in html:
# Collapse newlines inside tags so the regexes can match them.
for match in re.findall('(<[^>]*\n[^>]*>)', item):
item = item.replace(match, match.replace('\n', ' ').replace('\r', ' '))
lst = _getDOMElements(item, name, attrs)
if isinstance(ret, str):
# Caller asked for an attribute value rather than tag content.
lst2 = []
for match in lst:
lst2 += _getDOMAttributes(match, name, ret)
lst = lst2
else:
# Extract tag contents, advancing *item* past each hit so that
# repeated identical tags are not re-matched.
lst2 = []
for match in lst:
temp = _getDOMContent(item, name, match, ret).strip()
item = item[item.find(temp, item.find(match)):]
lst2.append(temp)
lst = lst2
ret_lst += lst
# log_utils.log("Done: " + repr(ret_lst), xbmc.LOGDEBUG)
return ret_lst
| repotvsupertuga/repo | plugin.video.zen/resources/lib/sources/phdmovies_mv_tv.py | Python | gpl-2.0 | 8,060 | [
"ADF"
] | 4f96dd4aa3344d2b3dd0cdba053fb5e21ff7829b812fe101dd831898c9d940c6 |
# Example script: export the NEURON model in test.hoc to NeuroML v1
# twice, at the level given by the third argument (1 and 2).
from pyneuroml.neuron import export_to_neuroml1
export_to_neuroml1("test.hoc", "test.level1.xml", 1)
export_to_neuroml1("test.hoc", "test.level2.xml", 2)
| 34383c/pyNeuroML | examples/export_neuroml1.py | Python | lgpl-3.0 | 158 | [
"NEURON"
] | 28ab94e263f3899c21a14b5a27d186458ace523c747baff64bc014dc5a7492b4 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# notifypassword - Send forgotten password from user database to user
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Send forgotten user password from user database to user. Allows password
reminder to saved notification address or email from Distinguished Name field
of user entry.
"""
import sys
import getopt
from shared.defaults import keyword_auto
from shared.notification import notify_user
from shared.useradm import init_user_adm, user_password_reminder
# Python 2 code: usage text is printed with the print *statement* and
# a %-formatted triple-quoted string.
def usage(name='notifypassword.py'):
"""Print CLI usage help for the notifypassword admin script."""
print """Send forgotten password to user from user database.
Usage:
%(name)s [NOTIFY_OPTIONS]
Where NOTIFY_OPTIONS may be one or more of:
-a Send reminder to email address from database
-c CONF_FILE Use CONF_FILE as server configuration
-d DB_PATH Use DB_PATH as user data base file path
-e EMAIL Send reminder to custom email address
-h Show this help
-I CERT_DN Send reminder for user with ID (distinguished name)
-s PROTOCOL Send reminder to notification protocol from settings
-v Verbose output
One or more destinations may be set by combining multiple -e, -s and -a
options.
"""\
% {'name': name}
# ## Main ###
# Script entry point (Python 2: comma except-clause, print statements).
# Parses getopt-style options, looks up the user's password reminder in
# the user database, and sends it via the collected notification targets.
if '__main__' == __name__:
(args, app_dir, db_path) = init_user_adm()
conf_path = None
verbose = False
# raw_targets maps protocol name -> list of destinations; the special
# values keyword_auto and 'SETTINGS' mean "look up from DB/settings".
raw_targets = {}
user_id = None
opt_args = 'ac:d:e:hI:s:v'
try:
(opts, args) = getopt.getopt(args, opt_args)
except getopt.GetoptError, err:
print 'Error: ', err.msg
usage()
sys.exit(1)
for (opt, val) in opts:
if opt == '-a':
raw_targets['email'] = raw_targets.get('email', [])
raw_targets['email'].append(keyword_auto)
elif opt == '-c':
conf_path = val
elif opt == '-d':
db_path = val
elif opt == '-e':
raw_targets['email'] = raw_targets.get('email', [])
raw_targets['email'].append(val)
elif opt == '-h':
usage()
sys.exit(0)
elif opt == '-I':
user_id = val
elif opt == '-s':
val = val.lower()
raw_targets[val] = raw_targets.get(val, [])
raw_targets[val].append('SETTINGS')
elif opt == '-v':
verbose = True
else:
print 'Error: %s not supported!' % opt
usage()
sys.exit(0)
if not user_id:
print "No user_id provided!"
sys.exit(1)
# Resolve the password and concrete destination addresses from the DB.
(configuration, password, addresses, errors) = \
user_password_reminder(user_id, raw_targets, conf_path,
db_path, verbose)
if errors:
print "Address lookup errors:"
print '\n'.join(errors)
if not addresses:
print "Error: found no suitable addresses"
sys.exit(1)
logger = configuration.logger
# Build a job-like dict as required by notify_user().
notify_dict = {'JOB_ID': 'NOJOBID', 'USER_CERT': user_id, 'NOTIFY': []}
for (proto, address_list) in addresses.items():
for address in address_list:
notify_dict['NOTIFY'].append('%s: %s' % (proto, address))
print "Sending password reminder(s) for '%s' to:\n%s" % \
(user_id, '\n'.join(notify_dict['NOTIFY']))
notify_user(notify_dict, [user_id, password], 'PASSWORDREMINDER', logger,
'', configuration)
| heromod/migrid | mig/server/notifypassword.py | Python | gpl-2.0 | 4,268 | [
"Brian"
] | 46ca55ac7d38e599d68988d8576e48cde12e22fe3a9f9da59a90665d3a25b0e8 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
# Python 2 print statement; the whole usage message is one runtime
# string, so it is left untouched.
def PrintUsage():
"""Print CLI usage help for checklicenses.py."""
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'SunSoft (BSD like)',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere from the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD MIT/X11 (BSD like)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSD (4 clause) ISC',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
'MPL (v1.1) LGPL (unversioned/unknown version)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/catapult/tracing/third_party/devscripts': [
'GPL (v2 or later)',
],
# https://github.com/shazow/apiclient/issues/8
# MIT license.
'third_party/catapult/third_party/apiclient': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/apiclient': [
'UNKNOWN',
],
# https://bugs.launchpad.net/beautifulsoup/+bug/1481316
# MIT license.
'third_party/catapult/third_party/beautifulsoup': [
'UNKNOWN'
],
'third_party/catapult/dashboard/third_party/beautifulsoup': [
'UNKNOWN'
],
# https://code.google.com/p/graphy/issues/detail?id=6
# Apache (v2.0)
'third_party/catapult/third_party/graphy': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/graphy': [
'UNKNOWN',
],
# https://github.com/GoogleCloudPlatform/appengine-mapreduce/issues/71
# Apache (v2.0)
'third_party/catapult/third_party/mapreduce': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/mapreduce': [
'UNKNOWN',
],
# https://code.google.com/p/webapp-improved/issues/detail?id=103
# Apache (v2.0).
'third_party/catapult/third_party/webapp2': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/webapp2': [
'UNKNOWN',
],
# https://github.com/Pylons/webob/issues/211
# MIT license.
'third_party/catapult/third_party/WebOb': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/WebOb': [
'UNKNOWN',
],
# https://github.com/Pylons/webtest/issues/141
# MIT license.
'third_party/catapult/third_party/webtest': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/webtest': [
'UNKNOWN',
],
# https://bitbucket.org/ianb/paste/issues/12/add-license-headers-to-source-files
# MIT license.
'third_party/catapult/third_party/Paste': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/Paste': [
'UNKNOWN',
],
# https://github.com/google/oauth2client/issues/231
# Apache v2.0.
'third_party/catapult/third_party/oauth2client': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/oauth2client': [
'UNKNOWN',
],
# https://bitbucket.org/gutworth/six/issues/129/add-license-headers-to-source-files
# MIT license.
'third_party/catapult/third_party/six': [
'UNKNOWN',
],
'third_party/catapult/dashboard/third_party/six': [
'UNKNOWN',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jsoncpp/source': [
# https://github.com/open-source-parsers/jsoncpp/issues/234
'UNKNOWN',
],
'third_party/junit/src': [
# https://github.com/junit-team/junit/issues/1132
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# BSD 2-clause license.
'third_party/pycoverage': [
'UNKNOWN',
],
'third_party/pyelftools': [ # http://crbug.com/222831
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# http://crbug.com/471372
# BSD
'tools/swarming_client/third_party/pyasn1': [
'UNKNOWN',
],
# http://crbug.com/471372
# Apache v2.0.
'tools/swarming_client/third_party/rsa': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
# Not shipped, downloaded on trybots sometimes.
'tools/telemetry/third_party/gsutil': [
'BSD MIT/X11 (BSD like)',
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'v8/src/third_party/kernel/tools/perf/util/jitdump.h': [ # http://crbug.com/391716
'UNKNOWN',
],
}
# Run devscripts' licensecheck.pl over the tree and report files whose
# detected license is not whitelisted.  Returns 0 on success, 1 on any
# failure (Python 2: print statements, dict.iterkeys).
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
# Invoke the Perl license scanner shipped in third_party/devscripts.
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
# Any stderr output or nonzero exit is treated as a hard failure.
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
# licensecheck output is one "filename: license" line per file.
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
# Per-path suppressions, unless explicitly disabled via CLI flag.
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
# Unused suppressions are only meaningful on a full-tree scan.
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
def main():
"""Parse CLI options and run the license check; return its exit status."""
# Default repository root is two directories above this script
# (<root>/tools/checklicenses/checklicenses.py).
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
# Script entry point; process exit status is check_licenses' return value.
if '__main__' == __name__:
sys.exit(main())
| Just-D/chromium-1 | tools/checklicenses/checklicenses.py | Python | bsd-3-clause | 19,353 | [
"Galaxy"
] | 9942cd8942c82adde65e9b3b5c91e4ac85c74349effe40ced49fd00f78735697 |
import numpy, pylab, scipy, scipy.optimize, scipy.interpolate, copy
from xrd_diffraction_conversion_fcns import *
import matplotlib.delaunay as dlny
def numtostring(x, n=0):
    """Format x in exponential notation with n significant digits.

    When n is less than 1, fall back to the trivial str() conversion.
    """
    if n >= 1:
        return "%.*e" % (n - 1, x)
    return str(x)
def polarr_cart(x, y):
    """Radius of the polar form of cartesian (x, y); works elementwise on arrays."""
    xf = 1.0 * x
    yf = 1.0 * y
    return numpy.sqrt(xf * xf + yf * yf)
def polart_cart(x, y):
    """Angle in [0, 2*pi) of the polar form of scalar cartesian (x, y).

    The origin maps to angle 0.
    """
    xf = 1.0 * x
    yf = 1.0 * y
    if xf == 0:
        # On the y-axis: 0 at the origin, pi/2 above it, 3*pi/2 below it.
        return (yf != 0) * (numpy.pi / 2 + numpy.pi * (yf < 0))
    base = numpy.arctan(-1.0 * yf / xf)
    if xf < 0:
        # Left half-plane (quadrants II and III).
        return numpy.pi - base
    if yf < 0:
        # Quadrant IV.
        return 2.0 * numpy.pi - base
    # Quadrant I.
    return numpy.arctan(yf / xf)
def polar_cart(x, y):
    """Convert cartesian coordinates to polar; this is the function to call.

    Accepts floats or numpy arrays.  Returns (r, theta) with theta in
    [0, 2*pi); for array input theta is a float32 array computed elementwise.
    """
    if isinstance(x, numpy.ndarray):
        # list(...) keeps this working on Python 3, where map() returns an
        # iterator that numpy.float32 cannot consume directly.
        return polarr_cart(x, y), numpy.float32(list(map(polart_cart, x, y)))
    return polarr_cart(x, y), polart_cart(x, y)
def firstder(y, dx=1.0):
    """Five-point-stencil FIRST derivative of the 1-d array y, sample spacing dx.

    Returns a float32 array of the same length; the two values at each end,
    where the stencil does not fit, are copies of the nearest interior value.
    (The original comment said "second derivative" — it is the first.)
    """
    inner = (y[:-4] - y[4:] + 8.0 * (y[3:-1] - y[1:-3])) / (12.0 * dx)
    out = numpy.empty(y.shape, dtype=numpy.float32)
    out[2:-2] = inner
    out[:2] = inner[0]
    out[-2:] = inner[-1]
    return out
def secder(y, dx=1.0):
    """Five-point-stencil second derivative of the 1-d array y, sample spacing dx.

    Returns a float32 array of the same length; the two values at each end,
    where the stencil does not fit, are copies of the nearest interior value.
    """
    inner = (16.0 * (y[1:-3] + y[3:-1]) - 30.0 * y[2:-2] - (y[:-4] + y[4:])) / (12.0 * dx ** 2)
    out = numpy.empty(y.shape, dtype=numpy.float32)
    out[2:-2] = inner
    out[:2] = inner[0]
    out[-2:] = inner[-1]
    return out
def arrayder_x(arr, dx=1.0):
    """First derivative of the 2-d array arr along axis 0, one column at a time."""
    columns = [firstder(col, dx) for col in arr.T]
    return numpy.array(columns).T
def arrayder_y(arr, dx=1.0):
    """First derivative of the 2-d array arr along axis 1, one row at a time."""
    rows = [firstder(row, dx) for row in arr]
    return numpy.array(rows)
def arrayder_xx(arr, dx=1.0):
    """Nine-point-stencil second derivative along axis 0 of the 2-d array arr.

    Error is of order dx**2.  The outer ring of values cannot be computed by
    the stencil and is copied from the neighbouring interior values.
    (The original used arr.size as a slice end, which numpy clamps, so the
    plain open-ended slices below are equivalent.)
    """
    up = arr[2:, 2:] + arr[2:, 1:-1] + arr[2:, :-2]
    mid = arr[1:-1, 2:] + arr[1:-1, 1:-1] + arr[1:-1, :-2]
    down = arr[:-2, 2:] + arr[:-2, 1:-1] + arr[:-2, :-2]
    inner = (up + down - 2.0 * mid) / (3.0 * dx ** 2)
    out = numpy.empty(arr.shape, dtype=numpy.float32)
    out[1:-1, 1:-1] = inner
    out[1:-1, 0] = inner[:, 0]
    out[1:-1, -1] = inner[:, -1]
    out[0, :] = out[1, :]
    out[-1, :] = out[-2, :]
    return out
def arrayder_yy(arr, dx=1.0):
    """Nine-point-stencil second derivative along axis 1: transpose, reuse arrayder_xx, transpose back."""
    return arrayder_xx(arr.T, dx).T
def arrayder_xy(arr, dx=1.0): #7-point stencil error order dx**2. outer ring of values are copied from inner
    # Mixed second derivative d2/(dx dy) of the 2-d array arr, grid spacing dx.
    # arr.size exceeds either dimension, so '2:siz' slices clamp and behave
    # like '2:'.
    siz=arr.size
    # Stencil offsets:  (+1,+1)  (+1,0)  (-1,0)  (-1,-1)  (0,+1)  (0,0)  (0,-1)
    secd=(-arr[2:siz, 2:siz]+arr[2:siz, 1:-1]+arr[0:-2, 1:-1]-arr[0:-2, 0:-2]+arr[1:-1, 2:siz]-2.0*arr[1:-1, 1:-1]+arr[1:-1, 0:-2])/(-2.0*dx**2)
    temp=numpy.empty(arr.shape, dtype=numpy.float32)
    temp[1:-1, 1:-1]=secd[:, :]
    # The stencil cannot reach the outer ring; copy the nearest interior values.
    temp[1:-1,0]=secd[:, 0]
    temp[1:-1,-1]=secd[:, -1]
    temp[0, :]=temp[1, :]
    temp[-1, :]=temp[-2, :]
    return temp
#def arrayder_xy(arr, dx): #4-point stencil error order dx**2. outer ring of values are copied from inner
# siz=arr.size
# # +1, +1 -1, -1 1,-1 -1, 1
# secd=(-arr[2:siz, 2:siz]-arr[0:-2, 0:-2]-2.0*arr[2:siz, 0:-2]+arr[0:-2, 2:siz])/(4.0*dx**2)
# temp=numpy.empty(arr.shape, dtype=numpy.float32)
# temp[1:-1, 1:-1]=secd[:, :]
# temp[1:-1,0]=secd[:, 0]
# temp[1:-1,-1]=secd[:, -1]
# temp[0, :]=temp[1, :]
# temp[-1, :]=temp[-2, :]
# return temp
def chimap_gen(qimage, chiimage, chigrid): #chigrid in degrees, returns chimap in degrees
    # Build a signed azimuthal-bin index map: pixel value b means |chi| (in
    # degrees) falls in bin b-1 of chigrid, negated where chi is negative;
    # 0 means outside every bin.
    # NOTE(review): qimage is accepted for signature symmetry with imap_gen
    # but is never used here.
    # slotends_qgrid presumably returns the bin boundary values -- defined in
    # xrd_diffraction_conversion_fcns; verify there.
    chiimageabsdeg=numpy.abs(chiimage*180.0/numpy.pi)
    chiends=slotends_qgrid(chigrid)
    chimap=numpy.zeros(shape=chiimage.shape, dtype='int16')
    for count, (chimin, chimax) in enumerate(zip(chiends[:-1], chiends[1:])):
        chimap[(chiimageabsdeg>=chimin)&(chiimageabsdeg<chimax)]=count+1
    # Restore the sign of chi so the two azimuthal halves stay distinguishable.
    chimap[chiimage<0]*=-1
    return chimap
def imap_gen(qimage, qgrid):
    """Map each pixel's q value to a 1-based bin index of qgrid (0 = no bin).

    Both edges of every slot are inclusive, so a pixel exactly on a shared
    boundary ends up in the later slot (later assignments overwrite).
    """
    bounds = slotends_qgrid(qgrid)
    imap = numpy.zeros(shape=qimage.shape, dtype='uint16')
    for idx in range(1, len(bounds)):
        lo = bounds[idx - 1]
        hi = bounds[idx]
        imap[(qimage >= lo) & (qimage <= hi)] = idx
    return imap
def interp(func, y, xcrit, maxtries=20):
    """Invert a monotonic scalar function: find ycrit with func(ycrit) ~= xcrit.

    func is evaluated at the guesses in y (at least length 3); the guess range
    is expanded geometrically and then refined until at least 4 distinct
    samples exist and xcrit is bracketed, after which a spline through the
    (func(y), y) pairs is evaluated at xcrit.  Returns None on failure.
    """
    trylist = list(y)
    # Bug fix: the original wrote 'trylist.sort' (an attribute access, a
    # no-op); the list must actually be sorted.
    trylist.sort()
    tryans = [func(v) for v in trylist]
    # Drop guesses whose function value duplicates a neighbour's: repeated
    # abscissae would break the spline.
    i = 0
    while i < len(trylist) - 1:
        if tryans[i] == tryans[i + 1]:
            del trylist[i]
            del tryans[i]
        else:
            i += 1
    # Expand the guess range geometrically until xcrit is bracketed.
    tries = 0
    while tries < maxtries and not (len(trylist) >= 4 and xcrit >= min(tryans) and xcrit <= max(tryans)):
        tries += 1
        hightry = trylist[-1] * (1.0 + 0.2 * tries ** 2)
        lowtry = trylist[0] / (1.0 + 0.2 * tries ** 2)
        highans = func(hightry)
        lowans = func(lowtry)
        if lowans != tryans[0]:
            trylist = [lowtry] + trylist
            tryans = [lowans] + tryans
        if highans != tryans[-1]:
            trylist = trylist + [hightry]
            tryans = tryans + [highans]
    # Refine inside the bracket until at least 4 distinct samples exist.
    tries = 0
    if len(trylist) < 2:
        tries = maxtries
    while tries < maxtries and not (len(trylist) >= 4):
        tries += 1
        temp = 2 ** tries
        newtrys = numpy.linspace(1. / 2 ** temp, 1 - 1. / 2 ** temp, 2 ** (temp - 1))
        newtrys = list(trylist[0] + (trylist[-1] - trylist[0]) * newtrys)
        even = False
        while len(newtrys) > 0 and not (len(trylist) >= 4):
            even = not even
            # Alternate between the lowest and highest remaining candidate.
            temp = newtrys.pop(0 if even else -1)
            ans = func(temp)
            if ans in tryans:
                if ans == tryans[0]:
                    # Move the outer boundaries in so searching is more efficient.
                    trylist[0] = temp
                if ans == tryans[-1]:
                    trylist[-1] = temp
            else:
                # Bug fix: the original appended the *guess* value to tryans
                # and sorted both lists independently, corrupting the
                # (abscissa, ordinate) pairing; insert as a pair instead.
                pairs = sorted(zip(tryans + [ans], trylist + [temp]))
                tryans = [p[0] for p in pairs]
                trylist = [p[1] for p in pairs]
    if tries == maxtries:
        print('interp problem')
        return None
    ycrit = scipy.interpolate.spline(tryans, trylist, xcrit)
    if numpy.isnan(ycrit):
        print('interp problem')
        return None
    return ycrit
def qq_gen(innn):
    """Upper-triangular table of pairwise products of the 1-d float32 array innn.

    Entry [j, i] (j >= i) of the returned float32 array is innn[i]*innn[j];
    entries below the diagonal are zero.  range() replaces the Python-2-only
    xrange().
    """
    slots = innn.size
    zeros_row = [0.0] * slots
    rows = [zeros_row[:i] + (innn[i] * innn[i:]).tolist() for i in range(slots)]
    return numpy.array(rows, dtype='float32').T
def intbyarray(data, imap, dqchiimage, slots=None, mean=False):
    """Integrate (or average) data over the bins defined by imap.

    data: 2-d counts array; imap: same-shape integer map where 0 means
    'ignore' and value v assigns the pixel to bin v-1; dqchiimage: optional
    per-pixel weight (e.g. a dq*dchi area element) applied before summing;
    slots: number of bins (defaults to imap.max()); mean: return per-bin means
    instead of sums.  Empty bins come back as 0 (nan replaced).
    range() replaces the Python-2-only xrange().
    """
    if slots is None:
        slots = imap.max()
    if dqchiimage is None:
        data = numpy.float32(data)
    else:
        data = numpy.float32(numpy.abs(dqchiimage) * data)
    if mean:
        ans = numpy.array([(data[imap == i]).mean() for i in range(1, slots + 1, 1)])
    else:
        ans = numpy.array([(data[imap == i]).sum() for i in range(1, slots + 1, 1)])
    # The mean of an empty bin is nan; report such bins as zero.
    ans[numpy.where(numpy.isnan(ans))] = 0.0
    return ans
def integrationnormalization(killmap, imap, dqchiimage, slots=None):
    """Per-bin normalization factors: the reciprocal of the integrated killmap.

    This keeps a q-range covered by few live pixels from changing the
    magnitude of the integrated counts.  Bins with zero coverage get a factor
    of 0.  range() replaces the Python-2-only xrange().
    """
    if slots is None:
        slots = imap.max()
    if dqchiimage is None:
        data = numpy.float32(killmap)
    else:
        data = numpy.float32(numpy.abs(dqchiimage) * killmap)
    ans = numpy.array([(data[imap == i]).sum() for i in range(1, slots + 1, 1)])
    ans[numpy.where(numpy.isnan(ans))] = 0.0
    ans[ans < 0] = 0.0  # defensive; sums of a non-negative map shouldn't go negative
    ans[ans > 0] = 1 / ans[ans > 0]
    return ans
def binimage(data, bin=3, zerokill=False, mapbin=None): #data is square array,averages bin by bin sections of the array and returns smaller array #mapbin changes the grid
    # Bin a square array by averaging bin x bin blocks.
    # zerokill: zero any output pixel whose block contained a zero (uses
    # binboolimage so every pixel of the block must be nonzero to survive).
    # mapbin: additionally regroup 1-based bin-index values (see mapbin()).
    size=data.shape[0]
    if zerokill:
        nonzeromap=binboolimage(data!=0, bin=bin)
    if 'float' in str(data.dtype): #this isn't compatiible with mapbin:
        # Float path: sum along each axis in turn, then divide by block area.
        b = numpy.float32([data[:, i:i+bin].sum(axis=1) for i in xrange(0, size, bin)])
        ans=numpy.float32(numpy.array([b[:, k:k+bin].sum(axis=1) for k in xrange(0, size, bin)])/bin**2)
        if zerokill:
            ans*=nonzeromap
    else:
        # Integer path: derive an integer dtype wide enough for block sums.
        # NOTE(review): dt is computed but never used below (the sums use
        # dtype='int32' regardless) -- looks like leftover dead code.
        try:
            if 'int' in str(data.dtype):
                a, b, c=str(data.dtype).partition('int')
                c=eval(c)
            else:
                a='u'
                b='int'
                c=1
            c+=numpy.ceil(numpy.log(bin**2)/numpy.log(2.)/8.)
            if c>64:
                c='64'
            else:
                c='%d' %int(c)
            dt=''.join((a, b, c))
        except:
            dt='int64'
        b = numpy.array([data[:, i:i+bin].sum(axis=1, dtype='int32') for i in xrange(0, size, bin)])
        if data.dtype=='bool':
            ans=numpy.bool_(numpy.array([b[:, k:k+bin].sum(axis=1, dtype='int32') for k in xrange(0, size, bin)])//bin**2)
        else:
            ans=numpy.array(numpy.array([b[:, k:k+bin].sum(axis=1, dtype='int32') for k in xrange(0, size, bin)])//bin**2, dtype=data.dtype)
        if zerokill:
            ans*=nonzeromap
    if not (mapbin is None):
        # Regroup 1-based map indices after binning.
        ans=numpy.array((numpy.int32(ans)-1)//mapbin+1, dtype=data.dtype)
    return ans
def mapbin(data, mapbin=3):
    """Coarsen 1-based bin indices: group every `mapbin` consecutive bins into one."""
    shifted = data - 1
    return shifted // mapbin + 1
def unbinimage(data, bin=3):
    """Inverse of binimage: expand each pixel of data into a bin x bin block.

    Returns an array of shape (bin*rows, bin*cols) with data's dtype.  The
    original allocated an unused scratch array of the output size; that dead
    code is removed.
    """
    pad = numpy.zeros(bin, dtype=data.dtype)
    # pad + i broadcasts the scalar pixel value i into a run of length `bin`.
    return numpy.array([(numpy.array([pad + i for i in data[j // bin]])).flatten()
                        for j in range(bin * data.shape[0])])
def binboolimage(data, bin=3):
    """Bin a square boolean array: an output pixel is True only if every pixel
    of its bin x bin block is True (logical AND via boolean products).

    range() replaces the Python-2-only xrange().
    """
    size = data.shape[0]
    b = numpy.array([data[:, i:i + bin].prod(axis=1, dtype='bool') for i in range(0, size, bin)], dtype='bool')
    return numpy.array([b[:, k:k + bin].prod(axis=1, dtype='bool') for k in range(0, size, bin)], dtype='bool')
def unbinboolimage(data, bin=3):
    """Inverse of binboolimage: expand each boolean pixel into a bin x bin block.

    Boolean addition acts as logical OR, so pad + i replicates pixel i across
    a run of length `bin`.  The original allocated an unused scratch array of
    the output size; that dead code is removed.
    """
    pad = numpy.zeros(bin, dtype='bool')
    return numpy.array([(numpy.array([pad + i for i in data[j // bin]])).flatten()
                        for j in range(bin * data.shape[0])])
def combineimageswithwieghts(wts, imagearr):
    """Weighted sum of a stack of images; the result keeps the dtype of the
    first image.  (A plain numpy.dot cannot be used on a 3-d stack.)"""
    weighted = [w * im for w, im in zip(wts, imagearr)]
    return numpy.array(weighted, dtype=imagearr[0].dtype).sum(axis=0)
def bckndsubtract(data, bckndarr, killmap=None, btype='minanom', banom_f_f=None, banomcalc=None, linweights=None):
    #data,bckndarr,killmap must be same size. banom will be adjusted to that size
    #if btype is 'min' or 'ave' just calculates and returns - killmap can be None for this but must be passed otherwise
    #if btype is 'minanom' and calculation of anomalous backnd and factors is to be avoided: pass banom_f_f=(banom,fmin,fanom)
    #if btype is 'minanom' and calculation must happen do not pass banom_f_f , pass banomcalc=(imap,qgrid,attrdict, None or bimap, None or bqgrid, None or fraczeroed, None or factorprecision) -don'e have to include trailing None's - also, in this case all the arrays must be full sized
    #returns tuple (bcknd subtracted data) if no banom, fmin, fanom, calc done, otherwise ( , banom, fmin, fanom, bimap, bqgrid,fraczeroed)
    # if 'minanom' then bckndarr is bmin
    if btype=='lin':
        # Linear combination of a stack of backgrounds, then treated like 'min'.
        bckndarr=combineimageswithwieghts(linweights, bckndarr)
    if btype=='min' or btype=='ave' or btype=='lin':
        # Simple subtraction, clipping negative results to zero.
        a=bckndarr>data
        data-=bckndarr
        data[a]=0
        if killmap is None:
            return (data, bckndarr)
        else:
            return (data*killmap, bckndarr*killmap)
    elif btype=='minanom':
        if banom_f_f is None or len(banom_f_f)!=3:
            # Compute banom/fmin/fanom via calc_bmin_banom_factors.
            bac=banomcalc
            while len(bac)<7:
                bac+=(None,)
            if (bac[3] is not None) and (bac[4] is not None):
                bimapqgridstr=', bimap=bac[3], bqgrid=bac[4]'
            else:
                bimapqgridstr=''
            if bimapqgridstr=='' and (bac[4] is not None):
                bimapqgridstr2=', bqgrid=bac[4]'
            else:
                bimapqgridstr2=''
            if bac[5] is not None:
                fzstr=', fraczeroed=bac[5]'
            else:
                fzstr=''
            if bac[6] is not None:
                fpstr=', factorprecision=bac[6]'
            else:
                fpstr=''
            # NOTE(review): the call is assembled as a string and eval()'d
            # merely to pass optional keyword arguments; building a kwargs
            # dict and calling calc_bmin_banom_factors(..., **kwargs) would be
            # equivalent and safer.
            cbbf=eval(''.join(('calc_bmin_banom_factors(data, bckndarr, killmap, bac[0], bac[1],bac[2]', bimapqgridstr, bimapqgridstr2, fzstr, fpstr, ')')))
            fmin=cbbf.fmin
            fanom=cbbf.fanom
            banom=cbbf.banom
            banomreturn=banom
            bimap=cbbf.bimap
            bqgrid=cbbf.bqgrid
            returnall=True
        else:
            returnall=False
            banom=banom_f_f[0]
            fmin=banom_f_f[1]
            fanom=banom_f_f[2]
        # Resize banom to match data when the sizes are commensurate.
        # NOTE(review): indentation reconstructed from a mangled source; this
        # resize is taken to apply to banom from either branch above.
        if data.shape[0]>banom.shape[0]:
            if (data.shape[0]%banom.shape[0])==0:
                banom=unbinimage(banom, data.shape[0]/banom.shape[0])
            else:
                print 'INCOMMENSURATE DATA ARRAY AND ANAMALOUS BACKGROUND ARRAY'
                return (data, )
        if data.shape[0]<banom.shape[0]:
            if (banom.shape[0]%data.shape[0])==0:
                banom=binimage(banom, banom.shape[0]/data.shape[0])
            else:
                print 'INCOMMENSURATE DATA ARRAY AND ANAMALOUS BACKGROUND ARRAY'
                return (data, )
        # Weighted total background, subtracted with clipping at zero.
        totbcknd=(fmin*bckndarr+fanom*banom)*killmap
        data*=killmap
        a=data<totbcknd
        data-=totbcknd
        data[a]=0
        if returnall:
            fracz=a.sum()/(killmap.sum())
            return (data, banom, fmin, fanom, bimap, bqgrid, fracz)
        else:
            return (data,totbcknd)
    else:
        print 'UNKNOWN BACKND TYPE IN BACKNDSUBTRACT'
        return (data, bckndarr)
class calc_bmin_banom_factors():
    """Compute weighting factors fmin/fanom and the azimuthally symmetric
    'anomalous' background image banom for minanom background subtraction.

    After construction, read .bimap, .bqgrid, .fmin, .fanom, .banom, .fracz.
    """
    def __init__(self, data, bmin, killmap, imap, qgrid, attrdict, fraczeroed=0.005, factorprecision=0.005, bimap=None, bqgrid=None, qimage=None):
        #must pass either bimap or qimage.
        #everyhitng should be binned to same size specified as bin
        # if providing bimap must provide correct bqgrid - if bimap provided, will use that bin
        #data,bmin,killmap,imap must be full sized
        #factor precision is % not additive
        #takes an image with bmin subtracted and takes a bqgrid annulus and finds what constant value could be subtracted from that and satisfy frac zero. an image is interpolated from these values
        #this image is azimuthally symmetric and should be roughly the background from xrays that are fluoresced or diffracted by amorphous stuff like air. then there is trading between this type
        #of backnd and bmin to see how you weigh them to subtract the highest possible number of pixels from the image while keeping fraczeroed. other constraint is fbmin>=1
        if data.shape!=bmin.shape or data.shape!=killmap.shape or data.shape!=imap.shape :
            print 'calc_bmin_banom_factors NOT CURRENTLY SUPPORTING DIFFERENT ARRAY SHAPES'
        self.fz=fraczeroed
        self.fp=factorprecision
        self.detsize=data.shape[0]
        # NOTE(review): 'bin' here is the Python builtin function, not a
        # binning factor (self.bin is only assigned further down) -- this
        # looks like a latent bug; confirm against bincenterind_centerind.
        self.center=numpy.float32(bincenterind_centerind(centerindeces_fit2dcenter(attrdict['cal'], detsize=self.detsize), bin))
        self.L=attrdict['cal'][2]
        self.wl=attrdict['wavelength']
        self.psize=attrdict['psize']
        self.dtype=data.dtype
        if bqgrid is None:
            # Build a coarse q-grid for the background annuli.
            minbinwidth=8 #this should be much larger than the widest features (peaks) not to be substracted
            b2=numpy.uint16(numpy.ceil((qgrid[2]-1)*1.0*qgrid[1]/minbinwidth))
            self.bqgrid=qgrid_minmaxnum(qgrid[0], minmaxint_qgrid(qgrid)[1], b2)
        else:
            self.bqgrid=bqgrid
        if (bimap is None) or (self.detsize%bimap.shape[0]!=0):
            if qimage is None:
                print 'aborted because need to calculate bimap but cannot without qimage'
                return
            self.bimap=imap_gen(qimage, self.bqgrid)
        else:
            self.bimap=bimap
        self.bin=self.detsize/self.bimap.shape[0]
        # Only use live pixels that fall inside the q-grid.
        self.killmap=killmap*(imap!=0)
        self.data=data*self.killmap
        self.bmin=bmin*self.killmap
        self.killmap[self.data<self.bmin]=0 # do not count pixels that were already zeroed from bmin "zeroed" pixels are only those due to the bmin, banom wieghting
        self.data*=self.killmap
        self.bmin*=self.killmap
        self.bimap*=self.killmap
        self.numpix=self.killmap.sum()
        self.bdata=self.data-self.bmin
        qvals=q_qgrid_ind(self.bqgrid)
        pixvalssq=pix_q(qvals, self.L,self.wl, psize=self.psize*self.bin)**2
        # Per-annulus constant that could be subtracted while zeroing at most
        # the fz fraction of that annulus.
        bqmin=numpy.array([self.bqmin_gen(i+1) for i in range(self.bqgrid[2])])
        self.banom_int=lambda xeval: scipy.interpolate.spline(pixvalssq, bqmin, xeval)
        self.banom=self.banom_gen()
        nanlist=numpy.isnan(self.banom)
        if nanlist.sum()>0:
            print "INTERPOLATION ERROR making banom"
            self.banom[nanlist]=0
        banomcounts=self.banom.sum()
        if banomcounts==0:
            self.fmin=1.0
            self.fanom=0.0
        else:
            # Trade intensity between bmin and banom in equal-count steps.
            bmincounts=self.bmin.sum()
            delcounts=bmincounts*self.fp
            delfmin=delcounts/(1.0*bmincounts)
            delfanom=delcounts/(1.0*banomcounts)
            self.fmin=1.0
            self.fanom=1.0
            self.fanom=interp(self.fracz_fanom, numpy.array([0.8, 0.9, 1.0, 1.1, 1.2])*self.fanom, self.fz)
            if self.fanom is None:
                self.fanom=0
            else:
                if self.fanom<0:
                    self.fanom=0
                # NOTE(review): nesting below reconstructed from a mangled
                # source -- the re-solve for fanom is taken to sit inside the
                # while loop (the bare 'break' requires an enclosing loop).
                while (self.btot_gen(self.fmin+delfmin, self.fanom-delfanom)>self.data).sum() < (self.btot_gen(self.fmin, self.fanom)>self.data).sum():
                    self.fmin+=delfmin
                    self.fanom-=delfanom
                    trylist=numpy.array(range(4))*self.fp+1
                    temp=interp(self.fracz_fanom, trylist*self.fanom, self.fz)
                    if temp is None:
                        print "INTERPOLATION ERROR increasing fanom:", trylist
                        self.fanom=0
                        break
                    else:
                        self.fanom=temp
        #self.bimap, self.bqgrid, self.fmin, self.fanom,self.banom ready to be read
        totbcknd=self.btot_gen(self.fmin, self.fanom)
        self.fracz=(totbcknd>self.data).sum()/numpy.float32(self.numpix)
    def fracz_fanom(self, fanom):
        # Fraction of live pixels zeroed by the total background at this fanom.
        return ((self.btot_gen(self.fmin, fanom)>self.data).sum())/numpy.float32(self.numpix)
    def bqmin_gen(self, slotnum):
        # Value at rank fz of the bmin-subtracted data within one annulus.
        a=self.bdata[self.bimap==slotnum]
        a.sort()
        if a.size==0:
            return 0
        else:
            return a[numpy.uint16(a.size*self.fz)]
    def banom_gen(self):
        # Radially interpolate the per-annulus constants into a full image.
        temp=numpy.array([[self.banom_int((i-self.center[1])**2+(j-self.center[0])**2)[0] for i in range(self.detsize/self.bin)] for j in range(self.detsize/self.bin)], dtype='int64')#just use largest size int available instead of checking against self.dtype
        temp[temp<0]=0
        return numpy.array(temp, dtype=self.dtype)
    def btot_gen(self, fmin, fanom):
        # Total background for weights (fmin, fanom), restricted to live pixels.
        return (self.bmin*fmin+self.banom*fanom)*self.killmap
#class calc_bmin_banom_factors():
# def __init__(self, data, bmin, killmap, imap, qgrid, attrdict, fraczeroed=0.005, factorprecision=0.005, bimap=None, bqgrid=None, bin=3):
# #3450/bin must be int
# # if providing bimap must provide correct bqgrid - if bimap provided, will use that bin
# #data,bmin,killmap,imap must be full sized
# #factor precision is % not additive
# #takes an image with bmin subtracted and takes a bqgrid annulus and finds what constant value could be subtracted from that and satisfy frac zero. an image is interpolated from these values
# #this image is azimuthally symmetric and should be roughly the background from xrays that are fluoresced or diffracted by amorphous stuff like air. then there is trading between this type
# #of backnd and bmin to see how you weigh them to subtract the highest possible number of pixels from the image while keeping fraczeroed. other constraint is fbmin>=1
# self.fz=fraczeroed
# self.fp=factorprecision
# self.center=numpy.float32([attrdict['cal'][1], attrdict['cal'][0]])/bin#inverse order
# self.L=attrdict['cal'][2]
# self.wl=attrdict['wavelength']
# if bqgrid is None:
# minbinwidth=8 #this should be much larger than the widest features (peaks) not to be substracted
# b2=numpy.uint16(numpy.ceil((qgrid[2]-1)*1.0*qgrid[1]/minbinwidth))
## b1=qgrid[1]*(qgrid[2]-1.0)/(b2-1) #put center of first and last bin same as qgrid but overall range wider
## self.bqgrid=[qgrid[0], b1, b2]
# self.bqgrid=qgrid_minmaxnum(qgrid[0], minmaxint_qgrid(qgrid)[1], b2)
# else:
# self.bqgrid=bqgrid
# if (bimap is None) or (3450%bimap.shape[0]!=0):
# qslots=slotends_qgrid(self.bqgrid)
# pixslots=pix_q(qslots, self.L,self.wl, psize=0.1*bin)
# self.bimap=makeintmap(pixslots,self.center, size=(3450//bin))
# else:
# self.bimap=bimap
#
# self.bin=3450//self.bimap.shape[0]
# self.center=numpy.float32([attrdict['cal'][1], attrdict['cal'][0]])/self.bin#inverse order
# self.killmap=binboolimage(killmap*(imap!=0), self.bin)
#
# self.bimap*=self.killmap
#
# self.data=binimage(data, self.bin)*self.killmap
# self.bmin=binimage(bmin, self.bin)*self.killmap
#
# self.numpix=self.killmap.sum()
# self.fz+=(self.bmin>self.data).sum()/numpy.float32(self.numpix)
# self.bdata=self.data-self.bmin
# self.bdata[self.bmin>self.data]=0
# qvals=q_qgrid_ind(self.bqgrid)
# pixvalssq=pix_q(qvals, self.L,self.wl, psize=0.1*self.bin)**2
#
# bqmin=numpy.array([self.bqmin_gen(i+1) for i in range(self.bqgrid[2])])
# self.banom_int=scipy.interpolate.UnivariateSpline(pixvalssq, bqmin)
# self.banom=self.banom_gen()
# nanlist=numpy.isnan(self.banom)
# if nanlist.sum()>0:
# print "INTERPOLATION ERROR making banom"
# self.banom[nanlist]=0
# banomcounts=self.banom.sum()
# if banomcounts==0:
# self.fmin=1.0
# self.fanom=0.0
# else:
# bmincounts=self.bmin.sum()
# delcounts=bmincounts*self.fp
# delfmin=delcounts/(1.0*bmincounts)
# delfanom=delcounts/(1.0*banomcounts)
# self.fmin=1.0
# self.fanom=1.0
# self.fanom=interp(self.fracz_fanom, numpy.array([0.8, 0.9, 1.0, 1.1, 1.2])*self.fanom, self.fz)
# if self.fanom<0:
# self.fanom=0
# if self.fanom is None:
# print "INTERPOLATION ERROR getting intial fanom"
# self.fanom=1.0
# while (self.btot_gen(self.fmin+delfmin, self.fanom-delfanom)>self.data).sum() < (self.btot_gen(self.fmin, self.fanom)>self.data).sum():
# self.fmin+=delfmin
# self.fanom-=delfanom
# trylist=numpy.array(range(4))*self.fp+1
# temp=interp(self.fracz_fanom, trylist*self.fanom, self.fz)
# if temp is None:
# print "INTERPOLATION ERROR increasing fanom:", trylist
# else:
# self.fanom=temp
# #self.bimap, self.bqgrid, self.fmin, self.fanom,self.banom ready to be read
# totbcknd=(self.fmin*self.bmin+self.fanom*self.banom)*self.killmap
# self.data*=self.killmap
# a=self.data<totbcknd
# self.fracz=a.sum()/(1.0*self.killmap.sum())
#
#
# def fracz_fanom(self, fanom):
# return ((self.btot_gen(self.fmin, fanom)>self.data).sum())/numpy.float32(self.numpix)
#
# def bqmin_gen(self, slotnum):
# a=self.bdata[self.bimap==slotnum]
# a.sort()
# if a.size==0:
# return 0
# else:
# return a[numpy.uint16(a.size*self.fz)]
#
# def banom_gen(self):
# temp=numpy.array([[self.banom_int((i-self.center[1])**2+(j-self.center[0])**2)[0] for i in range(3450/self.bin)] for j in range(3450/self.bin)], dtype='int32')*self.killmap
# temp[temp<0]=0
# return numpy.uint16(temp)
#
# def btot_gen(self, fmin, fanom):
# return self.bmin*fmin+self.banom*fanom
def find2darrayzeros(x, y, z):
    """Locate approximate zero crossings of the surface z sampled at x (rows) and y (cols).

    x and y are 1-d coordinate arrays and z is the 2-d surface z[i, j] at
    (x[i], y[j]).  Returns (True, xv, yv) with linearly interpolated zero
    locations for every grid cell whose four corners are not all of one sign,
    or (False, x0, y0) at the minimum of |z| when z never changes sign.
    """
    zb = numpy.zeros(z[:-1, :-1].shape, dtype='bool')
    corners = [z[:-1, :-1], z[1:, :-1], z[:-1, 1:], z[1:, 1:]]
    # Mark a cell when any cyclically adjacent pair of corners differs in
    # sign.  Bug fix: the original wrote 'zl[1:]+zl[0]', which numpy
    # broadcast into an element-wise SUM of the first corner instead of
    # appending it to close the cycle; [corners[0]] restores the intended
    # list concatenation.
    for ar1, ar2 in zip(corners, corners[1:] + [corners[0]]):
        zb += numpy.logical_xor(ar1 > 0., ar2 > 0.)
    xi, yi = numpy.where(zb)
    if len(xi) > 0:
        # Weight each corner by 1/|z| so corners close to zero dominate.
        wt = numpy.sum(1. / numpy.abs(numpy.float32([z[xi, yi], z[xi + 1, yi], z[xi, yi + 1], z[xi + 1, yi + 1]])), axis=0)
        xv = numpy.float32([x[inds] / numpy.abs(z[inds, othinds]) for inds in (xi, xi + 1) for othinds in (yi, yi + 1)]).sum(axis=0) / wt
        yv = numpy.float32([y[inds] / numpy.abs(z[othinds, inds]) for inds in (yi, yi + 1) for othinds in (xi, xi + 1)]).sum(axis=0) / wt
        return True, xv, yv
    else:
        xi, yi = numpy.where(numpy.abs(z) == numpy.min(numpy.abs(z)))
        return False, x[xi[0:1]], y[yi[0:1]]
def AveArrUpToRank(arr, rank=0.5):
    """Mean (float32) of the lowest `rank` fraction of the values in arr."""
    ordered = numpy.sort(arr.flatten())
    cutoff = int(round(rank * len(ordered)))
    return ordered[:cutoff].mean(dtype='float32')
def FindLinearSumBcknd(counts, killmap, b0, b1, f0vals, f1vals, fraczeroed=0.05, rankfornorm=0.5, fprecision=0.01):#takes the n x image counts and kill and bcknd images and using the f0vals,f1vals as guesses for the weights of the normalized bcknd images, find the f0,f1 that sum to maximum total counts while staying below fraczeroed pixels being zeroed
    """For each image in counts, find weights (f0, f1) of the two backgrounds
    b0 and b1 that maximize the subtracted background volume while zeroing at
    most the fraczeroed fraction of live pixels.

    b0/b1 may be a single image (ndim 2) or one image per count (ndim 3); each
    is normalized by the mean of its lowest rankfornorm fraction (AveArrUpToRank)
    before the search.  Returns (f0final, f1final) as float32 arrays, rescaled
    back to the unnormalized images.
    """
    b0=numpy.float32(b0)
    b0ptbyptbool=(b0.ndim==3)
    if b0ptbyptbool:
        # Per-image backgrounds: normalize each one separately.
        b0wt=[AveArrUpToRank(b0v[numpy.where(killmap)], rank=rankfornorm) for b0v in b0]
        b0l=[b0v/b0wtv for b0v, b0wtv in zip(b0, b0wt)]
    else:
        b0wt=AveArrUpToRank(b0[numpy.where(killmap)], rank=rankfornorm)
        b0/=b0wt
    b1=numpy.float32(b1)
    b1ptbyptbool=(b1.ndim==3)
    if b1ptbyptbool:
        b1wt=[AveArrUpToRank(b1v[numpy.where(killmap)], rank=rankfornorm) for b1v in b1]
        b1l=[b1v/b1wtv for b1v, b1wtv in zip(b1, b1wt)]
    else:
        b1wt=AveArrUpToRank(b1[numpy.where(killmap)], rank=rankfornorm)
        b1/=b1wt
    # nz: maximum allowed number of zeroed (over-subtracted) pixels.
    nz=(killmap==1).sum(dtype='float32')*fraczeroed
    print 'nz: ', nz, ' trials: ', len(f0vals)*len(f1vals)
    print 'The trial values are \nf0:', f0vals, '\nf1:', f1vals
    f0final=[]
    f1final=[]
    for counter, c in enumerate(counts):
        print 'Starting image ', counter
        if b0ptbyptbool:
            b0=b0l[counter]
            b0wtv=b0wt[counter]
        else:
            b0wtv=b0wt
        if b1ptbyptbool:
            b1=b1l[counter]
            b1wtv=b1wt[counter]
        else:
            b1wtv=b1wt
        vol0=(b0*killmap).sum(dtype='float32')
        vol1=(b1*killmap).sum(dtype='float32')
        c=numpy.float32(c)
        cwt=AveArrUpToRank(c[killmap], rank=rankfornorm)
        c/=cwt
        # Number of zeroed pixels for every (f0, f1) trial pair.
        nzero_f0f1=numpy.float32([[((c-f0*b0-f1*b1)*killmap<0.).sum(dtype='float32') for f1 in f1vals] for f0 in f0vals])
        print 'num trials w too many zeroed: ', len(numpy.where(nzero_f0f1>nz)[0])
        # Candidate (f0, f1) pairs where the zeroed count crosses nz.
        garb, f0poss, f1poss=find2darrayzeros(f0vals, f1vals, nzero_f0f1-nz)
        print 'Zeros were found within the array of guesses?', garb
        f0mod=[]
        f1mod=[]
        print 'f0poss', f0poss
        print 'f1poss', f1poss
        for f0, f1 in zip(f0poss, f1poss):
            # Scale each candidate up or down by fprecision steps until the
            # zeroed count crosses nz, then keep the side that stays below it.
            lowbool=((c-f0*b0-f1*b1)*killmap<0.).sum(dtype='float32')<nz
            newlowbool=lowbool
            fct=1.+(lowbool*2.-1.)*fprecision
            while lowbool==newlowbool:
                f0*=fct
                f1*=fct
                newlowbool=((c-f0*b0-f1*b1)*killmap<0.).sum(dtype='float32')<nz
            if lowbool: #use the factors that low-ball the nz
                f0/=fct
                f1/=fct
            f0mod+=[f0]
            f1mod+=[f1]
        print 'f0mod', f0mod
        print 'f1mod', f1mod
        f0mod=numpy.float32(f0mod)
        f1mod=numpy.float32(f1mod)
        print 'tot vol', vol0*f0mod/b0wtv+vol1*f1mod/b1wtv
        i=numpy.argmax(vol0*f0mod/b0wtv+vol1*f1mod/b1wtv) #vol0 and vol1 were calcuated before the biwt scaling so the wieghts have to be used here
        f0final+=[f0mod[i]*cwt/b0wtv]
        f1final+=[f1mod[i]*cwt/b1wtv]
    return numpy.float32(f0final), numpy.float32(f1final)
class fitfcns:
    """Small least-squares fitting helper built on scipy.optimize.leastsq.

    After a fit, .finalparams, .sigmas (diagonal of the covariance matrix --
    i.e. variances, kept as in the original), .parnames and .error are
    available; each fit method returns a callable fitfcn(x) evaluating the
    fitted model.  (Removed scipy aliases scipy.zeros/array/exp/pi are
    replaced by their numpy equivalents; values are identical.)
    """
    def genfit(self, fcn, initparams, datatuple, markstr='unspecified', parnames=[], interaction=0, maxfev=2000, weights=None):
        """Fit fcn(p, x1[, x2..]) to datatuple=(x1, ..., y); return fitfcn(x)."""
        self.maxfev = maxfev
        self.performfit = True
        self.initparams = initparams
        self.sigmas = numpy.zeros(len(initparams))
        self.parnames = parnames
        self.finalparams = initparams
        self.error = False
        if weights is None:
            def wts(x):
                return 1
        elif weights == 'parabolic':
            # Weight grows quadratically away from the centre of the x range.
            a = (datatuple[0][0] + datatuple[0][-1]) / 2.0
            b = (datatuple[0][-1] - datatuple[0][0]) / 2.0
            def wts(x):
                return 1.0 + ((x - a) / b) ** 2
        else:
            # Bug fix: an unknown weights specifier previously left wts
            # undefined and raised NameError; fall back to uniform weighting.
            def wts(x):
                return 1
        def res1(p, x1, y):
            return (y - fcn(p, x1)) * wts(x1)
        def res2(p, x1, x2, y):
            return y - fcn(p, x1, x2)
        def res3(p, x1, x2, x3, y):
            return y - fcn(p, x1, x2, x3)
        def res4(p, x1, x2, x3, x4, y):
            return y - fcn(p, x1, x2, x3, x4)
        resdic = {1: res1, 2: res2, 3: res3, 4: res4}
        # leastsq REQUIRES float64 input; convert every element of datatuple.
        i = 0
        for arr in datatuple:
            datatuple = datatuple[0:i] + tuple([numpy.float64(arr)]) + datatuple[i + 1:]
            i = i + 1
        while self.performfit:
            fitout = scipy.optimize.leastsq(resdic[len(datatuple) - 1], self.initparams, args=datatuple, maxfev=self.maxfev, full_output=1)
            self.performfit = False
            # Bug fix: leastsq reports success with ier in (1, 2, 3, 4); the
            # original treated everything but 1 as failure and discarded the
            # converged solution.
            if fitout[4] not in (1, 2, 3, 4):
                print('Fitting Error at %s: %s' % (markstr, fitout[3]))
                self.error = True
            else:
                self.finalparams = fitout[0]
                self.covmat = fitout[1]
                # NOTE(review): these are covariance diagonal entries
                # (variances), not standard deviations, matching the original.
                self.sigmas = numpy.array([self.covmat[i, i] for i in range(len(self.sigmas))])
        def fitfcn(x):
            return fcn(self.finalparams, x)
        return fitfcn
    def poly(self, p, x):
        """Polynomial sum(p[i] * x**i); p and x must be numpy arrays/scalars."""
        return numpy.array([p[i] * (x ** i) for i in range(p.size)]).sum(0)
    def polyfit(self, datatuple, initparams, markstr='unspecified', interaction=0, maxfev=2000, weights=None):
        """Fit a polynomial; initparams is a coefficient array [constant, lin,
        quad, ...] or an int giving the polynomial order."""
        if isinstance(initparams, int):
            initparams = numpy.ones(initparams + 1)
        else:
            initparams = numpy.float32(initparams)
        # '%d' formatting replaces the Python-2-only backtick repr.
        parnames = ['coef%d' % i for i in range(len(initparams))]
        return self.genfit(self.poly, initparams, datatuple, markstr, parnames, interaction, maxfev, weights=weights)
    def gaussianfit(self, datatuple, initparams=numpy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
        """Fit a Gaussian; parameters are (coef, center, sigma).

        Bug fix: the original referenced self.Gaussian, which does not exist
        (the method is lower-case 'gaussian') and raised AttributeError.
        """
        return self.genfit(self.gaussian, initparams, datatuple, markstr, parnames=['coef', 'center', 'sigma'], interaction=interaction, maxfev=maxfev, weights=weights)
    def gaussian(self, p, x):
        return p[0] * numpy.exp(-0.5 * ((x - p[1]) / p[2]) ** 2)
    def lorentzianfit(self, datatuple, initparams=numpy.array([1, 0, 1]), markstr='unspecified', interaction=0, showplot=True, maxfev=2000, weights=None):
        """Fit a Lorentzian; parameters are (coef, center, gamma).

        Bug fix: the original passed self as the model function and
        referenced the nonexistent self.Lorentzian.
        """
        return self.genfit(self.lorentzian, initparams, datatuple, markstr, parnames=['coef', 'center', 'gamma'], interaction=interaction, maxfev=maxfev, weights=weights)
    def lorentzian(self, p, x):
        return (p[0] / numpy.pi) * p[2] / ((x - p[1]) ** 2 + p[2] ** 2)
def hannsmooth(x, window):
    """Smooth the 1-d array x with a normalized Hann window of width ~window.

    The signal is extended at both ends by point reflection, so the returned
    array has the same length as x.
    """
    half = window // 2
    left = 2 * x[0] - x[half - 1:0:-1]
    right = 2 * x[-1] - x[-2:-1 * half - 1:-1]
    padded = numpy.r_[left, x, right]
    kernel = numpy.hanning(2 * half + 1)
    kernel = kernel / kernel.sum()
    return numpy.convolve(kernel, padded, mode='same')[half - 1:1 - half]
def savgolsmooth(x, window, order=4, dx=1.0, deriv=0):
    """Savitzky-Golay smoothing/differentiation of the 1-d array x (based on the scipy cookbook).

    window sets the fit width, order the polynomial order, dx the sample
    spacing and deriv which derivative to return (0 = smoothed signal).

    Fixes: xrange was Python-2-only; the zip() pairing was iterated once per
    output sample, which exhausts after the first sample on Python 3, so it is
    materialized with list(); numpy.mat/.A is replaced by plain arrays with
    identical numerics; the pointless numpy.uint16 on `side` (unsafe under
    negation) is replaced by a plain int.
    """
    side = int(window) // 2
    # Extend both ends by point reflection so the filter can run to the edges.
    s = numpy.r_[2 * x[0] - x[side:0:-1], x, 2 * x[-1] - x[-2:-1 * side - 2:-1]]
    # Least-squares design matrix for a polynomial of the given order; row
    # `deriv` of its pseudoinverse gives the convolution weights for that
    # derivative at the window centre.
    design = numpy.array([[k ** i for i in range(order + 1)] for k in range(-1 * side, side + 1)])
    m = numpy.linalg.pinv(design)[deriv]
    offsets = range(-1 * side, side + 1)
    offset_data = list(zip(offsets, m))
    smooth_data = [numpy.array([(weight * s[i + offset]) for offset, weight in offset_data]).sum()
                   for i in range(side, len(s) - side)]
    return numpy.array(smooth_data) / (dx ** deriv)
def wellspacedgrid(numpts, yvals=False):
    """Quasi-well-spaced x sample values in [0, 1]; numpts must be a perfect square.

    Returns the numpts x-values, or (xvals, yvals) -- where the y-values form
    a regular row grid -- when yvals is True.

    Fixes: sqrt() returns a float, and reshape()/range() with a float side
    length fail (modern numpy raises on non-integer shapes), so the side is
    made an exact int; the y fractions used integer arithmetic that truncated
    to zero under Python 2, so float division is forced.
    """
    sn = int(round(numpy.sqrt(numpts)))
    temp = numpy.r_[numpy.arange(numpts - 1) * sn / (numpts - 1.0) % 1, 1]
    temp = temp.reshape((sn, sn))
    temp2 = copy.copy(temp)
    # Swap row pairs so consecutive rows do not drift monotonically.
    for k in range(sn // 4):
        temp[2 * k + 1, :] = temp2[-2 * k - 2, :]
        temp[-2 * k - 2, :] = temp2[2 * k + 1, :]
    xvals = temp.flatten()
    if yvals:
        rowfracs = numpy.arange(sn) / (sn - 1.0)
        return (xvals, numpy.add.outer(rowfracs, numpy.zeros(sn)).flatten())
    return xvals
def bckndmincurve(allqvals, allivals, delq=None, maxcurv=16.2, derivatepoints=5): #maxcurv is the maximum negative curvature in real units
    """Build a smooth lower-envelope background for the spectrum (allqvals, allivals).

    Window-shaped bumps of bounded curvature are repeatedly added, from wide
    to narrow windows, as long as the background stays below the data and its
    second derivative (via secder, over `derivatepoints` points) stays above
    -maxcurv.  delq optionally coarsens the grid the background is built on;
    the result is spline-interpolated back to the full q grid.
    """
    derside=derivatepoints//2
    dq=1.0*(allqvals[1]-allqvals[0])
    numq=allqvals.size
    if delq is None:
        qvals=allqvals
        ivals=allivals
        dindex=1
    else:
        # Work on a coarser grid with spacing ~delq.
        dindex=numpy.uint16(numpy.round(delq/dq))
        qvals=allqvals[0:numq:dindex]
        ivals=allivals[0:numq:dindex]
        dq*=dindex
        numq=qvals.size
    dcurv=maxcurv/10.0 #this is how much curvature can be added to a given point in any one move
    dcurvfin=maxcurv/2.0
    # Window half-widths from wide to narrow: powers of two down to 2**-1.
    # NOTE(review): the final exponent is -1; old numpy silently produced 0
    # for integer 2**-1, newer numpy raises on negative integer powers.
    winside_iter=2**numpy.array(range(numpy.uint16(numpy.log2(numq/2)//1),-2,-1))
    bvals=numpy.zeros(qvals.size)
    count=0
    for winside in winside_iter:
        if winside==0:
            qindpts=numpy.array(range(numq)) #qindpts is the center values where pixels will ba added
        else:
            qindpts=numpy.uint16(range(0, numq, 2*winside))
        windowlen=winside*2+1
        if winside<2:
            # Narrowest windows: single-point (or 3-point on a coarse grid) bump.
            if dindex>1:
                winside=1
                window=numpy.array([0, dcurvfin*12.0*dq**2/30, 0])
            else:
                winside=0
                window=numpy.array([dcurvfin*12.0*dq**2/30])
        else:
            windowpeak=dcurv*((windowlen-1.0)*dq)**2/(19.74) #the 19.74 is for hanning window
            window=numpy.hanning(windowlen)*windowpeak
        repeat=True
        count=0
        firstqint=qindpts[1]-qindpts[0]
        while repeat:
            # Shift the bump centers pseudo-randomly on each pass.
            qindpts+=numpy.uint16(((firstqint*count/numpy.pi)%firstqint)//1)
            qindpts%=(numq-1)
            count+=1
            repeat=False
            for pt in qindpts:
                # ind_1:ind1 is the window span; ind_2:ind2 additionally
                # includes derside points each side for the curvature check.
                ind_2=pt-winside-derside
                if ind_2<0:
                    ind_2=0
                ind_1=pt-winside
                if ind_1<0:
                    ind_1=0
                ind1=pt+winside+1
                if ind1>numq:
                    ind1=numq
                ind2=pt+winside+derside+1
                if ind2>numq:
                    ind2=numq
                if (ind2-ind_2)<(2*derside+1):
                    if ind_2==0:
                        ind2+=derside
                    else:
                        ind_2-=derside
                a=winside-(pt-ind_1)
                b=winside+(ind1-pt)
                winsub=copy.copy(window)
                winsub=winsub[a:b]
                winfill_=numpy.zeros(ind_1-ind_2)
                winfill=numpy.zeros(ind2-ind1)
                redfrac=1
                if dindex>1: #this if is to make sure no ivals get zeroed in between bcknd points
                    # Linearly interpolate the proposed background between the
                    # coarse points and compare against the full-resolution data.
                    newb=bvals[ind_1:ind1]+winsub
                    interpbwinsub=numpy.multiply.outer(winsub,numpy.array(range(dindex,0,-1))/(1.0*dindex)).flatten()[:1-dindex]+numpy.append(numpy.multiply.outer(winsub[1:],numpy.array(range(dindex))/(1.0*dindex)).flatten(),0)
                    interpnewb=numpy.multiply.outer(newb,numpy.array(range(dindex,0,-1))/(1.0*dindex)).flatten()[:1-dindex]+numpy.append(numpy.multiply.outer(newb[1:],numpy.array(range(dindex))/(1.0*dindex)).flatten(),0)
                    ivalscompare=allivals[ind_1*dindex:ind_1*dindex+interpnewb.size]
                    if (ivalscompare<interpnewb).sum()>0:
                        itemp=numpy.where(interpbwinsub>0)
                        redfrac=(1.0-numpy.max((interpnewb[itemp]-ivalscompare[itemp])/interpbwinsub[itemp]))
                else:
                    if (ivals[ind_1:ind1]<(bvals[ind_1:ind1]+winsub)).sum()>0:
                        redfrac=(1.0-numpy.max(((bvals[ind_1:ind1]+winsub)-ivals[ind_1:ind1])/winsub))
                # NOTE(review): nesting reconstructed from a mangled source;
                # the curvature check is taken to guard the addition inside
                # the redfrac>0.1 branch.
                if redfrac>0.1:
                    winsub*=redfrac
                    if numpy.min(secder(bvals[ind_2:ind2]+numpy.r_[winfill_, winsub, winfill], dq))>-maxcurv:
                        bvals[ind_1:ind1]+=winsub
                        repeat=True
    # Pin the background to the data at the last q value.
    if not allqvals[-1] in qvals:
        qvals=numpy.append(qvals, allqvals[-1])
        bvals=numpy.append(bvals, allivals[-1])
    else:
        qvals[-1]=allqvals[-1]
        bvals[-1]=allivals[-1]
    if allqvals.size!=qvals.size:
        # Interpolate the coarse background back onto the full q grid.
        if numpy.any(bvals)!=0.:
            bvals=scipy.interpolate.spline(qvals, bvals, allqvals)
        else:
            bvals=allqvals*0.
    return bvals
def bcknd1dprogram(qgrid, ivals, attrdictORangles=None, smoothqwindow=0.5, cubiccritfrac=[.3, .3, .3, .3], maxcurvqinterval=0.4, maxcurv=16.2, returnall=False):
    """Subtract a smooth background from a 1-d diffraction spectrum.

    qgrid : (start, step, npts) q grid; ivals : intensities, NaN = missing.
    attrdictORangles : attribute dict (keys 'cal', 'wavelength', 'psize'), an
        ndarray of powder solid-angle values (same length as valid qvals), or
        None for no solid-angle correction.
    cubiccritfrac : fraction of points above the polynomial background fit
        that are removed before the next fit iteration; its length sets the
        number of iterations (a scalar means 4 iterations at that fraction).
        NOTE(review): mutable default list argument -- shared across calls.
    maxcurvqinterval, maxcurv : parameters of the minimum-curvature pass.
    Returns background-subtracted intensities on the full grid (NaN where the
    input was NaN), or a tuple of all intermediate arrays when returnall.
    """
    qvals=q_qgrid_ind(qgrid)
    # drop NaN points; remember their positions for the final re-insertion
    notnaninds=numpy.where(numpy.logical_not(numpy.isnan(ivals)))
    qvals=qvals[notnaninds]
    ivals=ivals[notnaninds]
    if not isinstance(cubiccritfrac, list):
        cubiccritfrac=[cubiccritfrac, cubiccritfrac, cubiccritfrac, cubiccritfrac] #if user sends just the critical fraction then default to 4 iterations using that fraction
    # build the solid-angle correction factors
    if isinstance(attrdictORangles, dict):
        L=attrdictORangles['cal'][2]
        wl=attrdictORangles['wavelength']
        psize=attrdictORangles['psize']
        angles=powdersolidangle_q(qvals, L, wl, psize=psize)
    elif isinstance(attrdictORangles, numpy.ndarray):
        angles=attrdictORangles
    else:
        angles=numpy.ones(ivals.shape, dtype='float32')
    fraczeroed=0.0
    dq=qgrid[1]
    smwin=(1.0*smoothqwindow/dq//2)*2+1   # odd smoothing window in indices
    qv=qvals
    iv=ivals/angles
    qv_0=qv[:]
    iv_0=iv[:]
    def fitfcn(x):
        # seed quadratic used only to build the initial polyfit coefficients
        return 50.0+5.0*x-0.04*(x**2)
    bfit=numpy.zeros(len(qv), dtype='float32')
    # iteratively fit a polynomial background, discarding the highest points
    for cf in cubiccritfrac:
        fit=fitfcns()
        fitfcn=fit.polyfit((qv, iv), [fitfcn(0), (4.0*fitfcn(1.0)-fitfcn(2.0))/2.0, (fitfcn(2.0)-fitfcn(1.0))/2.0])
        if fit.error:
            break
        frachigh=(iv-fitfcn(qv))/fitfcn(qv)
        frachighcritval=numpy.sort(frachigh)[(1-cf)*frachigh.size//1]
        qv=qv[frachigh<frachighcritval]
        iv=iv[frachigh<frachighcritval]
        # always keep the spectrum endpoints in the fit set
        if not qv_0[-1] in qv:
            qv=numpy.append(qv, qv_0[-1])
            iv=numpy.append(iv, iv_0[-1])
        if not qv_0[0] in qv:
            qv=numpy.append(qv_0[0], qv)
            iv=numpy.append(iv_0[0], iv)
    bfit=fitfcn(qv_0)
    bfit[bfit<0]=0
    # shrink the fit until it lies below (almost) all of the data
    numzeroed=fraczeroed*qv_0.size
    count=0
    while numpy.array(bfit>iv_0).sum()>numzeroed:
        bfit*=0.98
        count+=1
        if count>1000:
            bfit=0.0*bfit
            break
    iv_1=iv_0-bfit
    # smooth, then remove the residual minimum-curvature background
    iv_2=savgolsmooth(iv_1, smwin, order=4, deriv=0)
    iv_2[iv_2<0]=0
    bvals=bckndmincurve(qv_0, iv_2, delq=maxcurvqinterval, maxcurv=maxcurv)
    iv_3=iv_2-bvals
    iv_3[iv_3<0]=0
    if returnall:
        return (iv_3, iv_2, iv_1, iv_0,bvals, bfit, angles, notnaninds)
    else:
        # re-insert the processed values at the original non-NaN positions
        ireturn=numpy.ones(qgrid[2], dtype='float32')*numpy.nan
        print iv_3.shape, ireturn.shape, len(notnaninds[0])
        ireturn[notnaninds]=numpy.float32(iv_3)[:]
        return ireturn
def qqnorm_gen(qq, critcounts=1.0):
    """Return qq normalized by its diagonal: out[i,j] = qq[i,j]/sqrt(qq[i,i]*qq[j,j]).

    Entries of qq below critcounts are zeroed first (qq is modified in place).
    Positions where qq[i,j]*qq[i,i]*qq[j,j] is not positive stay 0 in the
    float32 result.
    """
    result = numpy.zeros(qq.shape, dtype=numpy.float32)
    qq[qq < critcounts] = 0.0
    n = qq.shape[0]
    # row_diag[i, j] == qq[i, i]; its transpose gives qq[j, j]
    row_diag = numpy.outer(numpy.array([qq[i, i] for i in range(n)]), numpy.ones(n))
    col_diag = row_diag.T
    valid = qq * row_diag * col_diag > 0
    result[valid] = qq[valid] / numpy.sqrt(row_diag[valid] * col_diag[valid])
    return result
def arrayzeroind2d(arr):
    """Find interior (i, j) indices where a 2-d array's sign surface crosses zero.

    A point qualifies when the signs of itself and its 4 nearest neighbors are
    not all the same strict sign (sum of the five signs squared < 25).
    Exterior (border) indices are never returned.

    arr : 2-d ndarray, or a list of same-shaped 2-d ndarrays (a point must
        qualify in every array of the list); a tuple is treated as its first
        element only (preserving the historical behavior).
    Returns a tuple (rows, cols) of index arrays into the full array.

    BUGFIX/cleanup: the original built a Python expression string with the
    Python-2-only backtick repr and ran it through eval(); replaced with a
    direct logical-and reduction of the per-array conditions.
    """
    if isinstance(arr, numpy.ndarray):
        arrays = [arr]
    elif isinstance(arr, tuple):
        # NOTE(review): only the first element of a tuple is used -- kept as-is.
        arrays = [arr[0]]
    else:
        arrays = arr
    crit = None
    for A in arrays:
        signarr = numpy.sign(A)
        # sum of the point and its 4 nearest neighbors, interior points only
        neighsum = (signarr[1:-1, 1:-1] + signarr[1:-1, 0:-2] + signarr[1:-1, 2:]
                    + signarr[0:-2, 1:-1] + signarr[2:, 1:-1])
        cond = neighsum**2 < 25
        crit = cond if crit is None else (crit & cond)
    zeroind = numpy.where(crit)
    # +1 converts interior indices to full-array indices
    return (zeroind[0] + 1, zeroind[1] + 1)
def clustercoords_radius(coordlist, critdistsq):
    """Cluster 2-d coords whose squared separation is below critdistsq.

    coordlist : list/array of (x, y) coords, e.g. [(1, 2), (3, 4)].
    critdistsq : squared critical radius.
    Returns (allcentroids, coordclusters): the centroid of each merged group
    followed by the untouched single coords, and the member coords of each.
    Assumes distances well below 1e5 (1e10 is used as an "infinite"
    separation sentinel).  Round the centroids if integer coords are needed.
    """
    if len(coordlist)==0:
        print 'CANNOT CLUSTER EMPTY LIST OF POINTS'
        return numpy.array([])
    coordlist=numpy.array(coordlist)
    # sepsq[i, j]: squared separation between coords i and j
    sepsq=numpy.array([[(a[1]-b[1])**2.0+(a[0]-b[0])**2.0 for a in coordlist] for b in coordlist])
    # copy with the zero self-separations masked by a huge value
    sepsqnozero=sepsq+numpy.eye(coordlist.shape[0], coordlist.shape[0])*10.0**10
    coordindclusters=[]
    coordstobeadded=[]
    while sepsqnozero.min()<critdistsq:
        temp=myargmin(sepsqnozero)  # flat index of the closest remaining pair
        ind1, ind2=temp//sepsq.shape[0],temp%sepsq.shape[0]
        # everything within the critical radius of EITHER member is grouped;
        # the union "overestimates" the radius but avoids computing a centroid
        # that would later need to be merged with other coords
        indgrp1=set(numpy.where(sepsq[ind1, :]<critdistsq)[0])
        indgrp2=set(numpy.where(sepsq[ind2, :]<critdistsq)[0])
        indgrp=indgrp1|indgrp2
        grpcentroid=tuple(numpy.array([coordlist[i] for i in indgrp]).mean(axis=0))
        # mask the grouped rows/cols so they cannot be selected again
        sepsqnozero[list(indgrp), :]=10.0**10
        sepsqnozero[:, list(indgrp) ]=10.0**10
        coordindclusters+=[list(indgrp)]
        coordstobeadded+=[grpcentroid]
    # coords that never joined a cluster stay as singletons
    coordindtoberemoved=set([])
    for ls in coordindclusters:
        coordindtoberemoved=coordindtoberemoved|set(ls)
    individualcoords=list(coordlist[list(set(range(coordlist.shape[0]))-coordindtoberemoved), :])
    allcentroids=coordstobeadded+individualcoords
    coordclusters=[coordlist[indlist, :] for indlist in coordindclusters]
    coordclusters+=[[coord] for coord in individualcoords]
    return (allcentroids, coordclusters)
def maxwithincluster(arr, clusters, centroids, border=0):
    """For each cluster, find where arr attains its maximum near that cluster.

    clusters : list of lists of [x, y] indices; centroids : the matching
    [x, y] centroid per list (need not be integer when clusters are indices).
    The search window is the cluster's bounding rectangle extended by
    `border` and clipped to arr.  When the maximum occurs at several coords,
    the one closest to the centroid wins.

    Returns ((xindexarray, yindexarray), maxcoords): the same information as
    a tuple of uint16 index arrays and as a list of [x, y] lists.
    """
    xlen=arr.shape[0]
    ylen=arr.shape[1]
    maxcoords=[]
    for tup in zip(clusters, centroids):
        clust=tup[0]
        cent=tup[1]
        xcoords, ycoords=numpy.array(clust).T
        # clipped, bordered bounding rectangle of the cluster
        xlow=numpy.uint16(max(0, min(xcoords)-border))
        xhigh=numpy.uint16(min(xlen, max(xcoords)+border+1))
        ylow=numpy.uint16(max(0, min(ycoords)-border))
        yhigh=numpy.uint16(min(ylen, max(ycoords)+border+1))
        maxval=arr[xlow:xhigh, ylow:yhigh].max()
        posns=numpy.where(arr[xlow:xhigh, ylow:yhigh]==maxval)
        # among ties, pick the position nearest the centroid
        mindistcent=myargmin(numpy.array([(cent[0]-xlow-posns[0][i])**2+(cent[1]-ylow-posns[1][i])**2 for i in range(len(posns[0]))]))
        maxcoords+=[[posns[0][mindistcent]+xlow, posns[1][mindistcent]+ylow]]
    temp=zip(*maxcoords)  # transpose: list of [x,y] -> per-axis sequences
    return ((numpy.uint16(numpy.array(temp[0])), numpy.uint16(numpy.array(temp[1]))), maxcoords)
#the second element of the return tuple is the list of [x,y] lists giving where the maximum value lies within the rectangle defined by the extremes of the cluster points plus border. In the event of multiple coords with the same max value, the one closest to the centroid is given. The first element is the same info except as a tuple of (xvalarray, yvalarray).
def arraypeaksearch(arr, ciss=0.1, belowdiagonal=False, dx=1, critcurvature=0, critvalue=None):
    """Locate positive 2-d peaks of arr and cluster nearby ones.

    A peak requires a zero of both first derivatives and a negative-definite
    Hessian (det > 0, both curvatures < critcurvature); critvalue, when
    given, additionally requires arr > critvalue.  ciss is the squared
    clustering radius passed to clustercoords_radius; belowdiagonal keeps
    only peaks with x >= y - sqrt(ciss).

    Returns ((xuint16, yuint16), rounded centroid array, cluster member
    lists), or None when no peak is found.
    """
    # dx cancels in the zero/sign tests, so its magnitude is irrelevant here
    qq_x=arrayder_x(arr, dx)
    qq_y=arrayder_y(arr, dx)
    qq_xx=arrayder_xx(arr, dx)
    qq_yy=arrayder_yy(arr, dx)
    qq_xy=arrayder_xy(arr, dx)
    firstderind=arrayzeroind2d([qq_x, qq_y])
    # negative-definite Hessian: positive determinant, negative curvatures
    secderind=numpy.where((qq_xx*qq_yy>(qq_xy)**2)&(qq_xx<critcurvature)&(qq_yy<critcurvature))
    firstderindset=set(zip(firstderind[0], firstderind[1]))
    secderindset=set(zip(secderind[0], secderind[1]))
    if critvalue is None:
        pklist=numpy.array(list(firstderindset&secderindset))
    else:
        valind=numpy.where(arr>critvalue)
        valset=set(zip(valind[0], valind[1]))
        pklist=numpy.array(list(firstderindset&secderindset&valset))
    if len(pklist)==0:
        print 'NO PEAKS FOUND'
        return None
    else:
        if belowdiagonal:
            pklist=pklist[pklist.T[0]>=pklist.T[1]-numpy.sqrt(ciss)]
        clustcoords=clustercoords_radius(pklist, ciss)
        temp=zip(*clustcoords[0])  # transpose centroids into per-axis sequences
        return ((numpy.uint16(numpy.round(numpy.array(temp[0]))), numpy.uint16(numpy.round(numpy.array(temp[1])))), numpy.round(clustcoords[0]), clustcoords[1])
def peakbounds(arr, peakcenters, maxHWHM, critfracofpeak=0.5):
    """Estimate half-width bounds around each 2-d peak center.

    For every center, walk outward along -x, +x, -y, +y (up to maxHWHM
    pixels) until the value drops below critfracofpeak of the peak value or
    starts monotonically increasing.  Returns one list per peak:
    [xcenter, ycenter, x-, x+, y-, y+, b, b, b, b] where each bound is the
    stopping index and each trailing bool is 1 when that bound came from the
    critical-value crossing (0 when from an increase or the window edge).
    """
    xlen=arr.shape[0]
    ylen=arr.shape[1]
    peakcenterbounds=[]
    for coord in peakcenters:
        # clipped search window around the center
        xlow=numpy.uint16(max(0, coord[0]-maxHWHM))
        xhigh=numpy.uint16(min(xlen, coord[0]+maxHWHM+1))
        ylow=numpy.uint16(max(0, coord[1]-maxHWHM))
        yhigh=numpy.uint16(min(ylen, coord[1]+maxHWHM+1))
        critval=arr[coord[0], coord[1]]*critfracofpeak
        # 1-d profiles heading away from the center in the 4 directions
        searchsig_x=arr[coord[0]:xlow:-1, coord[1]]
        searchsigx=arr[coord[0]:xhigh, coord[1]]
        searchsig_y=arr[coord[0], coord[1]:ylow:-1]
        searchsigy=arr[coord[0], coord[1]:yhigh]
        searcharrs=[searchsig_x, searchsigx, searchsig_y, searchsigy]
        HWHM=[]
        HWHMbycritval=[]
        defaults=[xlow, xhigh-1, ylow, yhigh-1]   # fall-back bounds (window edges)
        startcoord=[coord[0], coord[0], coord[1], coord[1]]
        direction=[-1, 1, -1, 1]
        for ardef in zip(searcharrs, defaults, startcoord, direction):
            sar=ardef[0]
            # positions where the profile has fallen below the critical value
            temp=numpy.where(sar//critval==0)
            if len(sar)<3:
                dsar=numpy.zeros(len(sar))
            else:
                # marks where a sustained increase begins (the peak is over)
                dsar=numpy.array([sar[i]<sar[i+1] and sar[i+1]<sar[i+2] for i in range(len(sar)-2)]+[sar[-2]<sar[-1]])
            temp2=numpy.where(dsar==1)
            if len(temp[0])==0:
                # never dropped below critval inside the window
                HWHMbycritval+=[0]
                if len(temp2[0])==0:
                    HWHM+=[ardef[1]]   # kept decreasing: use the window edge
                else:
                    HWHM+=[ardef[2]+ardef[3]*temp2[0][0]]
            else:
                selectind=temp[0][0]
                if len(temp2[0])>0:
                    # both events happened: take whichever index comes first
                    selectind=min(temp[0][0], temp2[0][0])
                if selectind==temp[0][0]:
                    HWHMbycritval+=[1]
                else:
                    HWHMbycritval+=[0]
                HWHM+=[ardef[2]+ardef[3]*selectind]
        peakcenterbounds+=[list(coord)+HWHM+HWHMbycritval]
    return peakcenterbounds
def qqpeakdiagnostics(qq, qqnorm, peakcenterbounds):
    """Compute diagnostics for each qq peak record.

    For every bounds record [xc, yc, x-, x+, y-, y+, ...] this returns the
    tuple (record, [peak value, rectangle integral, qqnorm expectation]),
    where the expectation uses the qq rectangle as a probability weight.
    """
    diagnostics = []
    for bounds in peakcenterbounds:
        rows = slice(bounds[2], bounds[3] + 1)
        cols = slice(bounds[4], bounds[5] + 1)
        rect = qq[rows, cols]
        total = rect.sum()
        # expected value of qqnorm over the rectangle, qq-weighted
        expectation = (qqnorm[rows, cols] * rect / total).sum()
        diagnostics.append((bounds, [qq[bounds[0], bounds[1]], total, expectation]))
    return diagnostics
def makegreyplotimage(arr, logcounts=True):
    """Convert a 2-d count array into an RGB greyscale image array.

    When logcounts is True, non-positive entries are replaced by the smallest
    positive value (arr is modified in place) and counts are log10'd.
    Returns (plotarr, (mincts, maxcts)) where plotarr[i, j] is [g, g, g] with
    g = 1 at the minimum count and g = 0 at the maximum.
    """
    if logcounts:
        smallest_positive = arr[arr > 0].min()
        arr[arr <= 0] = smallest_positive
        arr = numpy.log10(arr)
    hi = arr.max()
    lo = arr.min()
    # greyscale each count linearly between the extremes
    grey = [[[1 - (1.0 * value - lo) / (hi - lo)] * 3 for value in row] for row in arr]
    return (numpy.array(grey), (lo, hi))
def makeqqnormpeakplotimage(arr, qqpktuplist, logcounts=True):
    """Greyscale arr and overlay qq peak markers and bounding rectangles.

    qqpktuplist : output of qqpeakdiagnostics -- each element is
    (peakcenterbounds record, [peak, integral, qqnorm expectation]).
    Each peak center gets a cross whose color runs red->yellow (the green
    channel encodes the qqnorm expectation); each peak's bounding rectangle
    is drawn in blue.  Returns (plotarr, (mincts, maxcts)); arr is modified
    in place when logcounts is True.
    """
    cenpix=3   # half-length of the center cross in pixels
    qqnormave=numpy.array([a[1][2] for a in qqpktuplist])
    # cross color per peak: red fixed, green encodes the qqnorm expectation
    qqnacolor=numpy.array([numpy.ones(len(qqnormave)), qqnormave, numpy.zeros(len(qqnormave))]).T
    xcen=numpy.array([a[0][0] for a in qqpktuplist])
    ycen=numpy.array([a[0][1] for a in qqpktuplist])
    if logcounts:
        minnonzero=arr[arr>0].min()
        arr[arr<=0]=minnonzero
        arr=numpy.log10(arr)
    maxcts=arr.max()
    mincts=arr.min()
    plotarr=numpy.array([[[1-(1.0*val-mincts)/(maxcts-mincts)]*3 for val in col] for col in arr]) #grayscale the counts
    for cd in zip(xcen, ycen, qqnacolor):
        # horizontal and vertical bars of the center cross
        plotarr[cd[0], max(cd[1]-cenpix, 0):cd[1]+cenpix+1, :]=cd[2][:]
        plotarr[max(cd[0]-cenpix, 0):cd[0]+cenpix+1, cd[1], :]=cd[2][:]
    b=numpy.array([0.0, 0.0, 1.0])   # blue for the bounding rectangles
    # closed rectangle path from the x-/x+/y-/y+ bounds of each record
    boxplotlistx=[[a[0][2], a[0][3], a[0][3], a[0][2], a[0][2]] for a in qqpktuplist]
    boxplotlisty=[[a[0][4], a[0][4], a[0][5], a[0][5], a[0][4]] for a in qqpktuplist]
    for cds in zip(boxplotlistx, boxplotlisty):
        # draw the four edges of the rectangle
        plotarr[cds[0][0]:cds[0][1], cds[1][1], :]=numpy.array([b]*(cds[0][1]-cds[0][0]))
        plotarr[cds[0][2], cds[1][1]:cds[1][2], :]=numpy.array([b]*(cds[1][2]-cds[1][1]))
        plotarr[cds[0][3]:cds[0][2], cds[1][3], :]=numpy.array([b]*(cds[0][2]-cds[0][3]))
        plotarr[cds[0][4], cds[1][4]:cds[1][3], :]=numpy.array([b]*(cds[1][3]-cds[1][4]))
    return (plotarr, (mincts, maxcts))
def arrayzeroind1d(arr, postoneg=False):
    """Locate zero crossings of a 1-d array as fractional (interpolated) indices.

    postoneg=False: any sign change (or touch of zero) between consecutive
    samples counts; postoneg=True: only positive-to-negative sign steps.
    Returns a float array of linearly interpolated crossing "indices".
    """
    signs = numpy.sign(arr)
    if postoneg:
        left = numpy.where(signs[:-1] > signs[1:])[0]
    else:
        left = numpy.where(signs[:-1] * signs[1:] <= 0)[0]
    right = left + 1
    # linear interpolation of the zero position between index left and right
    return (1.0 * left * arr[right] - right * arr[left]) / (arr[right] - arr[left])
def clustercoords1d(pkind, critqsepind):
    """Merge 1-d peak indices closer together than critqsepind (sorted result).

    pkind is sorted in place.  A close pair is replaced by its midpoint (not
    exactly a centroid, but close enough) and both members are consumed.
    NOTE: mirroring the historical behavior, the final element is dropped
    unless it was merged into a preceding pair.
    """
    pkind.sort()
    merged = []
    pos = 0
    last = len(pkind) - 1
    while pos < last:
        if pkind[pos + 1] - pkind[pos] < critqsepind:
            merged.append((pkind[pos] + pkind[pos + 1]) / 2.0)
            pos += 2
        else:
            merged.append(pkind[pos])
            pos += 1
    return merged
def clustercoordsbymax1d(arr, pkind, critqsepind):
    """Thin 1-d peak indices closer than critqsepind, keeping local maxima.

    Wherever peak indices are too close together, the neighbor of the index
    with the highest arr value is deleted, then the too-close pairs are
    recomputed.  pkind is sorted in place; returns the surviving (sorted)
    index array.
    """
    pkind.sort()
    # indices (into pkind) of the left member of every too-close pair
    indindslow=numpy.where((pkind[1:]-pkind[:-1])<critqsepind)[0]
    indindshigh=indindslow+1
    while indindslow.size>0:
        maxindindindlow=myargmax(arr[pkind[(indindslow,)]])
        maxindindindhigh=myargmax(arr[pkind[(indindshigh,)]])
        # NOTE(review): the cross-indexing below (high[...low] / low[...high])
        # reads as "delete the neighbor of the strongest pair member" -- it
        # looks intentional, but verify against callers before changing it.
        if arr[pkind[indindslow[maxindindindlow]]]>arr[pkind[indindshigh[maxindindindhigh]]]:
            pkind=numpy.delete(pkind, indindshigh[maxindindindlow])
        else:
            pkind=numpy.delete(pkind, indindslow[maxindindindhigh])
        # recompute the remaining too-close pairs after the deletion
        indindslow=numpy.where((pkind[1:]-pkind[:-1])<critqsepind)[0]
        indindshigh=indindslow+1
    return pkind
def peaksearch1d(innn, dx=.1, critcounts=10, critqsepind=5, critcurve=None, max_withincritsep=False):
    """Find peaks of a 1-d spectrum as (fractional) index positions.

    dx : q spacing of one index.  Peaks are positive-to-negative zero
    crossings of the first derivative whose spectrum value exceeds
    critcounts; peaks closer than critqsepind indices are merged (by
    midpoint, or by keeping the larger one when max_withincritsep -- no
    second derivative is needed unless critcurve is given).  critcurve,
    when given, further requires a second derivative below -critcurve
    (counts*nm^2).  Returns a float32 array sorted highest to lowest.
    """
    ifirstder=firstder(innn, dx)
    zeroind=arrayzeroind1d(ifirstder, postoneg=True)
    # keep only crossings whose (rounded-index) spectrum value is large enough
    temp=numpy.where(innn[(numpy.uint16(numpy.round(zeroind)),)]>critcounts)
    fullpkind=zeroind[temp]
    if fullpkind.size==0:
        return fullpkind
    if max_withincritsep:
        pkind=clustercoordsbymax1d(innn, numpy.uint16(numpy.round(fullpkind)), critqsepind)
    else:
        pkind=clustercoords1d(fullpkind, critqsepind)  # floating point indices!
    if critcurve is not None:
        isecder=secder(innn, dx)
        temp=numpy.where(isecder[(numpy.uint16(numpy.round(pkind)),)]<(-1*critcurve))
        pkind=numpy.array(pkind)[temp]
    pkind=list(pkind)
    pkind.reverse()  # highest to smallest for the pairing done downstream
    return numpy.array(pkind, dtype=numpy.float32)
def qqindeces_innn(knnn, qgrid, qgrid_qq, qqpktuplist, qqsigmasep=3.0, qqindsep=None, qqanisoalloyfrac=(0.01, 0.05)):
    """Associate qq-space peaks with pairs of 1-d spectrum peaks.

    knnn : 1-d peak positions in innn indices (sorted high to low).
    qgrid, qgrid_qq : q grids of the 1-d spectrum and of the qq analysis.
    qqpktuplist : per-qq-peak records from qqpeakdiagnostics.
    qqindsep : when given, an absolute association radius in innn indices;
        otherwise an anisotropic ellipse derived from qqanisoalloyfrac
        (alpha, beta) -- separation scaling with a fraction of 1/q = d.
    All base units are indices of the 1-d spectrum; peaks outside the index
    range covered by qq are ignored.

    Returns (qq_innn_dicts, qqindsets_pkind, unassignedpkind): a dict per
    (qq peak, k-pair) association, a set of associated qq indices per k
    peak, and the k peaks never associated with any qq peak.
    """
    # start/end of the qq analysis range expressed in innn indices
    startind=(qgrid_qq[0]-qgrid[0])//qgrid[1] #assume this is positive, can'e build qq over a larger range than that available in innn
    indratio=qgrid_qq[1]//qgrid[1]
    endind=startind+(qgrid_qq[2]-1)*indratio
    pkind=knnn[(knnn>startind)&(knnn<endind)]#throw away peak positions outside of the range used in qq
    def ind1d_ind2d(ind2d):
        # convert qq-grid indices to innn indices
        returnnumber=False
        if isinstance(ind2d, float) or isinstance(ind2d, int):
            # NOTE(review): list(ind2d) raises TypeError for a scalar --
            # presumably [ind2d] was intended; confirm before relying on
            # the scalar path.
            ind2d=list(ind2d)
            returnnumber=True
        ind2d=numpy.array(ind2d)
        ind1d=ind2d*indratio+startind
        if returnnumber:
            return ind1d[0]
        else:
            return list(ind1d)
    qqpkcenqqunits=[tup[0][0:2] for tup in qqpktuplist]
    qqpkcen=[ind1d_ind2d(tup[0][0:2]) for tup in qqpktuplist]
    # half-widths converted to innn indices; the *2 repeats x,x and y,y
    qqsigs=[list(numpy.abs(numpy.int32(tup[0][2:6])-numpy.int32([tup[0][0]]*2+[tup[0][1]]*2))*indratio) for tup in qqpktuplist]
    qqpkbools=numpy.array([tup[0][6:10] for tup in qqpktuplist])
    qqnorm=numpy.array([tup[1][2] for tup in qqpktuplist])
    qqpkvolume=numpy.array([tup[1][1] for tup in qqpktuplist])
    if not (qqindsep is None):
        # NOTE(review): numpy.ones(range(...)) passes a length-n sequence as
        # the SHAPE argument -- numpy.ones(len(pkind)) was probably intended.
        qqcritsep=numpy.ones(range(len(pkind)), dtype=numpy.float32)*qqindsep #rememebr this is already in innn indeces
    else:
        # anisotropic ellipse coefficients derived from the alloy fractions
        alpha,beta=qqanisoalloyfrac
        qqx=numpy.array(q_qgrid_ind(qgrid_qq, numpy.array([cd[0] for cd in qqpkcen])))#DEAL WITH THE ACTUAL Q VALUES FOR THIS - may be more efficient way to do it
        qqy=numpy.array(q_qgrid_ind(qgrid_qq, numpy.array([cd[1] for cd in qqpkcen])))
        K=alpha**2*beta**2*qqx*qqy-alpha**4*(qqx**2+qqy**2+2.0*qqx*qqy)
        L=2*alpha**2*(qqx+qqy)
        r=-1.0*qqx*(beta**2*qqy-L)/(qqy*(beta**2*qqx-L))
        s=-0.25*(L**2-(2.0*alpha*beta*qqx)**2-(2.0*alpha*beta)**2*qqx*qqy+beta**4*qqx**2)/(beta**2*qqx**2*K)
        t=-0.25*(4*alpha**2-beta**2)/K
        AA=(t+s)*qgrid[1]**2
        BB=(t+r**2*s)*qgrid[1]**2
        CC=2*(t+r*s)*qgrid[1]**2
    unassignedpkindind=set(range(len(pkind))) #set of all indeces of pkind
    qq_innn_dicts=[]
    qqindsets_pkind=[set([]) for p in pkind]
    for qi in range(len(pkind)-1):
        for qj in range(qi+1, len(pkind)): #this assures qi != qj and pkind[qi] > pkind[qj]
            qhigh=pkind[qi]
            qlow=pkind[qj]
            xqqsep=numpy.array([qhigh-cd[0] for cd in qqpkcen])
            yqqsep=numpy.array([qlow-cd[1] for cd in qqpkcen])
            # pick the half-width on the side the separation points toward
            xsigind=numpy.uint16([(numpy.sign(x)+1)//2 for x in xqqsep])
            ysigind=numpy.uint16([(numpy.sign(y)+1)//2+2 for y in yqqsep])
            # a sigma test is allowed only when both relevant bounds came from
            # the critical-value crossing (both bools == 1)
            qqwheresig=numpy.where(numpy.array([qqpkbools[i][xsigind[i]]+qqpkbools[i][ysigind[i]] for i in range(len(qqpkbools))])==2)
            xsig=numpy.array([qqsigs[i][xsigind[i]] for i in range(len(qqsigs))])
            ysig=numpy.array([qqsigs[i][ysigind[i]] for i in range(len(qqsigs))])
            xsig[xsig==0]=0.001#just to avoid div by zero
            ysig[ysig==0]=0.001
            insertqqind=numpy.zeros(len(qqpkbools))
            if not qqindsep is None:
                withindefaultlim=(xqqsep**2+yqqsep**2)<(qqcritsep[qi]*numpy.sqrt(xqqsep**2+yqqsep**2))
            else:
                withindefaultlim=(AA*xqqsep**2+BB*yqqsep**2+CC*numpy.abs(xqqsep*yqqsep))<1.0
            # sigma deviations weighted by the dot product with the separation vector
            withinsiglim=(xqqsep**2/xsig+yqqsep**2/ysig)<(qqsigmasep*numpy.sqrt(xqqsep**2+yqqsep**2))
            insertqqind=withindefaultlim
            insertqqind[qqwheresig]=withinsiglim[qqwheresig]
            indtoadd=numpy.where(insertqqind==True)[0]
            for ind in indtoadd:
                delsig=None
                delqind=None
                temp=numpy.sqrt(xqqsep[ind]**2+yqqsep[ind]**2)
                if ind in qqwheresig[0]:
                    if temp==0:
                        delsig=temp
                    else:
                        delsig=(xqqsep[ind]**2/xsig[ind]+yqqsep[ind]**2/ysig[ind])/temp
                else:
                    if temp==0:
                        delqind=temp
                    else:
                        delqind=(xqqsep[ind]**2+yqqsep[ind]**2)/temp
                # all indices are innn units (floating point) except qqcenterqqind
                qq_innn_dicts+=[{'qqpkind':ind, 'qqcenteriind':ind1d_ind2d(qqpkcenqqunits[ind]),'qqcenterqqind':qqpkcenqqunits[ind], 'qindhighlow':(qhigh, qlow), 'kindhighlow':(qi, qj), 'delsig':delsig, 'delqind':delqind, 'qqpkvolume':qqpkvolume[ind], 'qqpknorm':qqnorm[ind]}]
                unassignedpkindind-=set([qi, qj])
                qqindsets_pkind[qi]|=set([ind])
                qqindsets_pkind[qj]|=set([ind])
    unassignedpkind=[pkind[i] for i in unassignedpkindind]
    return (qq_innn_dicts, qqindsets_pkind, unassignedpkind)
def outerlist_order_norepeat(tup1, tup2):
    """Cross the two index collections into ordered pairs, skipping equal elements.

    Each pair (a, b) is ordered so a < b; since knnn is sorted highest to
    lowest this keeps ifnnn_a > ifnnn_b.  Returns a list of 2-tuples.
    """
    pairs = []
    for first in tup1:
        for second in tup2:
            if first == second:
                continue
            if first < second:
                pairs.append((first, second))
            else:
                pairs.append((second, first))
    return pairs
def ktupphases_qqind(ktuplist, critnumipks=0): #list of tuples (kindhigh,kindlow)
    """Assemble "phases" from a list of (kindhigh, kindlow) tuples.

    A phase is a set of k-index tuples closed under cross terms: a tuple may
    extend a phase only when every cross pair it forms with the phase's
    existing tuples is itself present in ktuplist.  Phases that are subsets
    of larger phases, or smaller than the "n choose 2" count implied by
    critnumipks peaks, are removed at the end.  Returns the surviving
    phases (sets of tuples), largest first.
    """
    if critnumipks<=2:
        critnumtups=1
    else:
        critnumtups=(critnumipks)*(critnumipks-1)//2 # i.e. "n choose 2"
    def phasesizecmp(pa,pb):
        # Python 2 cmp-style comparator ordering phases by size
        return 1*(len(pa)>len(pb))-1*(len(pa)<len(pb))
    def tuplistallexist(tuplist):
        # True when every cross tuple is available in the input list
        return all([(tup1 in ktuplist) for tup1 in tuplist])
    phases=[]
    print 'ktups', len(ktuplist)
    for ktup in ktuplist:
        phases.sort(cmp=phasesizecmp) # ascending by phase size
        # NOTE(review): "%1==0" is always true, so this prints every pass
        if len(phases)%1==0:
            print 'phases', len(phases)
        # note: phases grows during this loop; new phases are also visited
        for phs in phases: #phs is a pointer
            if not (ktup in phs):
                hypotheticalphase=set([ktup])|phs
                # only extend when the result is not a subset of an existing phase
                if sum([hypotheticalphase<=tempph for tempph in phases])==0:
                    crosstermtuplist=[]
                    for phsktup in phs:
                        crosstermtemp=outerlist_order_norepeat(phsktup, ktup)
                        if tuplistallexist(crosstermtemp):
                            crosstermtuplist+=crosstermtemp
                        else:
                            crosstermtemp=None
                            break
                    if not (crosstermtemp is None):
                        # keep the original phase too, allowing branching of phases
                        phases+=[copy.copy(phs)|set(crosstermtuplist)]
        # each ktup also seeds its own phase so later ktups can extend it --
        # at the end there is a phase for every ktup
        phases+=[set([ktup])]
    phases.sort(cmp=phasesizecmp, reverse=True)
    i=0
    # drop phases that are too small ...
    while i<len(phases)-1:
        if len(phases[i])>=critnumtups:
            break
        else:
            del phases[i]
    # ... and phases that are subsets of a larger phase
    while i<len(phases)-1:
        if any([phases[i]<=tempph for tempph in phases[i+1:]]):
            del phases[i]
        else:
            i+=1
    return phases
def phasevecs_ktup(qqpkind_ktupDICT, kindtuplephases, qqpkindveclength, kindveclength, critnumqqpks):
    """Build boolean membership vectors for each phase.

    qqpkind_ktupDICT : dict mapping a k-index tuple to the set of qq peak
        indices it instantiates.
    kindtuplephases : list of phases, each a set of k-index tuples.
    qqpkindveclength, kindveclength : lengths of the output boolean vectors.
    critnumqqpks : phases with fewer distinct qq peaks than this are dropped.

    Returns (qqpkindphasevecs, kindphasevecs): boolean arrays with one row
    per surviving phase, or (None, None) when no phase survives.  A phase
    (set of qq peaks) can be instanced by different tuple sets; that
    repetition is reflected in the returned data.

    Cleanup: removed a stray no-op expression statement; replaced map() with
    list comprehensions (the results are mutated with del/remove below, which
    requires real lists); moved the emptiness check before the index-array
    construction.
    """
    def qqindlist_ktupset(ktupset):
        # union of all qq peak indices instanced by the tuples of this phase;
        # [-1] marks a phase with too few peaks for removal below
        qqindset = set([])
        for ktup in ktupset:
            qqindset |= qqpkind_ktupDICT[ktup]
        if len(qqindset) < critnumqqpks:
            return [-1]
        return list(qqindset)
    def kind_ktupset(ktupset):
        # all distinct k indices appearing in the tuples of this phase
        tempset = set([])
        for ktup in ktupset:
            tempset |= set(ktup)
        return list(tempset)
    qqindlist_phase = [qqindlist_ktupset(phs) for phs in kindtuplephases]
    kindlist_phase = [kind_ktupset(phs) for phs in kindtuplephases]
    # drop the phases flagged [-1], keeping both lists index-aligned
    while [-1] in qqindlist_phase:
        del kindlist_phase[qqindlist_phase.index([-1])]
        qqindlist_phase.remove([-1])
    if len(qqindlist_phase) == 0:
        return (None, None)
    # (phase row, column) index pairs for scattering True into the vectors
    qqpkindvecindeces = numpy.uint16([[phasenum, qqind] for phasenum, qqindlist in enumerate(qqindlist_phase) for qqind in qqindlist]).T
    kindvecindeces = numpy.uint16([[phasenum, kind] for phasenum, kindlist in enumerate(kindlist_phase) for kind in kindlist]).T
    qqpkindphasevecs = numpy.zeros((len(qqindlist_phase), qqpkindveclength), dtype=numpy.bool_)
    qqpkindphasevecs[(qqpkindvecindeces[0], qqpkindvecindeces[1])] = True
    kindphasevecs = numpy.zeros((len(qqindlist_phase), kindveclength), dtype=numpy.bool_)
    kindphasevecs[(kindvecindeces[0], kindvecindeces[1])] = True
    return (qqpkindphasevecs, kindphasevecs)
def indexarr_xzgrid(xgrid, zgrid, pointlist=None):
    """Return an int32 array of running indices shaped (xgrid[2], zgrid[2]).

    When pointlist is given, every entry whose index is absent from
    pointlist is replaced by -1.
    """
    total = xgrid[2] * zgrid[2]
    indexarr = numpy.int32(range(total)).reshape((xgrid[2], zgrid[2]))
    if pointlist is None:
        return indexarr
    keep = numpy.zeros(indexarr.shape, dtype=numpy.bool_)
    for pt in pointlist:
        keep |= (indexarr == pt)
    indexarr[numpy.logical_not(keep)] = -1
    return indexarr
def subsettest_boolarray(a, b):
    """Return 1 if a is a subset of b, -1 if b is a subset of a, 0 otherwise.

    a and b are boolean arrays treated as sets of True positions.  Elements
    of a not in b show up as a & (a ^ b); identical arrays yield 0 (1 - 1).
    """
    exclusive = a ^ b
    a_has_extra = numpy.any(a * exclusive)
    b_has_extra = numpy.any(b * exclusive)
    return int(not a_has_extra) - int(not b_has_extra)
def spatialblobinfo_phase(samplebool_phase, xgrid, zgrid, pointlist, radius_xpts=1, minptsinblob=3):
    """For every phase, locate its connected spatial blobs and their neighbor fractions.

    samplebool_phase : iterable of boolean sample vectors, one per phase,
        each reshapeable to (xgrid[2], zgrid[2]).
    pointlist : indices of valid sample positions; others are masked out.
    Returns a list of (phase index, flattened blob-membership bool array,
    blob neighbor fraction) tuples -- one per (phase, blob) instance.

    NOTE(review): probably unused since 7 Feb 2010; 'USER-COMPILED' cases
    where xgrid/zgrid are meaningless are not supported.
    """
    spacingratio = zgrid[1] / xgrid[1]
    sampleindexarr = indexarr_xzgrid(xgrid, zgrid, pointlist)
    validmask = sampleindexarr > -1
    blobinfo = []
    for phasenum, samplebool in enumerate(samplebool_phase):
        phasemap = numpy.reshape(samplebool, (xgrid[2], zgrid[2]))
        blobarr, neighfraclist = blobarrgen(phasemap, radius=radius_xpts, minptsinblob=minptsinblob, mask=validmask, spacingratio=spacingratio)
        # blob labels in blobarr start at 1; 0 means "no blob"
        for blobnum, neighfrac in enumerate(neighfraclist, 1):
            blobinfo.append((phasenum, (blobarr == blobnum).flatten(), neighfrac))
    return blobinfo
def blobarrgen(arr, radius=1, minptsinblob=1, mask=None, spacingratio=1.0):
    """ Find connected regions in a binary image.
    If the image is not binary, then convert it to one
    using arr > arr.min() as a criteria.
    everything (e.g. radius) is in units of the spacing between x-(1st)indeces; the z- or y- or 2nd index spacing is the x-spacing * spacingratio
    Parameters:
    arr: a numpy array with two dimensions
    minptsinblob: patches with fewer than minptsinblob pixels are rejected
    Returns (blobarr, blobneighfrac):
    blobarr is 0 where there is no blob and carries an integer label
    per confluent area (its maximum is the number of areas found);
    blobneighfrac lists, per blob, the fraction of neighbor checks that
    landed on a same-valued pixel.  Blobs cannot overlap.
    """
    if arr.ndim != 2 :
        print("2D arrays are required")
        return
    if mask is None:
        # BUGFIX: was numpy.ones[arr.shape] -- indexing the function object
        # raised TypeError whenever mask was omitted
        mask = numpy.ones(arr.shape)
    #If not a binary image, then convert to one:
    if arr.max() != 1 and arr.min() != 0:
        print("Not a binary image, converting to one:")
        arrtmp = (arr > arr.min())*mask
    else :
        arrtmp = arr*mask
    # BUGFIX: was "arrtmp.sum < 2", comparing the bound method object itself
    if arrtmp.sum() < 2 :
        print("Empty image")
        return
    blobarr = numpy.zeros((arrtmp.shape),dtype=numpy.uint16)
    marker = 1
    blobneighfrac=[]
    #points that are eligible for blobs:
    nonzerocoords = arrtmp.nonzero()
    for (x,y) in zip(nonzerocoords[0],nonzerocoords[1]):
        if arrtmp[x,y] != 0 :
            ans = findsingleblob(arrtmp,x,y,radius, mask, spacingratio)
            blobx = numpy.asarray(ans[0])
            bloby = numpy.asarray(ans[1])
            if len(blobx) >= minptsinblob:
                # only divides by zero if a point had no neighbors within mask
                blobneighfrac+=[ans[2]/(1.0*ans[2]+ans[3])]
                blobarr[blobx,bloby] = marker
                marker = marker + 1
            #zero the blob so its points are not revisited:
            arrtmp[blobx, bloby] = 0
    return (blobarr, blobneighfrac)
def findsingleblob(arr, x, y, radius=1, mask=None, spacingratio=1.0):
    """ take a binary image, and if (x,y) is a nonzero pixel,
    return the coordinates of the confluent area this pixel is within.
    everything (e.g. radius) is in units of the spacing between x-(1st)indeces; the z- or y- or 2nd index spacing is the x-spacing * spacingratio
    Based on the code from Eric S. Raymond posted to:
    http://mail.python.org/pipermail/image-sig/2005-September/003559.html
    Parameters:
    arr : a binary image
    x,y: start coordinates
    Returns [bloblist_x, bloblist_y, numneighlinks, numneighnolink, numneigh]:
    len(bloblist_x) is the number of points in the blob; numneighlinks is
    twice the number of links (double counted from both ends);
    numneighnolink counts neighbor checks that landed on empty pixels;
    numneigh is the neighborhood size (excluding the center).

    BUGFIX: mask default was numpy.ones[arr.shape] (indexing the function
    object, a TypeError); offset lists are now materialized so the code also
    runs under Python 3 (zip/range iterators), with identical Python 2
    behavior.
    """
    Ni, Nj = arr.shape
    if mask is None:
        mask = numpy.ones(arr.shape)
    arrcopy = copy.copy(arr)
    edge = [[x, y]]
    bloblist_x = [x]
    bloblist_y = [y]
    numneighlinks = 0
    numneighnolink = 0
    # offsets of the (elliptical when spacingratio != 1) neighborhood
    rng = list(range(-numpy.int16(numpy.floor(radius)), numpy.int16(numpy.floor(radius)) + 1))
    xdevs = numpy.int16(numpy.outer(rng, numpy.ones(len(rng))).flatten())
    ydevs = numpy.array(rng * len(rng))
    loc = numpy.where(xdevs**2 + (spacingratio * ydevs)**2 <= radius**2)
    xdevs = xdevs[loc]
    ydevs = ydevs[loc]
    numneigh = len(xdevs) - 1
    while edge:
        newedge = []
        #Check all pixels on the current frontier:
        for (x, y) in edge:
            # neighborhood includes (x, y) itself; it must not count as its own neighbor
            neighlist = list(zip(list(x + xdevs), list(y + ydevs)))
            neighlist.remove((x, y))
            arrcopy[x, y] = 0
            for (i, j) in neighlist:
                #protect for out of range indices:
                if i < 0 or j < 0 or i >= Ni or j >= Nj:
                    continue
                elif mask[i, j]:
                    if arrcopy[i, j]:
                        arrcopy[i, j] = 0
                        #store the point for further examination
                        newedge.append((i, j))
                        #add it to the blob
                        bloblist_x.append(i)
                        bloblist_y.append(j)
                    #look at the ORIGINAL image so links are counted from both
                    #ends (hence "twice the number of links")
                    if arr[i, j]:
                        numneighlinks += 1
                    else:
                        numneighnolink += 1
        edge = newedge
    return [bloblist_x, bloblist_y, numneighlinks, numneighnolink, numneigh]
#len(bloblist_x) is the number of points in the blob
#numneighlinks is twice the number of links due to double counting
#numneighnolink is comparable to numneighlinks
#numneighs*numneighlinks/(numneighlinks+numneighnolink) is the average value for number of neighbors with same +1 value
#len(bloblist_x)*numneighs+(numneighlinks+numneighnolink) is the number of times a neighbor was outside of mask
def flatten(x):
    """Recursively flatten nested lists and tuples into one flat list.

    Non-sequence elements keep their order; every list or tuple encountered
    is expanded in place, to arbitrary depth.
    """
    flat = []
    for element in x:
        if isinstance(element, (list, tuple)):
            flat += flatten(element)
        else:
            flat += [element]
    return flat
def peakinfo_pksavearr(peaks, fiterr=False, removebeyondqgrid=None):
    """Select valid peak columns from a saved peak array.

    peaks : 2-d array; row 0 holds q positions (NaN marks an unused slot),
        rows 1-2 (and 3-5 when fiterr) hold the associated peak data.
    fiterr : when True, also return the fit-error rows 3-5.
    removebeyondqgrid : optional q grid; peaks outside its (min, max) range
        (from minmaxint_qgrid) are dropped.

    Returns the selected rows as a tuple of 1-d arrays.

    BUGFIX: the q-range test previously produced a subset-length boolean
    mask and used it to index the full-length NaN mask, which both has the
    wrong shape and selects mask VALUES instead of masking positions; the
    two masks are now combined positionally.
    """
    inds = numpy.logical_not(numpy.isnan(peaks[0, :]))
    if not removebeyondqgrid is None:
        a, b, c = minmaxint_qgrid(removebeyondqgrid)
        # NaN comparisons are False, so NaN columns stay excluded; suppress
        # the invalid-value warning they would otherwise raise
        with numpy.errstate(invalid='ignore'):
            inds = inds & (peaks[0, :] > a) & (peaks[0, :] < b)
    if fiterr:
        return peaks[0, inds], peaks[1, inds], peaks[2, inds], peaks[3, inds], peaks[4, inds], peaks[5, inds]
    else:
        return peaks[0, inds], peaks[1, inds], peaks[2, inds]
def waveletset1d(qgrid, qscalegrid, qposngrid):
    """Build a bank of Mexican-hat (Ricker) wavelets, one 2-d array per scale
    in qscalegrid, each row centered at a position from qposngrid and sampled
    on the q values of qgrid. Returns a float32 array indexed
    [scale, position, q]."""
    scalewaves = []
    for qs in scale_scalegrid_ind(qscalegrid):
        onescale = [[1.64795*(1.0-((q-qp)/qs)**2)*numpy.exp(-0.5*((q-qp)/qs)**2)/numpy.sqrt(2.0*numpy.pi*qs) for q in q_qgrid_ind(qgrid)] for qp in q_qgrid_ind(qposngrid)]
        scalewaves.append(numpy.float32(onescale))
    return numpy.float32(scalewaves)
def wave1dkillfix(wave, targetenergy, dq=1):
    """Rescale a discretized wavelet in place so its energy sum((dq*w)**2)
    equals targetenergy, scaling the negative lobe by an extra factor of
    (positive sum / negative sum) so the lobes stay balanced. Returns the
    same (mutated) array."""
    posidx = numpy.where(wave > 0)
    negidx = numpy.where(wave < 0)
    possum = wave[posidx].sum()
    negsum = -1.0 * wave[negidx].sum()
    posenergy = ((dq * wave[posidx])**2).sum()
    negenergy = ((dq * wave[negidx])**2).sum()
    # factor chosen so the combined rescaled energy hits targetenergy exactly
    factor = numpy.sqrt(targetenergy / (posenergy + ((possum / negsum)**2) * negenergy))
    wave[posidx] *= factor
    wave[negidx] *= (factor * possum / negsum)
    return wave
def perform_ridges_wavetrans1d(wtrev, qsindlist, noiselevel, numscalesskippedinaridge=1.5):# the scale index of the wt has been reversed so that this fucntion steps from biggest to msallest wavelet scale. So the scale index of ridges is inverted from that of the previously saved wt
    """Link per-scale peaks of a (scale-reversed) wavelet transform into ridges.

    wtrev: 2-d wavelet transform, scale axis already reversed (big->small).
    qsindlist: per-scale critical peak-separation (in index units).
    noiselevel: critical counts threshold passed to peaksearch1d.
    numscalesskippedinaridge: number of scales a ridge may skip and continue.

    Returns a list of ridges, each a list with one entry per scale:
    32767 = no peak at that scale, a non-negative value = peak position index,
    and a negative value -(i+1) = link to ridge index i (fork bookkeeping).
    """
    initpeakind=list(numpy.int16(numpy.round(peaksearch1d(wtrev[0], dx=1, critcounts=noiselevel, critqsepind=qsindlist[0], max_withincritsep=True))))#this dx no good if using curvature
    # one ridge seeded per peak found at the largest wavelet scale
    ridges=[[ind]+[32767]*(wtrev.shape[0]-1) for ind in initpeakind]
    for scalecount in range(1, wtrev.shape[0]):
        wtrow=wtrev[scalecount, :]
        peakind=list(numpy.int16(numpy.round(peaksearch1d(wtrow, dx=1, critcounts=noiselevel, critqsepind=qsindlist[scalecount], max_withincritsep=True))))
        for ridgecount, ridge in enumerate(ridges):
            if len(peakind)>0 and ridge[scalecount]==32767: #need peaks to assign and also if ridge forked in previous scale then that ridge has ended
                # walk back to the most recent scale with a real (>=0) peak
                temp=1
                while ridge[scalecount-temp]==32767 or ridge[scalecount-temp]<0:
                    temp+=1
                if temp-1<=numscalesskippedinaridge:
                    ridgerep=ridge[scalecount-temp]
#                    print 'PEAKIND', peakind
#                    print scalecount, 'RIDGE', ridge
#                    print 'ridgerep', ridgerep
#                    print 'critsep**2', (1.5*qsindlist[scalecount-1])**2
#                    print 'sep**2', (1.0*numpy.float32(peakind)-ridgerep)**2
                    closeenoughinds=list(numpy.where((1.0*numpy.float32(peakind)-ridgerep)**2<(1.5*qsindlist[scalecount-1])**2)[0])#the 1.3 loosens the contraint for associating a current peak with one from the previous (larger) qscale and thus makes mother->children associations more common
                    #print 'closeenoughinds1', closeenoughinds
                    allridgereps=numpy.float32([r[scalecount-temp] for r in ridges])
                    closeenoughinds=[ceind for ceind in closeenoughinds if ridgecount==myargmin((peakind[ceind]-allridgereps)**2)]#peaks are only close enough to a ridge if that ridge is the closest tot he peak
                    closestind=myargmin((numpy.float32(peakind)-ridgerep)**2)
                    if len(closeenoughinds)==1:
                        # exactly one candidate: extend this ridge
                        ridge[scalecount]=peakind.pop(closestind)
                    elif len(closeenoughinds)>1:
                        # fork: spawn one child ridge per candidate peak
                        newridgestart=numpy.int16(ridge[:scalecount])
                        newridgestart[newridgestart!=32767]=-1*ridgecount-1
                        newridgestart=list(newridgestart)
                        closeenoughinds.sort(reverse=True) #this is imperative because otherwise the .pop() will mess things up
                        for ceind in closeenoughinds:
                            pkind=peakind.pop(ceind)
                            ridges+=[newridgestart+[pkind]+[32767]*(wtrev.shape[0]-scalecount-1)]
                            if ceind==closestind:
                                ridges[ridgecount]=ridge[:scalecount]+[-1*(len(ridges)-1)-1]*(len(ridge)-scalecount) #the forked ridge is fille dwith the ridge index of its closest new subridge
        # any peaks left unassigned at this scale start brand-new ridges
        for pkind in peakind:
            ridges+=[[32767]*scalecount+[pkind]+[32767]*(wtrev.shape[0]-scalecount-1)]
    return ridges
#COPIED 12Jan2010
#def perform_ridges_wavetrans1d(wtrev, qsindlist, noiselevel, numscalesskippedinaridge=1.5):
#
# initpeakind=list(numpy.int16(numpy.round(peaksearch1d(wtrev[0], dx=1, critcounts=noiselevel, critqsepind=qsindlist[0], max_withincritsep=True))))#this dx no good if using curvature
# ridges=[[ind]+[32767]*(wtrev.shape[0]-1) for ind in initpeakind]
#
# for scalecount in range(1, wtrev.shape[0]):
# wtrow=wtrev[scalecount, :]
# peakind=list(numpy.int16(numpy.round(peaksearch1d(wtrow, dx=1, critcounts=noiselevel, critqsepind=qsindlist[scalecount], max_withincritsep=True))))
# for ridgecount, ridge in enumerate(ridges):
# if len(peakind)>0 and ridge[scalecount]==32767: #need peaks to assign and also if ridge forked in previous scale then that ridge has ended
# temp=1
# while ridge[scalecount-temp]==32767 or ridge[scalecount-temp]<0:
# temp+=1
# if temp-1<=numscalesskippedinaridge:
# ridgerep=ridge[scalecount-temp]
# closeenoughinds=list(numpy.where((1.0*numpy.float32(peakind)-ridgerep)**2<1.0*qsindlist[scalecount-1]**2)[0])
# closestind=myargmin((numpy.float32(peakind)-ridgerep)**2)
# if len(closeenoughinds)==1:
# ridge[scalecount]=peakind.pop(closestind)
# elif len(closeenoughinds)>1:
# newridgestart=numpy.int16(ridge[:scalecount])
# newridgestart[newridgestart!=32767]=-1*ridgecount-1
# newridgestart=list(newridgestart)
# closeenoughinds.sort(reverse=True) #this is imperative because otherwise the .pop() will mess things up
# for ceind in closeenoughinds:
# if ceind==closestind:
# ridges[ridgecount]=ridge[:scalecount]+[-1*(len(ridges)-1)-1]*(len(ridge)-scalecount) #the forked ridge is fille dwith the ridge index of its closest new subridge
# pkind=peakind.pop(ceind)
# ridges+=[newridgestart+[pkind]+[32767]*(wtrev.shape[0]-scalecount-1)]
#
#
# for pkind in peakind:
# ridges+=[[32767]*scalecount+[pkind]+[32767]*(wtrev.shape[0]-scalecount-1)]
# return ridges
#
#def perform_peaks_ridges1d(wt, ridges, ridgescalecritind=0, minridgelength=3):
# ridgeinds=numpy.where(((ridges!=32767).sum(axis=1)>=minridgelength)&(ridges[:, -1]!=32767))[0] #this is the ridge length including the "good" ridge components from other ridges associated through forking that has to be at least minridgelength but this is only good if the ridge goes to the smallest scale
# ridgeinds2=numpy.where(((ridges!=32767)*(ridges>=0)).sum(axis=1)>=minridgelength)[0]#this will catch the ridges that don't go to the end but are long enough on their own (not counting mother forks). mother forks ruled out later
# ridgeinds=numpy.array(list(set(ridgeinds)|set(ridgeinds2)))
# peaks=[]#list of [peak scaleind, posnind]
# if len(ridgeinds)==0:
# print 'no valid ridges'
# return []
# for ridge in ridges[ridgeinds]:
# rind=numpy.where(ridge!=32767)[0]
# if ridge[rind[-1]]>=0: #if this fails that means the ridge was forked and thus its peaks will be found in other ridges
# rind=numpy.where((ridge!=32767)&(ridge>=0))[0] #this will generally be a continuous sequence of indeces except for possibler holes of size numscalesskippedinaridge
# wtvals=(wt[(wt.shape[0]-1-rind, ridge[rind])]) #-rind inverts but resulting order is still that of rind
# indforincreasingtest=(rind>=ridgescalecritind)
# if (wtvals[indforincreasingtest][1:]>wtvals[indforincreasingtest][:-1]).sum()>0: #if a ridge wt value is bigger than its predecessor(larger scale) then the wt isn't strictly increasing with increasing qscale
# scaleind=(wt.shape[0]-1-rind[myargmax(wtvals[indforincreasingtest])]) #choose the wt scale and posn at the local maximum with largest wt
# posnind=ridge[rind[-1]]
# peaks+=[[scaleind, posnind]]
# return peaks
#
#
def perform_peaks_ridges1d(wt, ridges, ridgescalecritind=0, minridgelength=1, minchildlength=1, minridgewtsum=0., minchildwtsum=0., verbose=False):#wt scale ind is small->big but ridges is big->small and ridgescalecritind is of ridges
    """Turn wavelet-transform ridges into peak candidates.

    wt: wavelet transform indexed [scale(small->big), position].
    ridges: output of perform_ridges_wavetrans1d (scale big->small; 32767 =
    empty, negative = mother/child link, see that function).
    A ridge becomes a peak [scaleind, posnind] if it passes the length and
    total-wt thresholds and its wt maximum occurs at a scale index >=
    ridgescalecritind. Mother ridges only become peaks when none of their
    descendants did. Returns the list of peaks.
    """
    ridgeinds=numpy.where(((ridges!=32767).sum(axis=1)>=minridgelength)&(ridges[:, -1]!=32767))[0] #this is the ridge length including the "good" ridge components from other ridges associated through forking that has to be at least minridgelength but this is only good if the ridge goes to the smallest scale
    ridgeinds2=numpy.where(((ridges!=32767)*(ridges>=0)).sum(axis=1)>=minridgelength)[0]#this will catch the ridges that don't go to the end but are long enough on their own (not counting mother forks). mother forks ruled out later
    ridgeinds=numpy.array(list(set(ridgeinds)|set(ridgeinds2)))
    if verbose:
        print 'ridge inds passed length tests: ', ridgeinds
        print ridges
    peaks=[]#list of [peak scaleind, posnind]
    mother_peaks=[] # every element is a tuple, the 1st elemnt is like an entry of peaks, the 2nd is a list of the children
    ridgeind_peaks=[]
    for count, ridge in enumerate(ridges):
        rind=numpy.where(ridge!=32767)[0]
        if len(rind)>0: #if this fails that means the ridge is essentially empty
            # negative final entry means this ridge forked into children
            motherbool=ridge[rind[-1]]<0
            if verbose:
                if motherbool:
                    tempstr='(MOTHER) '
                else:
                    tempstr=''
                print 'NEW RIDGE ', tempstr, count, ': ', ridge
                print 'length test:', len(rind)>0
            rind=numpy.where((ridge!=32767)&(ridge>=0))[0] #this will generally be a continuous sequence of indeces except for possibler holes of size numscalesskippedinaridge
            wtvals=(wt[(wt.shape[0]-1-rind, ridge[rind])]) #-rind inverts but resulting order is still that of rind. wtvals is now the select values from wt but ordered from big->small wavelet scale
            totridgewt=wtvals.sum()
            ridgelen=len(rind)
            motherind=motherridgeind_childridge(ridge)
            if not motherind is None:
                # child ridge: apply the child thresholds, then credit the
                # mother's wt and length to the child's totals
                if verbose:
                    print 'tot wt of child test:', totridgewt, minchildwtsum, totridgewt>minchildwtsum
                if totridgewt<=minchildwtsum:
                    continue
                if verbose:
                    print 'length of child test:', ridgelen, minchildlength, ridgelen>=minchildlength
                if ridgelen<minchildlength:
                    continue
                mridge=ridges[motherind]
                mrind=numpy.where((mridge!=32767)&(mridge>=0))[0] #this will generally be a continuous sequence of indeces except for possibler holes of size numscalesskippedinaridge
                mwtvals=(wt[(wt.shape[0]-1-mrind, mridge[mrind])])
                if verbose:
                    print 'mother ridge index: ', motherind, 'the ridge contributes ', mwtvals.sum(), " and is ", mridge
                totridgewt+=mwtvals.sum()
                ridgelen+=len(mrind) #if a child ridge has a mother that is the child of another mother, the wt and len from this grandmother does not count towards the total of the grandchild
            if verbose:
                print 'ridgelen test', ridgelen, minridgelength, ridgelen>=minridgelength
            if ridgelen>=minridgelength:
                indforlocalmaxtest=(rind>=ridgescalecritind) #if bigger ridge index, smaller wavelet scale (used to be called indforincreasingtest)
                #if (wtvals[indforincreasingtest][1:]>wtvals[indforincreasingtest][:-1]).sum()>0: #if a ridge wt value is bigger than its predecessor(larger scale) then the wt isn't strictly increasing with increasing qscale
                if verbose:
                    print 'the ridge indeces with wave scale less than critical:', indforlocalmaxtest
                    print 'any there test: ', len(wtvals[indforlocalmaxtest])>0
                    if len(wtvals[indforlocalmaxtest])>0:
                        print 'local max test: ', numpy.max(wtvals[indforlocalmaxtest])>=numpy.max(wtvals)
                if len(wtvals[indforlocalmaxtest])>0 and numpy.max(wtvals[indforlocalmaxtest])>=numpy.max(wtvals): #local max is at a scale smaller than critical index - this does nto include mother ridge - the large-scale part of the ridge got to count towards the ridge length
                    if verbose:
                        print 'tot wt test:', totridgewt, minridgewtsum, totridgewt>minridgewtsum
                    if totridgewt>minridgewtsum:
                        scaleind=(wt.shape[0]-1-rind[myargmax(wtvals)]) #choose the wt scale and posn at the local maximum of wt - this does not include mother ridges. scaleind is now appropriate for wt (not ridges)
                        posnind=ridge[rind[-1]]#choose the position from the smallest scale in the ridge
                        if motherbool:
                            mother_peaks+=[(count, [scaleind, posnind], family_ridge(ridges, count)[2])]
                        else:
                            peaks+=[[scaleind, posnind]]
                        ridgeind_peaks+=[count]
    # a mother ridge only becomes a peak if none of its descendants is a
    # peak (or another mother peak candidate)
    ind_potentialpeaks=set(ridgeind_peaks)
    for ind, pk, descendants in mother_peaks:
        ind_potentialpeaks|=set([ind])
    for ind, pk, descendants in mother_peaks:
        if len(ind_potentialpeaks&set(descendants))==0:
            peaks+=[pk]
            if verbose:
                print 'MOTHER RIDGE ', ind, ' BECOMES PEAK:', pk, '. The non-peak children are indeces ', descendants
        else:
            if verbose:
                print 'MOTHER RIDGE ', ind, ' NOT A PEAK BECAUSE OF EXISTENCE OF DESCENDANTS: ', descendants
    return peaks
def motherridgeind_childridge(ridge):
    """Return the index of this ridge's mother ridge, or None if it has no
    mother. Assumes decreasing-qscale ordering; 32767 marks empty entries and
    a leading negative entry encodes the mother index as -(ind+1)."""
    valid = numpy.where((ridge != 32767))[0]
    neg = numpy.where((ridge != 32767) & (ridge < 0))[0]
    if len(neg) == 0 or neg[0] != valid[0]:
        # no negative entries, or this ridge is itself a mother that is not
        # the child of another ridge
        return None
    return -1 * ridge[neg[0]] - 1
def children_ridge(ridges, ind):
    """Return the sorted list of direct children of ridge *ind*. A child ridge
    stores the code -(ind+1); the mother (which can carry the same code when
    it is itself a child) is excluded. Assumes decreasing-qscale ordering."""
    mother = motherridgeind_childridge(ridges[ind])
    excluded = set() if mother is None else set([mother])
    childrows = set(numpy.where(ridges == (-1 * ind - 1))[0])
    return sorted(list(childrows - excluded))
def family_ridge(ridges, ind):#returns mother (None if the ridge has no mother) and list of children - assumes the ridge is indexed in decreasing order of qscale
    """Return (motherind, children, descendants) for ridge *ind*: the mother
    ridge index (or None), the direct children, and all descendants (children,
    grandchildren, ...) sorted."""
    ridge = ridges[ind]
    mothind = motherridgeind_childridge(ridge)
    children = children_ridge(ridges, ind)
    # BUGFIX: copy the list. Previously ``descendants = children`` aliased the
    # same list object, so the += below also grew ``children`` and the
    # returned "direct children" list contained all descendants.
    descendants = list(children)
    generation = children
    while len(generation) > 0:
        nextgeneration = []
        for chind in generation:
            nextgeneration += children_ridge(ridges, chind)
        generation = nextgeneration
        descendants += nextgeneration
    return mothind, children, sorted(descendants)
#for testing ridges
#def scale_scalegrid_ind(scalegrid, index='all'):
# if index=='all':
# index=numpy.array(range(numpy.uint16(scalegrid[2])), dtype=numpy.float32)
# elif isinstance(index, list):
# index=numpy.array(index)
# return scalegrid[0]*(scalegrid[1]**index)
#import h5py
#
#print '^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
#h5file=h5py.File('/mnt/SharedData/CHESS2008/2008CHESSh5analysis/dummyJan2010_20081121bsub3RuPtX.dat.h5',mode='r')
#wtgrp=h5file['2/analysis/mar345/wavetrans1d']
##r19=wtgrp['ridges'][19]
##ridges=r19[numpy.array([20])]
#ridges=wtgrp['ridges'][43][:][:]
#qscalegrid=wtgrp.attrs['qscalegrid']
#ridgeqscalevals=scale_scalegrid_ind(qscalegrid)[::-1]
#ridgescalecritind=numpy.where(ridgeqscalevals<=1.2)[0]
#ridgescalecritind=ridgescalecritind[0]
#print perform_peaks_ridges1d(wtgrp['wavetrans'][43,:,:], ridges, ridgescalecritind=ridgescalecritind, minridgelength=100)
#h5file.close()
def Gaussian(pars, x):
    """Evaluate a Gaussian at x with pars = [center, width, height]; pars[2]
    is the peak height (no area normalization)."""
    center, width, height = pars[0], pars[1], pars[2]
    return height * numpy.exp(-0.5 * ((x - center) / width)**2)
def Lorentzian(pars, x):#defined in nontraditional way so that pars[2] is the peak height
    """Evaluate a Lorentzian at x with pars = [center, width, height];
    normalized nontraditionally so pars[2] is the peak height."""
    reduced = (x - pars[0]) / pars[1]
    return pars[2] / (1 + reduced**2)
def fitpeakset(qvals, counts, initpars, peakfcn):#peak function must be a function that accepts a list of 3 parameters (the reshape 3 needs to be changed if num params differs)
numgauss=len(initpars)
zeroedpeakinds=[]
repeatwithpkremoved=True #peaks are removed if their fitted height is <0. At the end, these peaks are added to the fit parameter list with 0 height and 0 error
while repeatwithpkremoved:
initparscpy=copy.copy(list(initpars))
for pkind in reversed(zeroedpeakinds):#reverse so opo gets the right index
initparscpy.pop(pkind)
if len(initparscpy)==0:
break
initparsflat=numpy.float64(initparscpy).flatten()
def fitfcn(p, x):
allpars=numpy.reshape(p, (p.size//3, 3))
if isinstance(x, numpy.ndarray):
val=numpy.zeros(x.size, dtype='float32')
else:
val=0.0
for pars in allpars:
val+=peakfcn(pars, x)
return val
def residfcn(p, y, x):
err=y-fitfcn(p, x)
return err
counts=numpy.float64(counts)
qvals=numpy.float64(qvals)
fitout=scipy.optimize.leastsq(residfcn, initparsflat, args=(counts, qvals), full_output=1)
if not (fitout[4] in [1, 2]):
print 'Fitting Error', fitout[4],': ', fitout[3]
finalparams=numpy.float32(fitout[0])
finalparamsshaped=numpy.reshape(finalparams, (len(finalparams)//3, 3))
negpeakinds=numpy.where(finalparamsshaped[:, 2]<0)[0]
zeroedpeakinds+=list(negpeakinds)
zeroedpeakinds.sort()
repeatwithpkremoved=len(negpeakinds)>0
# print '^^^^^^^^^^^^^^^'
# print initparsflat
# print finalparamsshaped
# pylab.plot(qvals, counts, 'b.')
# pylab.show()
if not (fitout[1] is None):
covmat=fitout[1]
sigmas=numpy.float32([covmat[i, i] for i in range(len(finalparams))])
else:
print 'COVARIANCE NOT CALCULATED:', fitout[4],': ', fitout[3]
sigmas=numpy.zeros(len(finalparams), dtype='float32')
finalresid=numpy.sqrt((residfcn(finalparams, qvals, counts)**2).sum())
#pylab.plot(qvals, counts, 'k.', qvals, fitfcn(finalparams, qvals), 'r-')
sigmashaped=numpy.reshape(sigmas, (len(finalparams)//3, 3))
for pkind in zeroedpeakinds:
finalparamsshaped=list(finalparamsshaped)
sigmashaped=list(sigmashaped)
finalparamsshaped.insert(pkind, initpars[pkind][:2]+[0.])
sigmashaped.insert(pkind, [0., 0., 0.])
finalparamsshaped=numpy.float64(finalparamsshaped)
sigmashaped=numpy.float64(sigmashaped)
return (finalparamsshaped, sigmashaped, finalresid)
def windows_peakpositions(qgrid, qscales, qposns, windowextend_qscales):
    """Group peaks into merged index windows for windowed fitting.

    Each peak at qposns[i] with scale qscales[i] claims a grid-index interval
    of half-width windowextend_qscales*qscales[i]/qgrid[1] around its
    (fractional) position; overlapping intervals are merged into one window.

    Returns a tuple of ([minind, maxind+1], peakindexlist) pairs, one per
    window, with the index range clipped to [0, qgrid[2]).
    """
    posns=ind_qgrid_q(qgrid, qposns, fractional=True)
    widths=1.0*windowextend_qscales*qscales/qgrid[1]
    #print [[p, w] for p, w in zip(posns, widths)]
    extentsets=[set(range(int(round(p-w)), 1+int(round(p+w)))) for p, w in zip(posns, widths)]
    #print extentsets
    windowsets=[]
    currentset=copy.copy(extentsets[0])
    peakindlists=[]
    currentpeaks=[]
    for i in range(len(extentsets)):
        #if len(currentset&extentsets[i])>0: this is good enough if later peaks can't extend throught he current peak to previous windows but just in case...
        # merge peak i into the current window if its extent, unioned with all
        # later extents, still touches the current window
        testset=copy.copy(extentsets[i])
        for j in range(i, len(extentsets)):
            testset|=extentsets[j]
        if len(currentset&testset)>0:
            currentset|=extentsets[i]
            currentpeaks+=[i]
        else:
            windowsets+=[currentset]
            peakindlists+=[currentpeaks]
            currentset=extentsets[i]
            currentpeaks=[i]
    if len(currentset)>0:
        windowsets+=[currentset]
        peakindlists+=[currentpeaks]
    # convert each merged window to a clipped [min, max+1] index range
    indrangeandpeakinds=tuple([])
    for w, p in zip(windowsets, peakindlists):
        minind=max(min(w), 0)
        maxplusone=min(1+max(w), qgrid[2])
        indrangeandpeakinds+=(([minind, maxplusone], p),)
    return indrangeandpeakinds
def fillgapswithinterp(allindslist, partindslist, partvals, indexinterval_fitinds=8):#allindslist equally spaced and contiguous
    """Fill unsampled stretches of a 1-d signal by spline interpolation.

    allindslist: full, contiguous, equally spaced index list.
    partindslist, partvals: the sampled indices and their values.
    indexinterval_fitinds: stride for picking fit points flanking each gap.
    Returns a float32 array over allindslist with every gap filled.

    NOTE(review): scipy.interpolate.spline was removed from SciPy (>=1.3);
    the commented-out UnivariateSpline variant is the likely replacement.
    NOTE(review): the range(...)+range(...) concatenation below is
    Python-2-only (py3 ranges don't support +).
    """
    partindsset=set(partindslist)
    partvals=numpy.float32(partvals)
    # collect start/stop index pairs bracketing each unsampled gap
    if 0 in partindslist:
        startinds=[]
    else:
        startinds=[0]
    for i in allindslist[:-1]:
        if (i in partindsset) and not (i+1 in partindsset):
            startinds+=[i+1]
    stopinds=[]
    for i in allindslist[1:]:
        if (i in partindsset) and not (i-1 in partindsset):
            stopinds+=[i-1]
    if not (allindslist[-1] in partindslist):
        stopinds+=[allindslist[-1]]
    fullvals=numpy.zeros(len(allindslist), dtype='float32')
    fullvals[partindslist]=partvals[:]
    for i, j in zip(startinds, stopinds):
        indstofill=numpy.float32(range(i, j+1))
        fitinds=sorted(list(partindsset.intersection(set(range(i-indexinterval_fitinds*(len(indstofill)-3), i, indexinterval_fitinds)+range(j+1, j+1+indexinterval_fitinds*(3+len(indstofill)), indexinterval_fitinds))))) #use range ainstead of allindslist becuase could eb out of range
        fitvals=numpy.float32([partvals[partindslist.index(f)] for f in fitinds])
        fitinds=numpy.float32(fitinds)
        # spline order limited by the number of available fit points
        splineorder=min(len(fitinds)-1, 3)
        if splineorder==0:#only one data point to use. can't be no data points to use becuase thie hole has an edge
            fillvals=numpy.float32([fitvals[0]]*len(indstofill))
        else:
#            interpfcn=scipy.interpolate.UnivariateSpline(fitinds,fitvals,k=splineorder)
#            fillvals=numpy.float32(interpfcn(indstofill))
            fillvals=scipy.interpolate.spline(fitinds,fitvals,indstofill, k=splineorder)
        fullvals[numpy.uint16(numpy.round(indstofill))]=fillvals[:]
    return fullvals
def stripbadcharsfromnumstr(numstr):
    """Strip every character that is not a digit, '.', or '-' from a numeric
    string (e.g. units or stray whitespace) and return what remains."""
    return ''.join(ch for ch in numstr if ch.isdigit() or ch in '.-')
#def cart_comp(comp):
# if isinstance(comp, list) or (isinstance(comp, numpy.ndarray) and comp.ndim==1):
# return [1.-comp[0]-0.5*comp[1], comp[1]]
# else:
# return numpy.float32([1.-comp[:, 0]-0.5*comp[:, 1], comp[:, 1]]).T
def cart_comp(comp):
    """Map a ternary composition (or an array of them) to 2-d cartesian
    coordinates: x = 1 - comp0 - 0.5*comp1, y = comp1."""
    comp = numpy.float32(comp)
    if comp.ndim == 1:
        xcoord = 1. - comp[0] - 0.5 * comp[1]
        return [xcoord, comp[1]]
    xcoords = 1. - comp[:, 0] - 0.5 * comp[:, 1]
    return numpy.float32([xcoords, comp[:, 1]]).T
def compdistarr_comp(comp):#comp must be array of compositions, each element of comp is supposed to be a 3-array of the fractions
    """Pairwise Euclidean distance matrix between composition vectors, scaled
    by 1/sqrt(2); comp must be an array of composition arrays."""
    rawdists = [[numpy.sqrt(((u - v)**2).sum()) for v in comp] for u in comp]
    return numpy.float32(numpy.float64(rawdists) / numpy.sqrt(2.))
def findcompnieghbors(comp, pointlist=None, critcompdist=.15):#returns a list, the ith element is a list of the indeces of comp the are neighbors of comp[i]. if i is not in pointlist, it does not have neighbors and is noone's neighbor
    """Composition-space neighbor lists.

    For effectively binary sample sets, adjacency is taken from the sort order
    of the single varying composition axis; otherwise compositions are mapped
    to cartesian coordinates and triangulated (dlny -- presumably a Delaunay
    triangulation module, TODO confirm the import). Neighbor pairs farther
    apart than critcompdist (compdistarr_comp units) are dropped.
    """
    comp=numpy.float32(comp)
    if pointlist is None:
        pointlist=range(comp.shape[0])
    pointlist=list(pointlist)
    allind=range(comp.shape[0])
    comp=comp[numpy.uint16(pointlist)]
    finitecompaxes=numpy.where(comp.sum(axis=0)>0.)[0]
    if comp.shape[1]==2 or len(finitecompaxes)<=2: #binary
        print 'USED SIMPLY BINARY FORMULA FOR NEIGHBORS'
        # neighbors are the previous/next points along the sorted axis
        a=comp[:, finitecompaxes[0]]
        sortind=a.argsort()
        disp=numpy.array([-1, 1], dtype='int16')
        neighbors=[[sortind[j] for j in numpy.where(sortind==i)[0][0]+disp if j>=0 and j<len(pointlist)] for i in range(len(pointlist))]
        neighbors=[sorted(n) for n in neighbors]
    else:
        cart=cart_comp(comp)
        tri=dlny.Triangulation(cart[:, 0], cart[:, 1])
        neighdict=tri.node_graph()
        neighbors=[sorted(list(neighdict[k])) for k in sorted(list(neighdict.keys()))]
    compdist=compdistarr_comp(comp)
    n=[]
    for i in allind:
        if i in pointlist:
            j=pointlist.index(i)
            # keep only neighbors within the critical composition distance
            n+=[[pointlist[ind] for ind in neighbors[j] if compdist[ind, j]<critcompdist]]
        else:
            n+=[[]]
    return n
def findposnnieghbors(xcoords, zcoords, pointlist=None, critdist=999.):#returns a list, the ith element is a list of the indeces of comp the are neighbors of comp[i]. if i is not in pointlist, it does not have neighbors and is noone's neighbor
    """Spatial neighbor lists from a Delaunay triangulation of (x, z) sample
    positions (dlny -- presumably a Delaunay module, TODO confirm import).
    Points outside pointlist have no neighbors and are nobody's neighbor;
    neighbor pairs farther apart than critdist are dropped.
    """
    xcoords=numpy.float32(xcoords)
    zcoords=numpy.float32(zcoords)
    if pointlist is None:
        pointlist=range(xcoords.shape[0])
    pointlist=list(pointlist)
    allind=range(xcoords.shape[0])
    xcoords=xcoords[numpy.uint16(pointlist)]
    zcoords=zcoords[numpy.uint16(pointlist)]
    tri=dlny.Triangulation(xcoords, zcoords)
    neighdict=tri.node_graph()
    neighbors=[sorted(list(neighdict[k])) for k in sorted(list(neighdict.keys()))]
    # full pairwise distance matrix among the selected points
    dist=numpy.sqrt(numpy.add.outer(xcoords, -1.0*xcoords)**2+numpy.add.outer(zcoords, -1.0*zcoords)**2)
    n=[]
    for i in allind:
        if i in pointlist:
            j=pointlist.index(i)
            n+=[[pointlist[ind] for ind in neighbors[j] if dist[ind, j]<critdist]]
        else:
            n+=[[]]
    return n
def findneighborswithinradius(distarray, critdist, pointlist=None): #distarray should be squre array where i,j is the distance between i and j
    """For each index, list every other pointlist member closer than critdist
    (distarray is a square distance matrix). Indices outside pointlist have no
    neighbors and are nobody's neighbor."""
    if pointlist is None:
        pointlist = range(distarray.shape[0])
    pointlist = list(pointlist)
    neighlists = []
    for i in range(distarray.shape[0]):
        if i not in pointlist:
            neighlists.append([])
            continue
        neighlists.append([j for j in pointlist if distarray[j, i] < critdist and j != i])
    return neighlists
def myargmin(a): #this is to resolve the problem I reported in numpy Ticket #1429
    """numpy.argmin variant that skips leading NaNs: returns the argmin of the
    slice starting at the first non-NaN entry (0 when everything is NaN)."""
    if len(a.shape) > 1:
        print('WARNING: behavior of myargmin not tested for multidimmensional arrays')
    if numpy.isnan(a[0]):
        if numpy.min(numpy.isnan(a)):  # every entry is NaN
            return 0
        start = numpy.argmin(numpy.isnan(a))  # first non-NaN index
        return start + numpy.argmin(a[start:])
    return numpy.argmin(a)
def myargmax(a): #this is to resolve the problem I reported in numpy Ticket #1429
    """numpy.argmax variant that skips leading NaNs: returns the argmax of the
    slice starting at the first non-NaN entry (0 when everything is NaN)."""
    if len(a.shape) > 1:
        # BUGFIX: the warning previously named myargmin (copy/paste error)
        print('WARNING: behavior of myargmax not tested for multidimmensional arrays')
    if not numpy.isnan(a[0]):
        return numpy.argmax(a)
    if numpy.min(numpy.isnan(a)):  # everything is nan
        return 0
    ind = numpy.argmin(numpy.isnan(a))  # first non-NaN index
    return ind + numpy.argmax(a[ind:])
def dezing(arr, critval=None):
    """Remove isolated saturated pixels ("zingers") from a 2-d image in place.

    An interior pixel >= critval whose 4 nearest neighbors are all < critval
    is replaced by the average of those neighbors. Defaults: integer arrays
    use the dtype's max value (saturation); float arrays use arr.max().
    Returns the same (mutated) array.
    """
    if critval is None:
        # BUGFIX: isinstance(arr[0,0], int) is False for numpy integer
        # scalars, so the iinfo branch was dead code; test the dtype instead.
        if numpy.issubdtype(arr.dtype, numpy.integer):
            critval = numpy.iinfo(arr.dtype).max
        else:
            critval = arr.max()
    hot = numpy.where((arr[1:-1, 1:-1] >= critval)
                      * (arr[:-2, 1:-1] < critval) * (arr[2:, 1:-1] < critval)
                      * (arr[1:-1, :-2] < critval) * (arr[1:-1, 2:] < critval))
    r = hot[0] + 1
    c = hot[1] + 1
    arr[r, c] = (arr[r-1, c] + arr[r+1, c] + arr[r, c-1] + arr[r, c+1]) / 4
    return arr
def removesinglepixoutliers(arr,critratiotoneighbors=1.5, removepctilebeforeratio=None, returninds=False):
    """Replace isolated hot pixels in place by the average of their 4 neighbors.

    An interior pixel is an outlier when it exceeds critratiotoneighbors times
    each of its 4 nearest neighbors. If removepctilebeforeratio is given, that
    fractional percentile of the image is subtracted (clipped at 0) before
    applying the ratio test. Returns arr, plus the replaced (row, col) index
    arrays when returninds is True.
    """
    #c=numpy.where(arr[1:-1,1:-1]>(critratiotoneighbors*(arr[:-2,1:-1]+arr[2:,1:-1]+arr[1:-1,:-2]+arr[1:-1,2:])))
    if removepctilebeforeratio is None:
        c=numpy.where((arr[1:-1,1:-1]>(critratiotoneighbors*arr[:-2,1:-1]))*\
        (arr[1:-1,1:-1]>(critratiotoneighbors*arr[2:,1:-1]))*\
        (arr[1:-1,1:-1]>(critratiotoneighbors*arr[1:-1,:-2]))*\
        (arr[1:-1,1:-1]>(critratiotoneighbors*arr[1:-1,2:])))
    else:
        arr2=copy.copy(arr)
        temp=numpy.sort(arr2.flatten())
        # NOTE(review): removepctilebeforeratio*arr2.size//1 yields a *float*
        # index (modern numpy rejects these); should be wrapped in int().
        v=temp[removepctilebeforeratio*arr2.size//1]
        print 'subtracting ', v, ' before removing outliers'
        arr2-=v
        arr2[arr<v]=0#to avoid problems with negative subtraction result in uint arrays
        c=numpy.where((arr2[1:-1,1:-1]>(critratiotoneighbors*arr2[:-2,1:-1]))*\
        (arr2[1:-1,1:-1]>(critratiotoneighbors*arr2[2:,1:-1]))*\
        (arr2[1:-1,1:-1]>(critratiotoneighbors*arr2[1:-1,:-2]))*\
        (arr2[1:-1,1:-1]>(critratiotoneighbors*arr2[1:-1,2:])))
    c0=c[0]+1
    c1=c[1]+1
    print len(c0), ' pixels being replaced'
    arr[c0,c1]=(arr[c0-1,c1]+arr[c0+1,c1]+arr[c0,c1-1]+arr[c0,c1+1])/4
    if returninds:
        return arr, (c0, c1)
    return arr
def readh5pyarray(arrpoint):
    """Read an h5py dataset (or any sliceable array) fully into memory by
    full-slicing every dimension.

    Replaces the old eval()-built slice string with an explicit tuple of
    slices -- same result, no eval, and it also works for 0-d datasets
    (where the eval string was malformed).
    """
    return arrpoint[tuple(slice(None) for _ in range(len(arrpoint.shape)))]
def readblin(h5mar, bin=0):
    """Read the two background-line datasets ('blin0'/'blin1', or their
    'binN' variants when bin is nonzero) from the h5 group *h5mar*.
    Returns (data, weights): data stacks the two arrays, weights is the
    transposed stack of each dataset's 'weights' attribute."""
    names = ['blin0', 'blin1']
    if bin:
        names = ['%sbin%d' % (name, bin) for name in names]
    data = numpy.array([readh5pyarray(h5mar[name]) for name in names])
    weights = numpy.array([h5mar[name].attrs['weights'][:] for name in names]).T
    return data, weights
class calc_blin_factors():#can be used for 1-d or 2-d data if for 2-d you collaps it to 1-d using killmap indeces
    """Iteratively determine weights (f0, f1) for two background patterns so
    that f0*b0 + f1*b1 sits just under the data: the overall gain is tuned
    (via the module-level ``interp`` root-finder) so only a fraction
    ``fraczeroed`` of pixels fall below the background, and the f0/f1 balance
    is shifted in steps worth ``factorprecision`` of counts whenever that
    lowers the zeroed fraction. Results land in self.f0, self.f1, self.fracz;
    warnings accumulate in self.warning.
    """
    #assumes all have been scaled by IC3
    def __init__(self, data, b0, b1, f0=0.5, f1=0.5, fraczeroed=0.005, factorprecision=0.005, maxtries=100, refineduringloop=True):
        self.warning=''
        self.fz=fraczeroed
        self.fp=factorprecision
        self.dtype=data.dtype
        self.data=data
        self.b0=b0
        self.b1=b1
        self.numpix=numpy.array(data.shape).prod()
        #print 'self.numpix', self.numpix
        # step sizes chosen so one step of f0 or f1 moves the same number of counts
        b0counts=self.b0.sum()
        b1counts=self.b1.sum()
        delcounts=min(b0counts, b1counts)*self.fp
        delf0=delcounts/(1.0*b0counts)
        delf1=delcounts/(1.0*b1counts)
        self.f0=f0
        self.f1=f1
        #print [self.fracz_gain(x) for x in numpy.array([.1, .3, 0.8, 0.9, 1.0, 1.1, 1.2, 2., 5., 10.])]
        # initial gain: solve fracz_gain(gain) == fz over a coarse trial grid
        self.gain=interp(self.fracz_gain, numpy.array([.1, .3, 0.8, 0.9, 1.0, 1.1, 1.2, 2., 5., 10.]), self.fz, maxtries=maxtries)
        self.gaincheck()
        self.gainrefine()
        self.f0*=self.gain
        self.f1*=self.gain
        fznow=self.fracz_f0f1(self.f0, self.f1)
        fz0up=self.fracz_f0f1(self.f0+delf0, self.f1-delf1)
        fz1up=self.fracz_f0f1(self.f0-delf0, self.f1+delf1)
        tries=0
        #print 'starting it ', self.f0, self.f1, fznow, fz0up, fz1up
        # shuffle counts between f0 and f1 while doing so lowers (or ties with
        # lower background counts) the zeroed fraction
        while ((fz0up<=fznow) or (fz1up<=fznow)) and tries<maxtries:
            tries+=1
            bool0up=fz0up<fznow
            bool1up=fz1up<fznow
            if fz0up==fznow:
                temp=self.btot_gen(self.f0, self.f1)
                temp2=self.btot_gen(self.f0+delf0, self.f1-delf1)
                bool0up=temp2[temp2>self.data].sum()<temp[temp>self.data].sum()#if no change in fraczeroed, but the totals counts over the bcknd is reduced, do it
            elif fz1up==fznow:
                temp=self.btot_gen(self.f0, self.f1)
                temp2=self.btot_gen(self.f0-delf0, self.f1+delf1)
                bool1up=temp2[temp2>self.data].sum()<temp[temp>self.data].sum()
            if bool0up:
                self.f0+=delf0
                self.f1-=delf1
            elif bool1up:
                self.f0-=delf0
                self.f1+=delf1
            else:
                break
            # re-tune the overall gain after each balance shift
            trylist=numpy.array(range(4))*self.fp+1
            temp=interp(self.fracz_gain, trylist, self.fz, maxtries=maxtries)
            if temp is None:
                print "INTERPOLATION ERROR increasing gain:", trylist
                self.gain=1.
                self.gaincheck()
                self.gainrefine()
            else:
                self.gain=temp
                self.gaincheck()
                #print 'after gain check ', self.gain, self.fracz_gain(self.gain)
                if refineduringloop:
                    self.gainrefine()
                #print 'after gain refine ', self.gain, self.fracz_gain(self.gain)
            self.f0*=self.gain
            self.f1*=self.gain
            fznow=self.fracz_f0f1(self.f0, self.f1)
            fz0up=self.fracz_f0f1(self.f0+delf0, self.f1-delf1)
            fz1up=self.fracz_f0f1(self.f0-delf0, self.f1+delf1)
            #print self.f0, self.f1, fznow, fz0up, fz1up
        if not tries<maxtries:
            self.warning+='iterative determination of blin factors ended by max iterations\n'
        # final refinement pass and bookkeeping of the achieved fraction
        self.gain=1.
        self.gainrefine()
        self.f0*=self.gain
        self.f1*=self.gain
        totbcknd=self.btot_gen(self.f0, self.f1)
        self.fracz=(totbcknd>self.data).sum()/numpy.float32(self.numpix)
        #print self.f0, self.f1, self.fracz
        #self.plot()
    def gaincheck(self, maxallowed=0.99):#if comes in bwteen 0 and maxallowed nothing done, otherwise gets it between 0 and maxallowed
        """Nudge self.gain upward (in shrinking steps) until the zeroed
        fraction is strictly between 0 and maxallowed."""
        fz=self.fracz_gain(self.gain)
        if fz>0.:
            return
        shrinkfactor=1.
        while fz==0.:
            self.gain+=self.fp/shrinkfactor
            fz=self.fracz_gain(self.gain)
            if fz>maxallowed:
                # overshot: back off and halve the step
                fz=0.
                self.gain-=self.fp/shrinkfactor
                shrinkfactor*=2.
    def gainrefine(self):
        """Step self.gain by +/-fp until fracz_gain crosses self.fz, keeping
        the side of the crossing that low-balls the zeroed fraction."""
        lowbool=self.fracz_gain(self.gain)<self.fz
        newlowbool=lowbool
        delgain=(lowbool*2.-1.)*self.fp
        while lowbool==newlowbool:
            self.gain+=delgain
            newlowbool=self.fracz_gain(self.gain)<self.fz
        if lowbool: #use the factors that low-ball the nz
            self.gain-=delgain
            self.gaincheck(maxallowed=self.fz)
    def fracz_f0f1(self, f0, f1):
        """Fraction of pixels where the background f0*b0+f1*b1 exceeds the data."""
        return (self.btot_gen(f0, f1)>self.data).sum(dtype='float32')/self.numpix
    def fracz_gain(self, gain):
        """Zeroed fraction when both current factors are scaled by *gain*."""
        return self.fracz_f0f1(self.f0*gain, self.f1*gain)
    def btot_gen(self, f0, f1):
        """Total background pattern for the given weights."""
        return self.b0*f0+self.b1*f1
    def plot(self):
        """Diagnostic pylab plot of data, both backgrounds, and their total."""
        pylab.plot(self.data, label='data')
        pylab.plot(self.b0, label='b0')
        pylab.plot(self.b1, label='b1')
        pylab.plot(self.btot_gen(self.f0, self.f1), label='totb')
        pylab.legend()
        pylab.show()
def f0_f1_exp_ic(exposures_data, ic_data, exposures_b0, ic_b0, exposures_b1, ic_b1):#this can be used to get the weights used to match 2 vectros to a data vector using any 2 linear independant components, in this case ion counts and number of exposures
    """Solve for weights (f0, f1) so that f0*(e0, c0) + f1*(e1, c1) matches
    the data's (exposures, ion counts); degenerate/singular combinations are
    handled with ad-hoc fallbacks. Returns the tuple (f0, f1)."""
    e, c = exposures_data, ic_data
    e0, c0 = exposures_b0, ic_b0
    e1, c1 = exposures_b1, ic_b1
    # trivial cases: one component is identically zero
    if e0 == 0 and c0 == 0:
        return 0, 1
    if e1 == 0 and c1 == 0:
        return 1, 0
    if e0 == 0:  # b0 is pure c
        f0 = c / c0
        f1 = 0. if e1 == 0 else e / e1
        return f0, f1
    denom = c1 - c0 * e1 / e0
    if denom == 0:  # singular 2x2 system
        if c0 == 0:
            f0 = e / e0
            f1 = 0. if c1 == 0 else c / c1
        else:
            # just a guess of something reasonable in this strange condition
            f0 = (c / c0 + e / e0) / 2.
            if c1 == 0 or e1 == 0:
                f1 = 0.
            else:
                f1 = ((c - f0 * c0) / c1 + (e - f0 * e0) / e1) / 2.
        return f0, f1
    # regular solution of the linear system (no divisors are zero here)
    f1 = (c - c0 * e / e0) / denom
    f0 = (e - f1 * e1) / e0
    return f0, f1
| johnmgregoire/vanDover_CHESS | xrd_math_fcns.py | Python | bsd-3-clause | 112,346 | [
"Gaussian"
] | 71db6ea5d5ab04ec60edc5528701776d190d4837740b67151d2e472a14cbac6f |
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use input() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
import tarfile
import tensorflow.python.platform
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPU's prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Attach a histogram and a sparsity summary to an activation tensor.

  Strips any 'tower_<n>/' prefix from the op name so that summaries coming
  from different GPU towers are aggregated under one name on tensorboard.

  Args:
    x: activation Tensor to summarize.

  Returns:
    nothing
  """
  # Drop the per-tower prefix for a clean presentation on tensorboard.
  base_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.histogram_summary(base_name + '/activations', x)
  tf.scalar_summary(base_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Create (or fetch) a Variable that is pinned to CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  # Keeping parameters on the CPU lets multiple GPU towers share them.
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Create a truncated-normal-initialized Variable, optionally with L2 decay.

  An L2 weight-decay term is added to the 'losses' collection only when wd
  is a non-zero value (passing None or 0.0 disables it).

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  initializer = tf.truncated_normal_initializer(stddev=stddev)
  var = _variable_on_cpu(name, shape, initializer)
  if wd:
    # Register wd * ||var||^2 / 2 as an additional loss term.
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
def _generate_image_and_label_batch(image, label, min_queue_examples):
  """Build a shuffled batch of images and labels from single examples.

  Args:
    image: 3-D Tensor of [IMAGE_SIZE, IMAGE_SIZE, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # 16 preprocessing threads keep the shuffling queue well mixed while
  # 'FLAGS.batch_size' images + labels are dequeued per step.
  num_preprocess_threads = 16
  images, label_batch = tf.train.shuffle_batch(
      [image, label],
      batch_size=FLAGS.batch_size,
      num_threads=num_preprocess_threads,
      capacity=min_queue_examples + 3 * FLAGS.batch_size,
      min_after_dequeue=min_queue_examples)

  # Display the training images in the visualizer.
  tf.image_summary('images', images)

  labels = tf.reshape(label_batch, [FLAGS.batch_size])
  return images, labels
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Raises:
    ValueError: if no data_dir, or if a data batch file is missing.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if not FLAGS.data_dir:
    # The docstring promises a ValueError; check explicitly (as inputs()
    # does) instead of failing later with a confusing file-not-found error.
    raise ValueError('Please supply a data_dir')
  # CIFAR-10 ships five training batches, data_batch_1.bin .. data_batch_5.bin.
  # xrange excludes its upper bound, hence (1, 6); the previous (1, 5) silently
  # dropped data_batch_5.bin, training on only 80% of the data.
  filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                            'data_batch_%d.bin' % i)
               for i in xrange(1, 6)]
  for f in filenames:
    if not gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = cifar10_input.read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.

  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.image.random_crop(reshaped_image, [height, width])

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order of their operation.
  distorted_image = tf.image.random_brightness(distorted_image,
                                               max_delta=63)
  distorted_image = tf.image.random_contrast(distorted_image,
                                             lower=0.2, upper=1.8)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(distorted_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                           min_fraction_of_examples_in_queue)
  print ('Filling queue with %d CIFAR images before starting to train. '
         'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Raises:
    ValueError: if no data_dir, or if a data batch file is missing.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  if not eval_data:
    # CIFAR-10 ships five training batches, data_batch_1.bin ..
    # data_batch_5.bin; xrange excludes its upper bound, hence (1, 6).
    # The previous (1, 5) silently dropped data_batch_5.bin.
    filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                              'data_batch_%d.bin' % i)
                 for i in xrange(1, 6)]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
  else:
    filenames = [os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin',
                              'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

  for f in filenames:
    if not gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = cifar10_input.read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for evaluation.
  # Crop the central [height, width] of the image.
  resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                         width, height)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_whitening(resized_image)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(float_image, read_input.label,
                                         min_queue_examples)
def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # conv1: 5x5 convolution over the 3 input channels, 64 feature maps.
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                         stddev=1e-4, wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv1)

  # pool1: 3x3 overlapping max pooling with stride 2.
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1: local response normalization.
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')

  # conv2: 5x5 convolution, 64 -> 64 feature maps.
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64],
                                         stddev=1e-4, wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)
    _activation_summary(conv2)

  # norm2: normalization is applied before pooling in this layer.
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3: first fully connected layer (384 units), with weight decay.
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    dim = 1
    for d in pool2.get_shape()[1:].as_list():
      dim *= d
    reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu_layer(reshape, weights, biases, name=scope.name)
    _activation_summary(local3)

  # local4: second fully connected layer (192 units).
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu_layer(local3, weights, biases, name=scope.name)
    _activation_summary(local4)

  # softmax, i.e. softmax(WX + b): linear layer producing the logits
  # (the softmax itself is applied inside the loss).
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.nn.xw_plus_b(local4, weights, biases, name=scope.name)
    _activation_summary(softmax_linear)

  return softmax_linear
def loss(logits, labels):
  """Combine the softmax cross entropy loss with all weight decay terms.

  Adds the batch-averaged cross entropy to the 'losses' collection and
  returns the sum of everything in that collection (summaries for "Loss"
  and "Loss/avg" are attached elsewhere).

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Turn the sparse integer labels into one-hot dense labels of
  # shape [batch_size, NUM_CLASSES].
  batch_size = FLAGS.batch_size
  sparse_labels = tf.reshape(labels, [batch_size, 1])
  indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
  concated = tf.concat(1, [indices, sparse_labels])
  dense_labels = tf.sparse_to_dense(concated,
                                    [batch_size, NUM_CLASSES],
                                    1.0, 0.0)

  # Average the cross entropy over the batch and register it as a loss.
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits, dense_labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # Total loss = cross entropy + all of the weight decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Attach moving-average summaries to every loss in the CIFAR-10 model.

  Generates moving averages for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Maintain an exponential moving average of every individual loss plus
  # the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  all_losses = losses + [total_loss]
  loss_averages_op = loss_averages.apply(all_losses)

  for loss_op in all_losses:
    # The raw value keeps a ' (raw)' suffix; the smoothed value takes the
    # original op name, so dashboards show the averaged curve by default.
    tf.scalar_summary(loss_op.op.name + ' (raw)', loss_op)
    tf.scalar_summary(loss_op.op.name, loss_averages.average(loss_op))

  return loss_averages_op
def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)

  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.scalar_summary('learning_rate', lr)

  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)

  # Compute gradients only after the loss averages have been updated.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)

  # Apply gradients.
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.histogram_summary(var.op.name, var)

  # Add histograms for gradients.
  for grad, var in grads:
    if grad:
      tf.histogram_summary(var.op.name + '/gradients', grad)

  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())

  # The returned op does no work itself; running it forces the gradient
  # application and the moving-average update through control dependencies.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')

  return train_op
def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website.

  The archive is downloaded into FLAGS.data_dir only when it is not
  already present, then unpacked in place.
  """
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    # Report download progress in place on a single console line.
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
                                             reporthook=_progress)
    print()
    statinfo = os.stat(filepath)
    # Fixed: message previously misspelled 'Successfully' as 'Succesfully'.
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)
| kcartier/tensorflow-toe-in-the-water | tensorflow/models/image/cifar10/cifar10.py | Python | apache-2.0 | 17,878 | [
"Gaussian"
] | 068ffef01c6e9eec52c010dd1fb7ccf862fd85de846c2127d039b5715cc6e72a |
import logging
import datetime
import simplejson
import tempfile
try:
from hashlib import md5
except:
from md5 import md5
from dirac.lib.base import *
from dirac.lib.diset import getRPCClient, getTransferClient
from dirac.lib.credentials import getUsername, getSelectedGroup, getSelectedSetup
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time, List
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Security import CS
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from dirac.lib.webBase import defaultRedirect
log = logging.getLogger( __name__ )
class AccountingplotsController( BaseController ):
  """Pylons controller for the accounting plots web pages.

  Serves the per-type plot pages, answers AJAX queries for selection
  values and plot definitions, generates plots through the
  Accounting/ReportGenerator service and streams back plot images and
  raw CSV data.
  """

  # Shared cache (300 s TTL) for unique key values and report lists.
  __keysCache = DictCache()

  def __getUniqueKeyValues( self, typeName ):
    """Return the unique key values for an accounting type, cached.

    The cache key includes the username for NormalUser groups (their view
    may be filtered per user) and only the group otherwise. Site names
    are reordered by MoU tier level, then alphabetically within a level.
    """
    userGroup = getSelectedGroup()
    if 'NormalUser' in CS.getPropertiesForGroup( userGroup ):
      cacheKey = ( getUsername(), userGroup, getSelectedSetup(), typeName )
    else:
      cacheKey = ( userGroup, getSelectedSetup(), typeName )
    data = AccountingplotsController.__keysCache.get( cacheKey )
    if not data:
      rpcClient = getRPCClient( "Accounting/ReportGenerator" )
      retVal = rpcClient.listUniqueKeyValues( typeName )
      if 'rpcStub' in retVal:
        del( retVal[ 'rpcStub' ] )
      if not retVal[ 'OK' ]:
        return retVal
      #Site ordering based on TierLevel / alpha
      if 'Site' in retVal[ 'Value' ]:
        siteLevel = {}
        for siteName in retVal[ 'Value' ][ 'Site' ]:
          sitePrefix = siteName.split( "." )[0].strip()
          level = gConfig.getValue( "/Resources/Sites/%s/%s/MoUTierLevel" % ( sitePrefix, siteName ), 10 )
          if level not in siteLevel:
            siteLevel[ level ] = []
          siteLevel[ level ].append( siteName )
        orderedSites = []
        for level in sorted( siteLevel ):
          orderedSites.extend( sorted( siteLevel[ level ] ) )
        retVal[ 'Value' ][ 'Site' ] = orderedSites
      data = retVal
      AccountingplotsController.__keysCache.add( cacheKey, 300, data )
    return data

  def index( self ):
    """Entry point: redirect to the default web location."""
    # Return a rendered template
    #   return render('/some/template.mako')
    # or, Return a response
    return defaultRedirect()

  def dataOperation( self ):
    """Show the DataOperation plots page."""
    return self.__showPlotPage( "DataOperation", "/systems/accounting/dataOperation.mako" )

  def job( self ):
    """Show the Job plots page."""
    return self.__showPlotPage( "Job", "/systems/accounting/job.mako" )

  def WMSHistory( self ):
    """Show the WMSHistory plots page."""
    return self.__showPlotPage( "WMSHistory", "/systems/accounting/WMSHistory.mako" )

  def pilot( self ):
    """Show the Pilot plots page."""
    return self.__showPlotPage( "Pilot", "/systems/accounting/Pilot.mako" )

  def SRMSpaceTokenDeployment( self ):
    """Show the SRMSpaceTokenDeployment plots page."""
    return self.__showPlotPage( "SRMSpaceTokenDeployment", "/systems/accounting/SRMSpaceTokenDeployment.mako" )

  def plotPage( self ):
    """Show the plots page for the type given in the 'typeName' parameter."""
    try:
      typeName = str( request.params[ 'typeName' ] )
    except:
      c.errorMessage = "Oops. missing type"
      return render( "/error.mako" )
    return self.__showPlotPage( typeName , "/systems/accounting/%s.mako" % typeName )

  def __showPlotPage( self, typeName, templateFile ):
    """Render templateFile with the selection values and report list for typeName."""
    #Get unique key values
    retVal = self.__getUniqueKeyValues( typeName )
    if not retVal[ 'OK' ]:
      c.error = retVal[ 'Message' ]
      return render ( "/error.mako" )
    c.selectionValues = simplejson.dumps( retVal[ 'Value' ] )
    #Cache for plotsList?
    data = AccountingplotsController.__keysCache.get( "reportsList:%s" % typeName )
    if not data:
      repClient = ReportsClient( rpcClient = getRPCClient( "Accounting/ReportGenerator" ) )
      retVal = repClient.listReports( typeName )
      if not retVal[ 'OK' ]:
        c.error = retVal[ 'Message' ]
        return render ( "/error.mako" )
      data = simplejson.dumps( retVal[ 'Value' ] )
      AccountingplotsController.__keysCache.add( "reportsList:%s" % typeName, 300, data )
    c.plotsList = data
    return render ( templateFile )

  @jsonify
  def getKeyValuesForType( self ):
    """AJAX: return the unique key values for the requested type."""
    try:
      typeName = str( request.params[ 'typeName' ] )
    except:
      return S_ERROR( "Missing or invalid type name!" )
    retVal = self.__getUniqueKeyValues( typeName )
    if not retVal[ 'OK' ] and 'rpcStub' in retVal:
      del( retVal[ 'rpcStub' ] )
    return retVal

  def __parseFormParams(self):
    """Parse the current request's form parameters into a report request tuple."""
    params = request.params
    return parseFormParams(params)

  def __translateToExpectedExtResult( self, retVal ):
    """Convert a DIRAC return structure into the ExtJS success/errors shape."""
    if retVal[ 'OK' ]:
      return { 'success' : True, 'data' : retVal[ 'Value' ][ 'plot' ] }
    else:
      return { 'success' : False, 'errors' : retVal[ 'Message' ] }

  def __queryForPlot( self ):
    """Ask the ReportGenerator to produce a (delayed) plot for the form params."""
    retVal = self.__parseFormParams()
    if not retVal[ 'OK' ]:
      return retVal
    params = retVal[ 'Value' ]
    repClient = ReportsClient( rpcClient = getRPCClient( "Accounting/ReportGenerator" ) )
    retVal = repClient.generateDelayedPlot( *params )
    return retVal

  def getPlotData( self ):
    """Return the raw data of a report as a CSV attachment."""
    retVal = self.__parseFormParams()
    if not retVal[ 'OK' ]:
      c.error = retVal[ 'Message' ]
      return render( "/error.mako" )
    params = retVal[ 'Value' ]
    repClient = ReportsClient( rpcClient = getRPCClient( "Accounting/ReportGenerator" ) )
    retVal = repClient.getReport( *params )
    if not retVal[ 'OK' ]:
      c.error = retVal[ 'Message' ]
      return render( "/error.mako" )
    rawData = retVal[ 'Value' ]
    groupKeys = rawData[ 'data' ].keys()
    groupKeys.sort()
    if 'granularity' in rawData:
      # Time-bucketed report: one CSV row per time slot, one column per group.
      granularity = rawData[ 'granularity' ]
      data = rawData['data']
      tS = int( Time.toEpoch( params[2] ) )
      timeStart = tS - tS % granularity
      strData = "epoch,%s\n" % ",".join( groupKeys )
      for timeSlot in range( timeStart, int( Time.toEpoch( params[3] ) ), granularity ):
        lineData = [ str( timeSlot ) ]
        for key in groupKeys:
          if timeSlot in data[ key ]:
            lineData.append( str( data[ key ][ timeSlot ] ) )
          else:
            lineData.append( "" )
        strData += "%s\n" % ",".join( lineData )
    else:
      # Scalar report: a header row followed by a single row of values.
      strData = "%s\n" % ",".join( groupKeys )
      strData += ",".join( [ str( rawData[ 'data' ][ k ] ) for k in groupKeys ] )
    response.headers['Content-type'] = 'text/csv'
    response.headers['Content-Disposition'] = 'attachment; filename="%s.csv"' % md5( str( params ) ).hexdigest()
    response.headers['Content-Length'] = len( strData )
    return strData

  @jsonify
  def generatePlot( self ):
    """AJAX: generate a plot and return the ExtJS-shaped result."""
    return self.__translateToExpectedExtResult( self.__queryForPlot() )

  def generatePlotAndGetHTML( self ):
    """Generate a plot and return an HTML <img> snippet referencing it."""
    retVal = self.__queryForPlot()
    if not retVal[ 'OK' ]:
      return "<h2>Can't regenerate plot: %s</h2>" % retVal[ 'Message' ]
    return "<img src='getPlotImg?file=%s'/>" % retVal[ 'Value' ][ 'plot' ]

  def getPlotImg( self ):
    """
    Get plot image
    """
    if 'file' not in request.params:
      c.error = "Maybe you forgot the file?"
      return render( "/error.mako" )
    plotImageFile = str( request.params[ 'file' ] )
    # str.find returns -1 when the substring is absent; the previous check
    # used '< -1', which is never true, so any file name was accepted.
    # Reject names that do not contain '.png'.
    if plotImageFile.find( ".png" ) == -1:
      c.error = "Not a valid image!"
      return render( "/error.mako" )
    transferClient = getTransferClient( "Accounting/ReportGenerator" )
    tempFile = tempfile.TemporaryFile()
    retVal = transferClient.receiveFile( tempFile, plotImageFile )
    if not retVal[ 'OK' ]:
      c.error = retVal[ 'Message' ]
      return render( "/error.mako" )
    tempFile.seek( 0 )
    data = tempFile.read()
    response.headers['Content-type'] = 'image/png'
    response.headers['Content-Disposition'] = 'attachment; filename="%s.png"' % md5( plotImageFile ).hexdigest()
    response.headers['Content-Length'] = len( data )
    response.headers['Content-Transfer-Encoding'] = 'Binary'
    response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate, max-age=0"
    response.headers['Pragma'] = "no-cache"
    response.headers['Expires'] = ( datetime.datetime.utcnow() - datetime.timedelta( minutes = -10 ) ).strftime( "%d %b %Y %H:%M:%S GMT" )
    return data

  @jsonify
  def getPlotListAndSelectionValues(self):
    """AJAX: return both the selection values and the plot list for a type."""
    try:
      typeName = str( request.params[ 'typeName' ] )
    except:
      return S_ERROR( "Missing or invalid type name!" )
    retVal = self.__getUniqueKeyValues( typeName )
    if not retVal[ 'OK' ] and 'rpcStub' in retVal:
      del( retVal[ 'rpcStub' ] )
      return retVal
    selectionValues = retVal['Value']
    data = AccountingplotsController.__keysCache.get( "reportsList:%s" % typeName )
    if not data:
      repClient = ReportsClient( rpcClient = getRPCClient( "Accounting/ReportGenerator" ) )
      retVal = repClient.listReports( typeName )
      if not retVal[ 'OK' ]:
        return retVal
      data = simplejson.dumps( retVal[ 'Value' ] )
      AccountingplotsController.__keysCache.add( "reportsList:%s" % typeName, 300, data )
    try:
      # NOTE(review): eval() of the cached string; the data is produced
      # locally by simplejson.dumps, but simplejson.loads would be safer.
      plotsList = eval(data)
    except:
      return S_ERROR('Failed to convert a string to a list!')
    return S_OK({'SelectionData':selectionValues, 'PlotList':plotsList})
def parseFormParams(params):
  """Parse raw web form parameters into a report request tuple.

  Only parameters whose names start with '_' are considered; the leading
  underscore is stripped. Control parameters (plotTitle, pinDates,
  grouping, typeName, plotName, timeSelector, startTime, endTime and
  'ex_*' extras) are extracted; every remaining value is split on ','
  into a list of selection values.

  Returns:
    S_OK( ( typeName, reportName, start, end, selectionDict, grouping,
            extraParams ) ) or S_ERROR describing the missing parameter.
  """
  pD = {}
  extraParams = {}
  pinDates = False
  for name in params:
    if name.find( "_" ) != 0:
      continue
    value = params[ name ]
    name = name[1:]
    pD[ name ] = str( value )
  #Personalized title?
  if 'plotTitle' in pD:
    extraParams[ 'plotTitle' ] = pD[ 'plotTitle' ]
    del( pD[ 'plotTitle' ] )
  #Pin dates?
  if 'pinDates' in pD:
    pinDates = pD[ 'pinDates' ]
    del( pD[ 'pinDates' ] )
    pinDates = pinDates.lower() in ( "yes", "y", "true", "1" )
  #Get grouping (it intentionally also stays in pD as a selection key)
  if not 'grouping' in pD:
    return S_ERROR( "Missing grouping!" )
  grouping = pD[ 'grouping' ]
  #Get type name
  if not 'typeName' in pD:
    return S_ERROR( "Missing type name!" )
  typeName = pD[ 'typeName' ]
  del( pD[ 'typeName' ] )
  #Get plot name
  if not 'plotName' in pD:
    return S_ERROR( "Missing plot name!" )
  reportName = pD[ 'plotName' ]
  del( pD[ 'plotName' ] )
  #Get times
  if not 'timeSelector' in pD:
    return S_ERROR( "Missing time span!" )
  #Find the proper time!
  pD[ 'timeSelector' ] = int( pD[ 'timeSelector' ] )
  if pD[ 'timeSelector' ] > 0:
    # Relative time span: the last N seconds counted back from now.
    end = Time.dateTime()
    start = end - datetime.timedelta( seconds = pD[ 'timeSelector' ] )
    if not pinDates:
      extraParams[ 'lastSeconds' ] = pD[ 'timeSelector' ]
  else:
    # Absolute time span: explicit startTime (required), endTime (optional).
    if 'endTime' not in pD:
      end = False
    else:
      end = Time.fromString( pD[ 'endTime' ] )
      del( pD[ 'endTime' ] )
    if 'startTime' not in pD:
      # Fixed: the message previously misspelled the parameter as 'starTime'.
      return S_ERROR( "Missing startTime!" )
    else:
      start = Time.fromString( pD[ 'startTime' ] )
      del( pD[ 'startTime' ] )
  del( pD[ 'timeSelector' ] )
  # Copy 'ex_*' parameters into extraParams (they also remain in pD).
  for k in pD:
    if k.find( "ex_" ) == 0:
      extraParams[ k[3:] ] = pD[ k ]
  #Listify the rest
  for selName in pD:
    pD[ selName ] = List.fromChar( pD[ selName ], "," )
  return S_OK( ( typeName, reportName, start, end, pD, grouping, extraParams ) )
| DIRACGrid/DIRACWeb | dirac/controllers/systems/accountingPlots.py | Python | gpl-3.0 | 10,892 | [
"DIRAC"
] | 98ac951e95c16becad5c291e3481c4eab1c882e45356ee3804587bec74e8e005 |
#
# This module contains messages
#
from ige.ospace.Const import *
from ige import NoSuchObjectException
import client, types, string, res, gdata
from ige import log
#
# Transform routines
#
def techID2Name(techID):
    """Translate a technology ID into its display name.

    IDs below 1000 refer to the player's own ship designs; higher IDs
    are regular technologies looked up through the client.
    """
    if techID < 1000:
        return client.getPlayer().shipDesigns[techID].name
    return _(client.getTechInfo(techID).name.encode())
def objID2Name(objID):
    """Translate an object ID into its name (placeholder when unknown)."""
    fetched = client.get(objID, noUpdate = 0)
    return getattr(fetched, 'name', res.getUnknownName())
def objIDList2Names(objIDs):
    """Translate a list of object IDs into a comma separated string of
    'name (owner)' entries; the owner suffix is added only for objects
    owned by somebody else."""
    names = []
    for objID in objIDs:
        obj = client.get(objID, noUpdate = 1)
        owner = ''
        if hasattr(obj, 'owner') and obj.owner != obj.oid:
            try:
                owner = _(' (%s)') % client.get(obj.owner, noUpdate = 1).name
            except AttributeError:
                # Owner object has no name available yet.
                owner = ''
        entry = _('%s%s') % (getattr(obj, 'name', res.getUnknownName()), owner)
        names.append(entry)
    return string.join(names, ', ')
def stratID2Name(resID):
    # Translate a strategic resource ID into its localized display name.
    return _(gdata.stratRes[resID])
def float2percent(number):
    """Convert a fraction (e.g. 0.25) into a truncated integer percentage."""
    return int(100 * number)
def plType2Name(plType):
    # Translate a planet type code into its display name.
    return gdata.planetTypes[plType]
def designID2Name(designID):
    # Translate a ship design ID into the player's design name.
    return client.getPlayer().shipDesigns[designID].name
def votes2Txt((votes, voters)):
    # Format election results into human readable text.
    # votes maps a nominee name (or None for abstentions) to a vote count;
    # voters maps the same keys to the list of voter names.
    # NOTE: tuple parameter unpacking in the signature is Python 2 only.
    lines = []
    nominated = votes.keys()
    # Sort nominees by descending number of votes.
    nominated.sort(lambda a, b: cmp(votes[b], votes[a]))
    for playerName in nominated:
        if playerName == None:
            continue
        l = []
        for name in voters[playerName]:
            l.append(name)
        text = " %s got %d votes from %s." % (
            playerName,
            votes[playerName],
            ", ".join(l),
        )
        lines.append(text)
    # Abstentions are collected under the None key.
    if None in votes:
        l = []
        for name in voters[None]:
            l.append(name)
        text = " %s abstained [%d votes]." % (
            ", ".join(l),
            votes[None],
        )
        lines.append(text)
    return "\n".join(lines)
#
# Data
#
# severity codes
CRI = 3
MAJ = 2
MIN = 1
INFO = 0
NONE = INFO

# i18n (delayed translation)
def N_(msg):
    """Mark a string for delayed translation; returns it unchanged."""
    return msg

# Registry mapping message ID -> (text template, transform funcs, severity).
msgData = {}

def addMsg(msgID, name, transform = None, severity = NONE):
    """Register the template, transforms and severity for a message ID."""
    msgData[msgID] = (name, transform, severity)
# Message catalogue: register the template text, data transform functions
# and severity for every message ID (%(1)s, %(2)s, ... placeholders are
# filled with the transformed data items by getMsgText).
addMsg(MSG_COMPLETED_RESEARCH, N_('Research completed: %(1)s'), (techID2Name,), CRI)
addMsg(MSG_WASTED_SCIPTS, N_('%(1)d research points not used.'), severity = MIN)
addMsg(MSG_CANNOTBUILD_SHLOST, N_('Cannot build on planet - ship may be lost.'), severity = CRI)
addMsg(MSG_CANNOTBUILD_NOSLOT, N_('Cannot build on planet - no free slot.'), severity = CRI)
# NOT NEEDED addMsg(MSG_DESTROYED_BUILDING, N_('Structure destroyed: %(1)s'), (techID2Name,), MAJ)
addMsg(MSG_WASTED_PRODPTS, N_('Construction problem: no task\n\n%(1)d construction points was not used because there was no task to fulfill.'), (int,), severity = INFO)
addMsg(MSG_LOST_PLANET, N_('Planet lost.'), severity = CRI)
addMsg(MSG_COMPLETED_STRUCTURE, N_('Structure completed: %(1)s'), (techID2Name,), MIN)
addMsg(MSG_COMPLETED_SHIP, N_('Ship completed: %(1)s'), (techID2Name,), MIN)
addMsg(MSG_GAINED_PLANET, N_('New planet.'), severity = CRI)
addMsg(MSG_COMBAT_RESULTS, N_('Combat with: %(4)s. HP lost: we %(1)d, they %(2)d.\n\nEnemy lost %(2)d HP, we lost %(1)d HP and %(3)d ships/structures. We attacked/were attacked by %(4)s.'), (int, int, int, objIDList2Names), MAJ)
addMsg(MSG_COMBAT_LOST, N_('Battle lost: we were defeated by %(1)s.'), (objID2Name,), CRI)
addMsg(MSG_DESTROYED_FLEET, N_('Fleet destroyed.'), severity = CRI)
addMsg(MSG_COMBAT_WON, N_('Battle won: we defeated %(1)s.'), (objID2Name,), CRI)
addMsg(MSG_NEW_GOVCENTER, N_('A new government center established.'), severity = CRI)
addMsg(MSG_REVOLT_STARTED, N_('Planet revolt started - production halved for the next turns.'), severity = CRI)
addMsg(MSG_REVOLT_ENDED, N_('Planet revolt ended - production restored.'), severity = CRI)
addMsg(MSG_INVALID_TASK, N_('Construction of %(1)s is not valid - construction suspended.'), (techID2Name,), severity = CRI)
addMsg(MSG_NOSUPPORT_POP, N_('Population decreased.\n\nPopulatin of this planet has decreased. Build more facilities producing food.'), severity = CRI)
addMsg(MSG_COMPLETED_PROJECT, N_('Project finished: %(1)s'), (techID2Name,), MIN)
addMsg(MSG_ENABLED_TIME, N_('Time in galaxy started to run...'), severity = CRI)
addMsg(MSG_MISSING_STRATRES, N_('Strategic resource missing: %(1)s'), (stratID2Name,), MAJ)
addMsg(MSG_DELETED_RESEARCH, N_('Research task deleted: %(1)s'), (techID2Name,), CRI)
addMsg(MSG_EXTRACTED_STRATRES, N_('Strategic resource extracted: %(1)s'), (stratID2Name,), MIN)
addMsg(MSG_EXTRACTED_ANTIMATTER_SYNTH, N_('Strategic resource synthesized: 4 units of %(1)s'), (stratID2Name,), MIN)
addMsg(MSG_DOWNGRADED_PLANET_ECO, N_('Planet downgraded to: %(1)s'), (plType2Name,), CRI)
addMsg(MSG_UPGRADED_PLANET_ECO, N_('Planet upgraded to: %(1)s'), (plType2Name,), CRI)
addMsg(MSG_UPGRADED_SHIP, N_('Ship upgraded from %(1)s to %(2)s'), (unicode,unicode), MIN)
addMsg(MSG_DELETED_DESIGN, N_('Obsolete ship design deleted: %(1)s'), (unicode,), CRI)
addMsg(MSG_CANNOT_UPGRADE_SR, N_('Cannot upgrade ship from %(1)s to %(2)s\n\nCannot upgrade ship from %(1)s to %(2)s because of we have not enough of %(3)s.'), (unicode,unicode,stratID2Name), MAJ)
addMsg(MSG_DAMAGE_BY_SG, N_('Malfunctional Star Gate, lost %(1)d %% HP\n\nOur fleet has arrived at system with no or malfunctional Star Gate or Comm/Scann Center. Every ship lost %(1)d %% hitpoints due to intensive deceleration.'), (int,), MAJ)
addMsg(MSG_GAINED_FAME, N_('Gained %(1)d fame.'), severity = INFO)
addMsg(MSG_LOST_FAME, N_('Lost %(1)d fame.'), severity = MIN)
# GNC
addMsg(MSG_GNC_EMR_FORECAST, N_("EMR Forecast\n\nLevel of the electromagnetic radiation is believed to be about %(1)d %% of the average level for the next %(2)s turns"), (float2percent, res.formatTime), severity = MIN)
addMsg(MSG_GNC_EMR_CURRENT_LVL, N_("EMR Forecast\n\nCurrent level of the electromagnetic radiation is about %(1)d %% of the average level."), (float2percent,), severity = MIN)
addMsg(MSG_GNC_VOTING_COMING, N_("Elections!\n\nIt's 2:00 turns before elections! Don't hesitate and vote for the best commander!"), (), severity = MAJ)
addMsg(MSG_GNC_VOTING_NOWINNER, N_("Election results! Nobody won...\n\nThe results from the last elections have been published. Nobody was strong enough to be elected as a leader of our galaxy. Can we find such person another day?\n\nThe official election results follow:\n\n%(1)s\n\n"), (votes2Txt,), severity = MAJ)
addMsg(MSG_GNC_VOTING_LEADER, N_("Election results! Leader elected!\n\nThe results from the last elections have been published. %(1)s has proved to be the most supported person and has been elected as our Leader. May be, %(1)s can become an Imperator one day.\n\nThe official election results follow:\n\n%(2)s\n\n"), (unicode, votes2Txt,), severity = MAJ)
addMsg(MSG_GNC_VOTING_IMPERATOR, N_("Election results! Imperator elected!\n\nThe results from the last elections have been published. %(1)s has proved to be the most supported person and has been elected as our glorified Imperator. Congratulations - you proved to be the best of all of us!\n\nThe official election results follow:\n\n%(2)s\n\n"), (unicode, votes2Txt,), severity = MAJ)
# i18n
del N_
#
# Interface
#
def getMsgText(msgID, data):
    # Render the template registered for msgID with the supplied data.
    # data may be a single value or a list/tuple; each item is run through
    # the matching transform function (if registered) and substituted into
    # the template as %(1)s, %(2)s, ... Falls back to a diagnostic text when
    # the template is missing or the data does not fit the format.
    msg, transform, severity = msgData.get(msgID, (None, None, None))
    # create default messages
    if not msg:
        return _('ERROR\nMissing text for msg %d: %s') % (msgID, repr(data))
    # there is message text -> create message
    # force unicode
    msg = _(msg)
    if data == None:
        return msg
    try:
        # transform data
        newData = {}
        if not (type(data) == types.ListType or type(data) == types.TupleType):
            data = (data,)
        if transform:
            index = 1
            for tranFunc in transform:
                newData[str(index)] = tranFunc(data[index - 1])
                index += 1
        else:
            index = 1
            for item in data:
                newData[str(index)] = item
                index += 1
        text = msg % newData
    except Exception, e:
        # wrong arguments -> default message
        # NOTE: 'except Exception, e' is Python 2 only syntax.
        log.warning("Erorr while formating message")
        return _('ERROR\nWrong format for msg %d: %s\nException: %s: %s\nFormat: %s') % (msgID, repr(data), str(e.__class__), str(e), msg)
    return text
def getMsgSeverity(msgID):
    """Return the severity registered for msgID (NONE when unknown)."""
    name, transform, severity = msgData.get(msgID, (None, None, NONE))
    return severity
def getFullMessageText(message):
    """Return the full text of an automatically generated message.

    The text is rebuilt from the message's ``data`` tuple
    (sourceID, msgID, locationID, turn, payload).  If the message
    carries no ``data`` key an empty string is returned.
    """
    text = ""
    if message.has_key("data"):
        sourceID, msgID, locationID, turn, data = message["data"]
        sev = getMsgSeverity(msgID)
        currTurn = client.getTurn()
        player = client.getPlayer()
        # source line ('-' marker for messages originated by the player)
        if sourceID != OID_NONE and sourceID != player.oid:
            obj = client.get(sourceID, noUpdate = 1)
            if obj:
                source = getattr(obj, 'name', res.getUnknownName())
            else:
                source = _('N/A')
        else:
            source = _('-')
        text = '%s%s\n' % (text, _("Source: %s") % source)
        # location line ('-' marker when there is no location)
        if locationID != OID_NONE:
            obj = client.get(locationID, noUpdate = 1)
            location = getattr(obj, 'name', res.getUnknownName())
        else:
            location = _('-')
        text = '%s%s\n' % (text, _("Location: %s") % location)
        text = '%s%s\n' % (text, _("Severity: %s") % _(gdata.msgSeverity[sev]))
        # absolute turn plus its offset relative to the current turn
        text = '%s%s\n' % (text, _("Time: %s [%s]") % (
            res.formatTime(turn),
            res.formatTime(turn - currTurn),
        ))
        text = '%s%s\n' % (text, "")
        text = '%s%s\n' % (text, getMsgText(msgID, data))
    return text
| mozts2005/OuterSpace | client-msg-wx/lib/messages.py | Python | gpl-2.0 | 9,437 | [
"Galaxy"
] | eee42ccc7e4b0bb4c2041ae52e83c42d8b97ef5da804dc5e1fa3ac4aac5b4c42 |
"""Unit test for roman72.py
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:20 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
import roman72
import unittest
class KnownValues(unittest.TestCase):
    """Known (integer, numeral) pairs that both conversion directions
    must reproduce exactly; covers single symbols, subtractive forms and
    the extended 4000-4999 range."""
    knownValues = ( (1, 'I'),
                    (2, 'II'),
                    (3, 'III'),
                    (4, 'IV'),
                    (5, 'V'),
                    (6, 'VI'),
                    (7, 'VII'),
                    (8, 'VIII'),
                    (9, 'IX'),
                    (10, 'X'),
                    (50, 'L'),
                    (100, 'C'),
                    (500, 'D'),
                    (1000, 'M'),
                    (31, 'XXXI'),
                    (148, 'CXLVIII'),
                    (294, 'CCXCIV'),
                    (312, 'CCCXII'),
                    (421, 'CDXXI'),
                    (528, 'DXXVIII'),
                    (621, 'DCXXI'),
                    (782, 'DCCLXXXII'),
                    (870, 'DCCCLXX'),
                    (941, 'CMXLI'),
                    (1043, 'MXLIII'),
                    (1110, 'MCX'),
                    (1226, 'MCCXXVI'),
                    (1301, 'MCCCI'),
                    (1485, 'MCDLXXXV'),
                    (1509, 'MDIX'),
                    (1607, 'MDCVII'),
                    (1754, 'MDCCLIV'),
                    (1832, 'MDCCCXXXII'),
                    (1993, 'MCMXCIII'),
                    (2074, 'MMLXXIV'),
                    (2152, 'MMCLII'),
                    (2212, 'MMCCXII'),
                    (2343, 'MMCCCXLIII'),
                    (2499, 'MMCDXCIX'),
                    (2574, 'MMDLXXIV'),
                    (2646, 'MMDCXLVI'),
                    (2723, 'MMDCCXXIII'),
                    (2892, 'MMDCCCXCII'),
                    (2975, 'MMCMLXXV'),
                    (3051, 'MMMLI'),
                    (3185, 'MMMCLXXXV'),
                    (3250, 'MMMCCL'),
                    (3313, 'MMMCCCXIII'),
                    (3408, 'MMMCDVIII'),
                    (3501, 'MMMDI'),
                    (3610, 'MMMDCX'),
                    (3743, 'MMMDCCXLIII'),
                    (3844, 'MMMDCCCXLIV'),
                    (3888, 'MMMDCCCLXXXVIII'),
                    (3940, 'MMMCMXL'),
                    (3999, 'MMMCMXCIX'),
                    (4000, 'MMMM'),
                    (4500, 'MMMMD'),
                    (4888, 'MMMMDCCCLXXXVIII'),
                    (4999, 'MMMMCMXCIX'))
    def testToRomanKnownValues(self):
        """toRoman should give known result with known input"""
        for integer, numeral in self.knownValues:
            result = roman72.toRoman(integer)
            self.assertEqual(numeral, result)
    def testFromRomanKnownValues(self):
        """fromRoman should give known result with known input"""
        for integer, numeral in self.knownValues:
            result = roman72.fromRoman(numeral)
            self.assertEqual(integer, result)
class ToRomanBadInput(unittest.TestCase):
    """toRoman must reject values outside 1..4999 and non-integers."""
    def testTooLarge(self):
        """toRoman should fail with large input"""
        self.assertRaises(roman72.OutOfRangeError, roman72.toRoman, 5000)
    def testZero(self):
        """toRoman should fail with 0 input"""
        self.assertRaises(roman72.OutOfRangeError, roman72.toRoman, 0)
    def testNegative(self):
        """toRoman should fail with negative input"""
        self.assertRaises(roman72.OutOfRangeError, roman72.toRoman, -1)
    def testNonInteger(self):
        """toRoman should fail with non-integer input"""
        self.assertRaises(roman72.NotIntegerError, roman72.toRoman, 0.5)
class FromRomanBadInput(unittest.TestCase):
    """fromRoman must reject strings that are not valid Roman numerals."""
    def testTooManyRepeatedNumerals(self):
        """fromRoman should fail with too many repeated numerals"""
        for numeral in ('MMMMM', 'DD', 'CCCC', 'LL', 'XXXX', 'VV', 'IIII'):
            self.assertRaises(roman72.InvalidRomanNumeralError,
                              roman72.fromRoman, numeral)
    def testRepeatedPairs(self):
        """fromRoman should fail with repeated pairs of numerals"""
        for numeral in ('CMCM', 'CDCD', 'XCXC', 'XLXL', 'IXIX', 'IVIV'):
            self.assertRaises(roman72.InvalidRomanNumeralError,
                              roman72.fromRoman, numeral)
    def testMalformedAntecedent(self):
        """fromRoman should fail with malformed antecedents"""
        malformed = ('IIMXCC', 'VX', 'DCM', 'CMM', 'IXIV',
                     'MCMC', 'XCX', 'IVI', 'LM', 'LD', 'LC')
        for numeral in malformed:
            self.assertRaises(roman72.InvalidRomanNumeralError,
                              roman72.fromRoman, numeral)
    def testBlank(self):
        """fromRoman should fail with blank string"""
        self.assertRaises(roman72.InvalidRomanNumeralError,
                          roman72.fromRoman, "")
class SanityCheck(unittest.TestCase):
    """Round-trip consistency over the whole supported range."""
    def testSanity(self):
        """fromRoman(toRoman(n))==n for all n"""
        for value in range(1, 5000):
            round_tripped = roman72.fromRoman(roman72.toRoman(value))
            self.assertEqual(value, round_tripped)
class CaseCheck(unittest.TestCase):
    """Case convention: toRoman emits uppercase only and fromRoman
    accepts uppercase only."""
    def testToRomanCase(self):
        """toRoman should always return uppercase"""
        for integer in range(1, 5000):
            numeral = roman72.toRoman(integer)
            self.assertEqual(numeral, numeral.upper())
    def testFromRomanCase(self):
        """fromRoman should only accept uppercase input"""
        for integer in range(1, 5000):
            numeral = roman72.toRoman(integer)
            # uppercase spelling must be accepted...
            roman72.fromRoman(numeral.upper())
            # ...and the lowercase spelling rejected
            self.assertRaises(roman72.InvalidRomanNumeralError,
                              roman72.fromRoman, numeral.lower())
# Run every TestCase in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| tapomayukh/projects_in_python | sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/roman/stage7/romantest72.py | Python | mit | 5,848 | [
"VisIt"
] | 82d8e83649c5d4fdb9fb37300b607ba0c2d299cc0eff77a1e8dd5563f14798b9 |
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members):
    """Emit forward declarations for a QAPI struct and its list type.

    ``members`` is unused here; it is accepted only so every generator
    shares the same (name, data) calling convention.
    """
    return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
    %(name)s *value;
    struct %(name)sList *next;
} %(name)sList;
''',
                 name=name)
def generate_fwd_enum_struct(name, members):
    """Emit the list-type declaration for a QAPI enum.

    ``members`` is unused here; it is accepted only so every generator
    shares the same (name, data) calling convention.
    """
    return mcgen('''
typedef struct %(name)sList
{
    %(name)s value;
    struct %(name)sList *next;
} %(name)sList;
''',
                 name=name)
def generate_struct(structname, fieldname, members):
    """Generate the C struct definition for a QAPI complex type.

    Called recursively for structured members: the outer call passes the
    type name as ``structname`` with an empty ``fieldname``; nested
    calls pass an empty ``structname`` and the member name as
    ``fieldname`` (emitted after the closing brace).
    """
    ret = mcgen('''
struct %(name)s
{
''',
                name=structname)
    for argname, argentry, optional, structured in parse_args(members):
        # optional members get an extra has_<name> presence flag
        if optional:
            ret += mcgen('''
    bool has_%(c_name)s;
''',
                     c_name=c_var(argname))
        if structured:
            # nested anonymous struct, rendered one indent level deeper
            push_indent()
            ret += generate_struct("", argname, argentry)
            pop_indent()
        else:
            ret += mcgen('''
    %(c_type)s %(c_name)s;
''',
                     c_type=c_type(argentry), c_name=c_var(argname))
    if len(fieldname):
        fieldname = " " + fieldname
    ret += mcgen('''
}%(field)s;
''',
            field=fieldname)
    return ret
def generate_enum_lookup(name, values):
    """Generate the C definition of the <name>_lookup[] string table.

    The table maps each enum value, by index, to its wire name and is
    terminated by a NULL sentinel.
    """
    ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
                name=name)
    # (the unused index counter of the original implementation is gone:
    # the table position is implied by emission order)
    for value in values:
        ret += mcgen('''
    "%(value)s",
''',
                     value=value)
    ret += mcgen('''
    NULL,
};
''')
    return ret
def generate_enum_name(name):
    """Convert an enum value name to its C constant spelling.

    Names that are already all uppercase are only passed through
    c_fun(); otherwise an underscore is inserted before each uppercase
    letter and the result is upper-cased (CamelCase -> CAMEL_CASE).
    """
    c_name = c_fun(name, False)
    if name.isupper():
        return c_name
    pieces = []
    for ch in c_name:
        if ch.isupper():
            pieces.append('_')
        pieces.append(ch)
    return ''.join(pieces).lstrip('_').upper()
def generate_enum(name, values):
    """Generate the C typedef of a QAPI enum plus the extern declaration
    of its lookup table.

    A synthetic <NAME>_MAX member is appended after the user-supplied
    values so generated code can iterate over the enum.
    """
    lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
                        name=name)
    enum_decl = mcgen('''
typedef enum %(name)s
{
''',
                      name=name)
    # append automatically generated _MAX value
    enum_values = values + [ 'MAX' ]
    # enumerate() replaces the manual index counter of the original code
    for i, value in enumerate(enum_values):
        enum_decl += mcgen('''
    %(abbrev)s_%(value)s = %(i)d,
''',
                           abbrev=de_camel_case(name).upper(),
                           value=generate_enum_name(value),
                           i=i)
    enum_decl += mcgen('''
} %(name)s;
''',
                       name=name)
    return lookup_decl + enum_decl
def generate_union(name, typeinfo):
    """Generate the C struct for a QAPI union.

    The struct carries a discriminator (``kind``) plus an anonymous C
    union with one member per branch listed in ``typeinfo``.
    """
    ret = mcgen('''
struct %(name)s
{
    %(name)sKind kind;
    union {
        void *data;
''',
                name=name)
    for branch in typeinfo:
        ret += mcgen('''
        %(c_type)s %(c_name)s;
''',
                     c_type=c_type(typeinfo[branch]),
                     c_name=c_fun(branch))
    ret += mcgen('''
    };
};
''')
    return ret
def generate_type_cleanup_decl(name):
    """Generate the prototype of the qapi_free_<name>() helper."""
    return mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
                 c_type=c_type(name), type=name)
def generate_type_cleanup(name):
    """Generate the body of the qapi_free_<name>() helper.

    The helper releases a QAPI object by walking it with the dealloc
    visitor; a NULL argument is a no-op.
    """
    return mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
    QapiDeallocVisitor *md;
    Visitor *v;
    if (!obj) {
        return;
    }
    md = qapi_dealloc_visitor_new();
    v = qapi_dealloc_get_visitor(md);
    visit_type_%(type)s(v, &obj, NULL, NULL);
    qapi_dealloc_visitor_cleanup(md);
}
''',
                 c_type=c_type(name), type=name)
# Command line parsing: -c/--source and -h/--header select which of the
# two outputs to generate; -p/--prefix and -o/--output-dir control the
# file names and their location.
try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:",
                                   ["source", "header", "prefix=", "output-dir="])
except getopt.GetoptError, err:
    print str(err)
    sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
for o, a in opts:
    if o in ("-p", "--prefix"):
        prefix = a
    elif o in ("-o", "--output-dir"):
        output_dir = a + "/"
    elif o in ("-c", "--source"):
        do_c = True
    elif o in ("-h", "--header"):
        do_h = True
# with no explicit selection, generate both the .c and the .h file
if not do_c and not do_h:
    do_c = True
    do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
# create the output directory; an already existing one is fine
try:
    os.makedirs(output_dir)
except os.error, e:
    if e.errno != errno.EEXIST:
        raise
def maybe_open(really, name, opt):
    """Open the given file, or return an in-memory sink when disabled.

    Lets the generation code below write unconditionally while only the
    outputs actually requested on the command line reach the filesystem.
    """
    if not really:
        import StringIO
        return StringIO.StringIO()
    return open(name, opt)
# Unselected outputs are routed to throwaway in-memory buffers.
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
# boilerplate header of the generated .c file
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
 * deallocation functions for schema-defined QAPI types
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 * Anthony Liguori <aliguori@us.ibm.com>
 * Michael Roth <mdroth@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
# boilerplate header (and include guard) of the generated .h file
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
 * schema-defined QAPI types
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
#ifndef %(guard)s
#define %(guard)s
#include <stdbool.h>
#include <stdint.h>
''',
                  guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
# First pass: forward declarations and enum definitions, so the struct
# bodies emitted by the second pass can reference any generated type.
for expr in exprs:
    ret = "\n"
    if expr.has_key('type'):
        ret += generate_fwd_struct(expr['type'], expr['data'])
    elif expr.has_key('enum'):
        ret += generate_enum(expr['enum'], expr['data']) + "\n"
        ret += generate_fwd_enum_struct(expr['enum'], expr['data'])
        fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
    elif expr.has_key('union'):
        ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
        ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
        fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys()))
    else:
        continue
    fdecl.write(ret)
# Second pass: full struct/union definitions plus the declarations and
# bodies of their qapi_free_*() cleanup helpers.
for expr in exprs:
    ret = "\n"
    if expr.has_key('type'):
        ret += generate_struct(expr['type'], "", expr['data']) + "\n"
        ret += generate_type_cleanup_decl(expr['type'] + "List")
        fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
        ret += generate_type_cleanup_decl(expr['type'])
        fdef.write(generate_type_cleanup(expr['type']) + "\n")
    elif expr.has_key('union'):
        ret += generate_union(expr['union'], expr['data'])
        ret += generate_type_cleanup_decl(expr['union'] + "List")
        fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
        ret += generate_type_cleanup_decl(expr['union'])
        fdef.write(generate_type_cleanup(expr['union']) + "\n")
    elif expr.has_key('enum'):
        ret += generate_type_cleanup_decl(expr['enum'] + "List")
        fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
    else:
        continue
    fdecl.write(ret)
# close the include guard and flush both outputs
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
| piyushroshan/xen-4.3.2 | tools/qemu-xen/scripts/qapi-types.py | Python | gpl-2.0 | 7,559 | [
"VisIt"
] | 4cc861370b086f5f874974ef19ce016a9b900c1524b2a2898eabf8757a1a8a2b |
# Copyright (C) 2013-2020 2ndQuadrant Limited
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
This module control how the output of Barman will be rendered
"""
from __future__ import print_function
import datetime
import inspect
import json
import logging
import sys
from barman.infofile import BackupInfo
from barman.utils import (BarmanEncoder, force_str, human_readable_timedelta,
pretty_size, redact_passwords)
from barman.xlog import diff_lsn
# Public module API: these names form the output facade used by the rest
# of Barman.
__all__ = [
    'error_occurred', 'debug', 'info', 'warning', 'error', 'exception',
    'result', 'close_and_exit', 'close', 'set_output_writer',
    'AVAILABLE_WRITERS', 'DEFAULT_WRITER', 'ConsoleOutputWriter',
    'NagiosOutputWriter', 'JsonOutputWriter'
]
#: True if error or exception methods have been called
error_occurred = False
#: Exit code if error occurred (used by close_and_exit)
error_exit_code = 1
#: Enable colors in the output
ansi_colors_enabled = False
def _ansi_color(command):
"""
Return the ansi sequence for the provided color
"""
return '\033[%sm' % command
def _colored(message, color):
    """Wrap *message* in the given ANSI color, when colors are enabled."""
    if not ansi_colors_enabled:
        return message
    return _ansi_color(color) + message + _ansi_color('0')
def _red(message):
    """
    Return *message* wrapped in ANSI red (SGR code 31)
    """
    return _colored(message, '31')
def _green(message):
    """
    Return *message* wrapped in ANSI green (SGR code 32)
    """
    return _colored(message, '32')
def _yellow(message):
    """
    Return *message* wrapped in ANSI yellow (SGR code 33)
    """
    return _colored(message, '33')
def _format_message(message, args):
"""
Format a message using the args list. The result will be equivalent to
message % args
If args list contains a dictionary as its only element the result will be
message % args[0]
:param str message: the template string to be formatted
:param tuple args: a list of arguments
:return: the formatted message
:rtype: str
"""
if len(args) == 1 and isinstance(args[0], dict):
return message % args[0]
elif len(args) > 0:
return message % args
else:
return message
def _put(level, message, *args, **kwargs):
    """
    Send the message with all the remaining positional arguments to
    the configured output manager with the right output level. The message will
    be sent also to the logger unless explicitly disabled with log=False

    No checks are performed on level parameter as this method is meant
    to be called only by this module.

    If level == 'exception' the stack trace will be also logged

    :param str level:
    :param str message: the template string to be formatted
    :param tuple args: all remaining arguments are passed to the log formatter
    :key bool log: whether to log the message
    :key bool is_error: treat this message as an error
    """
    # handle keyword-only parameters (Python 2 compatible emulation)
    log = kwargs.pop('log', True)
    is_error = kwargs.pop('is_error', False)
    if len(kwargs):
        raise TypeError('%s() got an unexpected keyword argument %r'
                        % (inspect.stack()[1][3], kwargs.popitem()[0]))
    if is_error:
        # remember the failure globally and notify the writer
        global error_occurred
        error_occurred = True
        _writer.error_occurred()
    # Make sure the message is an unicode string
    if message:
        message = force_str(message)
    # dispatch the call to the output handler
    getattr(_writer, level)(message, *args)
    # log the message as originating from caller's caller module
    if log:
        exc_info = False
        if level == 'exception':
            # the logger has no 'exception' level; log as error with traceback
            level = 'error'
            exc_info = True
        frm = inspect.stack()[2]
        mod = inspect.getmodule(frm[0])
        logger = logging.getLogger(mod.__name__)
        log_level = logging.getLevelName(level.upper())
        logger.log(log_level, message, *args, **{'exc_info': exc_info})
def _dispatch(obj, prefix, name, *args, **kwargs):
"""
Dispatch the call to the %(prefix)s_%(name) method of the obj object
:param obj: the target object
:param str prefix: prefix of the method to be called
:param str name: name of the method to be called
:param tuple args: all remaining positional arguments will be sent
to target
:param dict kwargs: all remaining keyword arguments will be sent to target
:return: the result of the invoked method
:raise ValueError: if the target method is not present
"""
method_name = "%s_%s" % (prefix, name)
handler = getattr(obj, method_name, None)
if callable(handler):
return handler(*args, **kwargs)
else:
raise ValueError("The object %r does not have the %r method" % (
obj, method_name))
def is_quiet():
    """
    Module-level proxy for the active writer's "is_quiet" method, which
    exposes its protected _quiet flag

    :return bool: the _quiet parameter value
    """
    return _writer.is_quiet()
def is_debug():
    """
    Module-level proxy for the active writer's "is_debug" method, which
    exposes its protected _debug flag

    :return bool: the _debug parameter value
    """
    return _writer.is_debug()
def debug(message, *args, **kwargs):
    """
    Output a message with severity 'DEBUG'

    :param str message: template string, %-formatted with args
    :key bool log: whether to log the message
    """
    _put('debug', message, *args, **kwargs)
def info(message, *args, **kwargs):
    """
    Output a message with severity 'INFO'

    :param str message: template string, %-formatted with args
    :key bool log: whether to log the message
    """
    _put('info', message, *args, **kwargs)
def warning(message, *args, **kwargs):
    """
    Output a message with severity 'WARNING'

    :param str message: template string, %-formatted with args
    :key bool log: whether to log the message
    """
    _put('warning', message, *args, **kwargs)
def error(message, *args, **kwargs):
    """
    Output a message with severity 'ERROR'.

    Also records that an error has occurred unless the ignore parameter
    is True.

    :key bool ignore: avoid setting an error exit status (default False)
    :key bool log: whether to log the message
    """
    # ignore is a keyword-only parameter
    ignore = kwargs.pop('ignore', False)
    if not ignore:
        # mark the whole run as failed unless the caller opted out
        kwargs.setdefault('is_error', True)
    _put('error', message, *args, **kwargs)
def exception(message, *args, **kwargs):
    """
    Output a message with severity 'EXCEPTION'

    If raise_exception parameter doesn't evaluate to false raise and exception:
    - if raise_exception is callable raise the result of raise_exception()
    - if raise_exception is an exception raise it
    - else raise the last exception again

    :key bool ignore: avoid setting an error exit status
    :key raise_exception:
        raise an exception after the message has been processed
    :key bool log: whether to log the message
    """
    # ignore and raise_exception are keyword-only parameters
    ignore = kwargs.pop('ignore', False)
    # noinspection PyNoneFunctionAssignment
    raise_exception = kwargs.pop('raise_exception', None)
    if not ignore:
        kwargs.setdefault('is_error', True)
    _put('exception', message, *args, **kwargs)
    if raise_exception:
        if callable(raise_exception):
            # factory (e.g. an exception class): build it from the message
            # noinspection PyCallingNonCallable
            raise raise_exception(message)
        elif isinstance(raise_exception, BaseException):
            # pre-built exception instance: raise it as-is
            raise raise_exception
        else:
            # any other truthy value: re-raise the active exception
            raise
def init(command, *args, **kwargs):
    """
    Initialize the output writer for a given command.

    Dispatches to the writer's init_<command> method; a writer that does
    not implement the command is a fatal error.

    :param str command: name of the command are being executed
    :param tuple args: all remaining positional arguments will be sent
        to the output processor
    :param dict kwargs: all keyword arguments will be sent
        to the output processor
    """
    try:
        _dispatch(_writer, 'init', command, *args, **kwargs)
    except ValueError:
        exception('The %s writer does not support the "%s" command',
                  _writer.__class__.__name__, command)
        close_and_exit()
def result(command, *args, **kwargs):
    """
    Output the result of an operation.

    Dispatches to the writer's result_<command> method; a writer that
    does not implement the command is a fatal error.

    :param str command: name of the command are being executed
    :param tuple args: all remaining positional arguments will be sent
        to the output processor
    :param dict kwargs: all keyword arguments will be sent
        to the output processor
    """
    try:
        _dispatch(_writer, 'result', command, *args, **kwargs)
    except ValueError:
        exception('The %s writer does not support the "%s" command',
                  _writer.__class__.__name__, command)
        close_and_exit()
def close_and_exit():
    """
    Close the output writer and terminate the program.

    If an error has been emitted the program will report a non zero return
    value.
    """
    close()
    sys.exit(error_exit_code if error_occurred else 0)
def close():
    """
    Close the active output writer, letting it flush any pending output.
    """
    _writer.close()
def set_output_writer(new_writer, *args, **kwargs):
    """
    Replace the current output writer with a new one.

    The new_writer parameter can be a symbolic name or an OutputWriter object

    :param new_writer: the OutputWriter name or the actual OutputWriter
    :type: string or an OutputWriter
    :param tuple args: all remaining positional arguments will be passed
        to the OutputWriter constructor
    :param dict kwargs: all remaining keyword arguments will be passed
        to the OutputWriter constructor
    """
    global _writer
    # close the outgoing writer before swapping it out
    _writer.close()
    if new_writer in AVAILABLE_WRITERS:
        # symbolic name: instantiate the registered writer class
        _writer = AVAILABLE_WRITERS[new_writer](*args, **kwargs)
    else:
        _writer = new_writer
class ConsoleOutputWriter(object):
    def __init__(self, debug=False, quiet=False):
        """
        Default output writer that outputs everything on console.

        :param bool debug: print debug messages on standard error
        :param bool quiet: don't print info messages
        """
        self._debug = debug
        self._quiet = quiet
        #: Used in check command to hold the check results
        self.result_check_list = []
        #: The minimal flag. If set the command must output a single list of
        #: values.
        self.minimal = False
        #: The server is active (failed checks only count on active servers)
        self.active = True
    def _print(self, message, args, stream):
        """
        Print an encoded message on the given output stream

        :param str,None message: template string (None prints a bare newline)
        :param tuple args: %-format arguments for the template
        :param stream: target file object (stdout or stderr)
        """
        # Make sure to add a newline at the end of the message
        if message is None:
            message = '\n'
        else:
            message += '\n'
        # Format and encode the message, redacting eventual passwords
        encoded_msg = redact_passwords(
            _format_message(message, args)).encode('utf-8')
        try:
            # Python 3.x: write bytes through the underlying binary buffer
            stream.buffer.write(encoded_msg)
        except AttributeError:
            # Python 2.x: the stream accepts bytes directly
            stream.write(encoded_msg)
        stream.flush()
    def _out(self, message, args):
        """
        Print a %-formatted message on standard output
        """
        self._print(message, args, sys.stdout)
    def _err(self, message, args):
        """
        Print a %-formatted message on standard error
        """
        self._print(message, args, sys.stderr)
    def is_quiet(self):
        """
        Access the quiet property of the OutputWriter instance

        :return bool: if the writer is quiet or not
        """
        return self._quiet
    def is_debug(self):
        """
        Access the debug property of the OutputWriter instance

        :return bool: if the writer is in debug mode or not
        """
        return self._debug
    def debug(self, message, *args):
        """
        Emit a debug message on standard error (only in debug mode).
        """
        if self._debug:
            self._err('DEBUG: %s' % message, args)
    def info(self, message, *args):
        """
        Normal messages are sent to standard output (suppressed in quiet mode)
        """
        if not self._quiet:
            self._out(message, args)
    def warning(self, message, *args):
        """
        Warning messages are sent to standard error, in yellow
        """
        self._err(_yellow('WARNING: %s' % message), args)
    def error(self, message, *args):
        """
        Error messages are sent to standard error, in red
        """
        self._err(_red('ERROR: %s' % message), args)
    def exception(self, message, *args):
        """
        Exception messages are sent to standard error, in red
        """
        self._err(_red('EXCEPTION: %s' % message), args)
    def error_occurred(self):
        """
        Called immediately before any message method when the originating
        call has is_error=True.

        No-op here; subclasses may hook error accounting.
        """
    def close(self):
        """
        Close the output channel.

        Nothing to do for console.
        """
    def result_backup(self, backup_info):
        """
        Render the result of a backup.

        Nothing to do for console.
        """
        # TODO: evaluate to display something useful here
    def result_recovery(self, results):
        """
        Render the result of a recovery: applied configuration changes,
        warnings, missing files and final timing information.
        """
        # settings rewritten by the recovery to avoid data loss
        if len(results['changes']) > 0:
            self.info("")
            self.info("IMPORTANT")
            self.info("These settings have been modified to prevent "
                      "data losses")
            self.info("")
            for assertion in results['changes']:
                self.info("%s line %s: %s = %s",
                          assertion.filename,
                          assertion.line,
                          assertion.key,
                          assertion.value)
        # settings the user must review manually
        if len(results['warnings']) > 0:
            self.info("")
            self.info("WARNING")
            self.info("You are required to review the following options"
                      " as potentially dangerous")
            self.info("")
            for assertion in results['warnings']:
                self.info("%s line %s: %s = %s",
                          assertion.filename,
                          assertion.line,
                          assertion.key,
                          assertion.value)
        if results['missing_files']:
            # At least one file is missing, warn the user
            self.info("")
            self.info("WARNING")
            self.info("The following configuration files have not been "
                      "saved during backup, hence they have not been "
                      "restored.")
            self.info("You need to manually restore them "
                      "in order to start the recovered PostgreSQL instance:")
            self.info("")
            for file_name in results['missing_files']:
                self.info(" %s" % file_name)
        if results['delete_barman_wal']:
            self.info("")
            self.info("After the recovery, please remember to remove the "
                      "\"barman_wal\" directory")
            self.info("inside the PostgreSQL data directory.")
        # get-wal recovery needs a manually reviewed restore_command
        if results['get_wal']:
            self.info("")
            self.info("WARNING: 'get-wal' is in the specified "
                      "'recovery_options'.")
            self.info("Before you start up the PostgreSQL server, please "
                      "review the %s file",
                      results['recovery_configuration_file'])
            self.info("inside the target directory. Make sure that "
                      "'restore_command' can be executed by "
                      "the PostgreSQL user.")
        self.info("")
        self.info(
            "Recovery completed (start time: %s, elapsed time: %s)",
            results['recovery_start_time'],
            human_readable_timedelta(
                datetime.datetime.now() - results['recovery_start_time']))
        self.info("")
        self.info("Your PostgreSQL server has been successfully "
                  "prepared for recovery!")
def _record_check(self, server_name, check, status, hint):
"""
Record the check line in result_check_map attribute
This method is for subclass use
:param str server_name: the server is being checked
:param str check: the check name
:param bool status: True if succeeded
:param str,None hint: hint to print if not None
"""
self.result_check_list.append(dict(
server_name=server_name, check=check, status=status, hint=hint))
if not status and self.active:
global error_occurred
error_occurred = True
def init_check(self, server_name, active, disabled):
"""
Init the check command
:param str server_name: the server we are start listing
:param boolean active: The server is active
:param boolean disabled: The server is disabled
"""
display_name = server_name
# If the server has been manually disabled
if not active:
display_name += " (inactive)"
# If server has configuration errors
elif disabled:
display_name += " (WARNING: disabled)"
self.info("Server %s:" % display_name)
self.active = active
    def result_check(self, server_name, check, status, hint=None):
        """
        Record a server result of a server check
        and output it as INFO (OK in green, FAILED in red)

        :param str server_name: the server is being checked
        :param str check: the check name
        :param bool status: True if succeeded
        :param str,None hint: hint to print if not None
        """
        self._record_check(server_name, check, status, hint)
        if hint:
            self.info(
                "\t%s: %s (%s)" %
                (check, _green('OK') if status else _red('FAILED'), hint))
        else:
            self.info(
                "\t%s: %s" %
                (check, _green('OK') if status else _red('FAILED')))
    def init_list_backup(self, server_name, minimal=False):
        """
        Init the list-backup command

        :param str server_name: the server we are start listing
        :param bool minimal: if true output only a list of backup id
        """
        self.minimal = minimal
    def result_list_backup(self, backup_info,
                           backup_size, wal_size,
                           retention_status):
        """
        Output a single backup in the list-backup command

        :param BackupInfo backup_info: backup we are displaying
        :param backup_size: size of base backup (with the required WAL files)
        :param wal_size: size of WAL files belonging to this backup
            (without the required WAL files)
        :param retention_status: retention policy status
        """
        # If minimal is set only output the backup id
        if self.minimal:
            self.info(backup_info.backup_id)
            return
        out_list = [
            "%s %s - " % (backup_info.server_name, backup_info.backup_id)]
        if backup_info.status in BackupInfo.STATUS_COPY_DONE:
            # completed backup: show end time and sizes
            end_time = backup_info.end_time.ctime()
            out_list.append('%s - Size: %s - WAL Size: %s' %
                            (end_time,
                             pretty_size(backup_size),
                             pretty_size(wal_size)))
            if backup_info.tablespaces:
                tablespaces = [("%s:%s" % (tablespace.name,
                                           tablespace.location))
                               for tablespace in backup_info.tablespaces]
                out_list.append(' (tablespaces: %s)' %
                                ', '.join(tablespaces))
            if backup_info.status == BackupInfo.WAITING_FOR_WALS:
                out_list.append(' - %s' % BackupInfo.WAITING_FOR_WALS)
            if retention_status and retention_status != BackupInfo.NONE:
                out_list.append(' - %s' % retention_status)
        else:
            # incomplete/failed backup: show its status instead
            out_list.append(backup_info.status)
        self.info(''.join(out_list))
    def result_show_backup(self, backup_ext_info):
        """
        Output all available information about a backup in show-backup command

        The argument has to be the result
        of a Server.get_backup_ext_info() call

        :param dict backup_ext_info: a dictionary containing
            the info to display
        """
        data = dict(backup_ext_info)
        self.info("Backup %s:", data['backup_id'])
        self.info(" Server Name : %s", data['server_name'])
        if data['systemid']:
            self.info(" System Id : %s", data['systemid'])
        self.info(" Status : %s", data['status'])
        if data['status'] in BackupInfo.STATUS_COPY_DONE:
            # completed backup: full details
            self.info(" PostgreSQL Version : %s", data['version'])
            self.info(" PGDATA directory : %s", data['pgdata'])
            if data['tablespaces']:
                self.info(" Tablespaces:")
                for item in data['tablespaces']:
                    self.info(" %s: %s (oid: %s)",
                              item.name, item.location, item.oid)
            self.info("")
            self.info(" Base backup information:")
            self.info(" Disk usage : %s (%s with WALs)",
                      pretty_size(data['size']),
                      pretty_size(data['size'] + data[
                          'wal_size']))
            if data['deduplicated_size'] is not None and data['size'] > 0:
                # space saved by file-level deduplication (rsync hardlinks)
                deduplication_ratio = (
                    1 - (float(data['deduplicated_size']) / data['size']))
                self.info(" Incremental size : %s (-%s)",
                          pretty_size(data['deduplicated_size']),
                          '{percent:.2%}'.format(percent=deduplication_ratio)
                          )
            self.info(" Timeline : %s", data['timeline'])
            self.info(" Begin WAL : %s",
                      data['begin_wal'])
            self.info(" End WAL : %s", data['end_wal'])
            self.info(" WAL number : %s", data['wal_num'])
            # Output WAL compression ratio for basebackup WAL files
            if data['wal_compression_ratio'] > 0:
                self.info(" WAL compression ratio: %s",
                          '{percent:.2%}'.format(
                              percent=data['wal_compression_ratio']))
            self.info(" Begin time : %s",
                      data['begin_time'])
            self.info(" End time : %s", data['end_time'])
            # If copy statistics are available print a summary
            copy_stats = data.get('copy_stats')
            if copy_stats:
                copy_time = copy_stats.get('copy_time')
                if copy_time:
                    value = human_readable_timedelta(
                        datetime.timedelta(seconds=copy_time))
                    # Show analysis time if it is more than a second
                    analysis_time = copy_stats.get('analysis_time')
                    if analysis_time is not None and analysis_time >= 1:
                        value += " + %s startup" % (human_readable_timedelta(
                            datetime.timedelta(seconds=analysis_time)))
                    self.info(" Copy time : %s", value)
                    size = data['deduplicated_size'] or data['size']
                    value = "%s/s" % pretty_size(size / copy_time)
                    number_of_workers = copy_stats.get('number_of_workers', 1)
                    if number_of_workers > 1:
                        value += " (%s jobs)" % number_of_workers
                    self.info(" Estimated throughput : %s", value)
            self.info(" Begin Offset : %s",
                      data['begin_offset'])
            self.info(" End Offset : %s",
                      data['end_offset'])
            self.info(" Begin LSN : %s",
                      data['begin_xlog'])
            self.info(" End LSN : %s", data['end_xlog'])
            self.info("")
            self.info(" WAL information:")
            self.info(" No of files : %s",
                      data['wal_until_next_num'])
            self.info(" Disk usage : %s",
                      pretty_size(data['wal_until_next_size']))
            # Output WAL rate
            if data['wals_per_second'] > 0:
                self.info(" WAL rate : %0.2f/hour",
                          data['wals_per_second'] * 3600)
            # Output WAL compression ratio for archived WAL files
            if data['wal_until_next_compression_ratio'] > 0:
                self.info(
                    " Compression ratio : %s",
                    '{percent:.2%}'.format(
                        percent=data['wal_until_next_compression_ratio']))
            self.info(" Last available : %s", data['wal_last'])
            if data['children_timelines']:
                timelines = data['children_timelines']
                self.info(
                    " Reachable timelines : %s",
                    ", ".join([str(history.tli) for history in timelines]))
            self.info("")
            self.info(" Catalog information:")
            self.info(
                " Retention Policy : %s",
                data['retention_policy_status'] or 'not enforced')
            previous_backup_id = data.setdefault(
                'previous_backup_id', 'not available')
            self.info(
                " Previous Backup : %s",
                previous_backup_id or '- (this is the oldest base backup)')
            next_backup_id = data.setdefault(
                'next_backup_id', 'not available')
            self.info(
                " Next Backup : %s",
                next_backup_id or '- (this is the latest base backup)')
            if data['children_timelines']:
                self.info("")
                self.info(
                    "WARNING: WAL information is inaccurate due to "
                    "multiple timelines interacting with this backup")
        else:
            # failed/incomplete backup: only report the stored error
            if data['error']:
                self.info(" Error: : %s",
                          data['error'])
def init_status(self, server_name):
    """Begin the output of a ``status`` command.

    :param str server_name: name of the server whose status follows
    """
    header = "Server %s:"
    self.info(header, server_name)
def result_status(self, server_name, status, description, message):
    """Report one result line of a ``status`` command at INFO level.

    :param str server_name: the server being checked (not printed,
        kept for interface compatibility)
    :param str status: machine-readable status code (not printed)
    :param str description: human-readable label shown to the user
    :param str,object message: value to display; coerced to ``str``
    """
    text = str(message)
    self.info("\t%s: %s", description, text)
def init_replication_status(self, server_name, minimal=False):
    """Prepare the 'replication-status' command output.

    :param str server_name: server about to be listed (unused here)
    :param bool minimal: when True, emit the compact one-line format
    """
    # Remember the requested verbosity for result_replication_status()
    self.minimal = minimal
def result_replication_status(self, server_name, target, server_lsn,
                              standby_info):
    """
    Record a result line of a replication-status command
    and output it as INFO

    :param str server_name: the replication server
    :param str target: all|hot-standby|wal-streamer
    :param str server_lsn: server's current lsn
    :param StatReplication standby_info: status info of a standby
    """
    # Human readable title for the requested class of clients
    if target == 'hot-standby':
        title = 'hot standby servers'
    elif target == 'wal-streamer':
        title = 'WAL streamers'
    else:
        title = 'streaming clients'
    if self.minimal:
        # Minimal output
        if server_lsn:
            # current lsn from the master
            self.info("%s for master '%s' (LSN @ %s):",
                      title.capitalize(), server_name, server_lsn)
        else:
            # We are connected to a standby
            self.info("%s for slave '%s':",
                      title.capitalize(), server_name)
    else:
        # Full output
        self.info("Status of %s for server '%s':",
                  title, server_name)
        # current lsn from the master
        if server_lsn:
            self.info(" Current LSN on master: %s",
                      server_lsn)
    if standby_info is not None and not len(standby_info):
        self.info(" No %s attached", title)
        return
    # Minimal output
    if self.minimal:
        for n, standby in enumerate(standby_info, 1):
            if not standby.replay_lsn:
                # WAL streamer
                self.info(" %s. W) %s@%s S:%s W:%s P:%s AN:%s",
                          n,
                          standby.usename,
                          standby.client_addr or 'socket',
                          standby.sent_lsn,
                          standby.write_lsn,
                          standby.sync_priority,
                          standby.application_name)
            else:
                # Standby
                self.info(" %s. %s) %s@%s S:%s F:%s R:%s P:%s AN:%s",
                          n,
                          standby.sync_state[0].upper(),
                          standby.usename,
                          standby.client_addr or 'socket',
                          standby.sent_lsn,
                          standby.flush_lsn,
                          standby.replay_lsn,
                          standby.sync_priority,
                          standby.application_name)
    else:
        self.info(" Number of %s: %s",
                  title, len(standby_info))
        for n, standby in enumerate(standby_info, 1):
            self.info("")
            # Calculate differences in bytes
            sent_diff = diff_lsn(standby.sent_lsn,
                                 standby.current_lsn)
            write_diff = diff_lsn(standby.write_lsn,
                                  standby.current_lsn)
            flush_diff = diff_lsn(standby.flush_lsn,
                                  standby.current_lsn)
            replay_diff = diff_lsn(standby.replay_lsn,
                                   standby.current_lsn)
            # Determine the sync stage of the client
            sync_stage = None
            if not standby.replay_lsn:
                client_type = 'WAL streamer'
                max_level = 3
            else:
                client_type = 'standby'
                max_level = 5
                # Only standby can replay WAL info
                if replay_diff == 0:
                    sync_stage = '5/5 Hot standby (max)'
                elif flush_diff == 0:
                    sync_stage = '4/5 2-safe'  # remote flush
            # If not yet done, set the sync stage
            if not sync_stage:
                if write_diff == 0:
                    sync_stage = '3/%s Remote write' % max_level
                elif sent_diff == 0:
                    sync_stage = '2/%s WAL Sent (min)' % max_level
                else:
                    sync_stage = '1/%s 1-safe' % max_level
            # Synchronous standby. A missing or NULL sync_priority is
            # treated as asynchronous: the previous code compared the
            # getattr() result (possibly None) with 0, which raises
            # TypeError on Python 3.
            if (getattr(standby, 'sync_priority', 0) or 0) > 0:
                self.info(" %s. #%s %s %s",
                          n,
                          standby.sync_priority,
                          standby.sync_state.capitalize(),
                          client_type)
            # Asynchronous standby
            else:
                self.info(" %s. %s %s",
                          n,
                          standby.sync_state.capitalize(),
                          client_type)
            self.info(" Application name: %s",
                      standby.application_name)
            self.info(" Sync stage : %s",
                      sync_stage)
            if getattr(standby, 'client_addr', None):
                self.info(" Communication : TCP/IP")
                self.info(" IP Address : %s "
                          "/ Port: %s / Host: %s",
                          standby.client_addr,
                          standby.client_port,
                          standby.client_hostname or '-')
            else:
                self.info(" Communication : Unix domain socket")
            self.info(" User name : %s", standby.usename)
            self.info(" Current state : %s (%s)",
                      standby.state,
                      standby.sync_state)
            if getattr(standby, 'slot_name', None):
                self.info(" Replication slot: %s", standby.slot_name)
            self.info(" WAL sender PID : %s", standby.pid)
            self.info(" Started at : %s", standby.backend_start)
            if getattr(standby, 'backend_xmin', None):
                self.info(" Standby's xmin : %s",
                          standby.backend_xmin or '-')
            if getattr(standby, 'sent_lsn', None):
                self.info(" Sent LSN : %s (diff: %s)",
                          standby.sent_lsn,
                          pretty_size(sent_diff))
            if getattr(standby, 'write_lsn', None):
                self.info(" Write LSN : %s (diff: %s)",
                          standby.write_lsn,
                          pretty_size(write_diff))
            if getattr(standby, 'flush_lsn', None):
                self.info(" Flush LSN : %s (diff: %s)",
                          standby.flush_lsn,
                          pretty_size(flush_diff))
            if getattr(standby, 'replay_lsn', None):
                self.info(" Replay LSN : %s (diff: %s)",
                          standby.replay_lsn,
                          pretty_size(replay_diff))
def init_list_server(self, server_name, minimal=False):
    """Prepare the 'list-server' command output.

    :param str server_name: server about to be listed (unused here)
    :param bool minimal: when True, print only the server names
    """
    self.minimal = minimal
def result_list_server(self, server_name, description=None):
    """Print one line of a list-server command.

    :param str server_name: the server being listed
    :param str,None description: optional description to append
    """
    # The description is shown only in full mode and when present
    show_description = description and not self.minimal
    if show_description:
        self.info("%s - %s", server_name, description)
    else:
        self.info("%s", server_name)
def init_show_server(self, server_name):
    """
    Init the show-server command output method

    :param str server_name: the server we are displaying
    """
    # Pass the argument lazily, as every sibling init_* method does,
    # instead of eagerly interpolating it with '%'
    self.info("Server %s:", server_name)
def result_show_server(self, server_name, server_info):
    """Print the results of a show-server command.

    :param str server_name: the server being displayed (not printed)
    :param dict server_info: attribute name -> value mapping to print
    """
    # Emit attributes sorted by name for a stable, readable output
    for key in sorted(server_info):
        self.info("\t%s: %s", key, server_info[key])
class JsonOutputWriter(ConsoleOutputWriter):
    """Output writer that accumulates results and dumps them as JSON."""

    def __init__(self, *args, **kwargs):
        """
        Output writer that writes on standard output using JSON.

        When closed, it dumps all the collected results as a JSON object.
        """
        super(JsonOutputWriter, self).__init__(*args, **kwargs)
        #: Store JSON data collected by the result_* methods;
        #: dumped to stdout by close()
        self.json_output = {}
def _mangle_key(self, value):
"""
Mangle a generic description to be used as dict key
:type value: str
:rtype: str
"""
return value.lower() \
.replace(' ', '_') \
.replace('-', '_') \
.replace('.', '')
def _out_to_field(self, field, message, *args):
    """
    Store a formatted message in the list held under *field*.

    :param str field: key of self.json_output under which the message
        is accumulated
    :param str message: the message (possibly a %-format string)
    :param args: optional arguments for the format string
    """
    message = _format_message(message, args)
    # setdefault creates the per-field list on first use, replacing
    # the explicit membership test
    self.json_output.setdefault(field, []).append(message)
def debug(self, message, *args):
    """Collect a debug message under the ``_DEBUG`` key.

    The message is dropped entirely unless debug output is enabled.
    """
    if self._debug:
        self._out_to_field('_DEBUG', message, *args)
def info(self, message, *args):
    """Collect an informational message under the ``_INFO`` key."""
    self._out_to_field('_INFO', message, *args)
def warning(self, message, *args):
    """Collect a warning message under the ``_WARNING`` key."""
    self._out_to_field('_WARNING', message, *args)
def error(self, message, *args):
    """Collect an error message under the ``_ERROR`` key."""
    self._out_to_field('_ERROR', message, *args)
def exception(self, message, *args):
    """Collect an exception message under the ``_EXCEPTION`` key."""
    self._out_to_field('_EXCEPTION', message, *args)
def close(self):
    """Close the output channel.

    Unless quiet mode is active, dump everything collected so far as a
    single JSON object on standard output, then reset the store.
    """
    quiet = self._quiet
    if not quiet:
        json.dump(self.json_output, sys.stdout,
                  sort_keys=True, cls=BarmanEncoder)
    # Start from a clean slate for any further results
    self.json_output = {}
def result_backup(self, backup_info):
    """Merge the dictionary form of *backup_info* into the JSON output."""
    info_dict = backup_info.to_dict()
    self.json_output.update(info_dict)
def result_recovery(self, results):
    """
    Render the result of a recovery.

    :param dict results: recovery data (changes, warnings,
        missing_files, delete_barman_wal, get_wal, recovery_start_time)
    """
    changes_count = len(results['changes'])
    self.json_output['changes_count'] = changes_count
    self.json_output['changes'] = results['changes']
    if changes_count > 0:
        self.warning("IMPORTANT! Some settings have been modified "
                     "to prevent data losses. See 'changes' key.")
    warnings_count = len(results['warnings'])
    self.json_output['warnings_count'] = warnings_count
    self.json_output['warnings'] = results['warnings']
    if warnings_count > 0:
        self.warning("WARNING! You are required to review the options "
                     "as potentially dangerous. See 'warnings' key.")
    missing_files_count = len(results['missing_files'])
    self.json_output['missing_files'] = results['missing_files']
    if missing_files_count > 0:
        # At least one file is missing, warn the user
        self.warning("WARNING! Some configuration files have not been "
                     "saved during backup, hence they have not been "
                     "restored. See 'missing_files' key.")
    if results['delete_barman_wal']:
        self.warning("After the recovery, please remember to remove the "
                     "'barman_wal' directory inside the PostgreSQL "
                     "data directory.")
    if results['get_wal']:
        self.warning("WARNING: 'get-wal' is in the specified "
                     "'recovery_options'. Before you start up the "
                     "PostgreSQL server, please review the recovery "
                     "configuration inside the target directory. "
                     "Make sure that 'restore_command' can be "
                     "executed by the PostgreSQL user.")
    # Take a single timestamp so that the human readable elapsed time
    # and its value in seconds refer to the same instant (previously
    # datetime.now() was evaluated twice, producing slightly
    # inconsistent values)
    elapsed = datetime.datetime.now() - results['recovery_start_time']
    self.json_output.update({
        'recovery_start_time':
            results['recovery_start_time'].isoformat(' '),
        'recovery_start_time_timestamp':
            results['recovery_start_time'].strftime('%s'),
        'recovery_elapsed_time': human_readable_timedelta(elapsed),
        'recovery_elapsed_time_seconds': elapsed.total_seconds()})
def init_check(self, server_name, active, disabled):
    """Prepare the JSON structure for a check run.

    :param str server_name: the server about to be checked
    :param boolean active: whether the server is active
    :param boolean disabled: whether the server is disabled (accepted
        for interface compatibility; not stored)
    """
    self.json_output[server_name] = {}
    self.active = active
def result_check(self, server_name, check, status, hint=None):
    """Store one check result in the JSON output.

    The result is also passed to _record_check() so the shared exit
    status accounting keeps working.

    :param str server_name: the server being checked
    :param str check: the check name
    :param bool status: True if succeeded
    :param str,None hint: optional hint for the user
    """
    self._record_check(server_name, check, status, hint)
    result = {
        'status': "OK" if status else "FAILED",
        'hint': hint or "",
    }
    self.json_output[server_name][self._mangle_key(check)] = result
def init_list_backup(self, server_name, minimal=False):
    """Prepare the list-backup JSON output.

    :param str server_name: the server we are listing
    :param bool minimal: if true output only a list of backup id
    """
    self.minimal = minimal
    # Backups for this server are accumulated in a list
    self.json_output[server_name] = []
def result_list_backup(self, backup_info,
                       backup_size, wal_size,
                       retention_status):
    """Append one backup entry to the list-backup JSON output.

    :param BackupInfo backup_info: backup we are displaying
    :param backup_size: size of base backup (with the required WAL files)
    :param wal_size: size of WAL files belonging to this backup
        (without the required WAL files)
    :param retention_status: retention policy status
    """
    server_name = backup_info.server_name
    # In minimal mode record the backup id only
    if self.minimal:
        self.json_output[server_name].append(backup_info.backup_id)
        return
    entry = {'backup_id': backup_info.backup_id}
    if backup_info.status in BackupInfo.STATUS_COPY_DONE:
        entry.update(
            end_time_timestamp=backup_info.end_time.strftime('%s'),
            end_time=backup_info.end_time.ctime(),
            size_bytes=backup_size,
            wal_size_bytes=wal_size,
            size=pretty_size(backup_size),
            wal_size=pretty_size(wal_size),
            status=backup_info.status,
            retention_status=retention_status or BackupInfo.NONE,
            tablespaces=[],
        )
        for tablespace in (backup_info.tablespaces or ()):
            entry['tablespaces'].append(
                {'name': tablespace.name,
                 'location': tablespace.location})
    else:
        entry['status'] = backup_info.status
    self.json_output[server_name].append(entry)
def result_show_backup(self, backup_ext_info):
    """
    Output all available information about a backup in show-backup command
    The argument has to be the result
    of a Server.get_backup_ext_info() call

    :param dict backup_ext_info: a dictionary containing
        the info to display
    """
    # Work on a shallow copy: setdefault() below mutates the dict
    data = dict(backup_ext_info)
    server_name = data['server_name']
    output = self.json_output[server_name] = dict(
        backup_id=data['backup_id'],
        status=data['status']
    )
    # Full details are only available for completed backups
    if data['status'] in BackupInfo.STATUS_COPY_DONE:
        output.update(dict(
            postgresql_version=data['version'],
            pgdata_directory=data['pgdata'],
            tablespaces=[]
        ))
        if data['tablespaces']:
            for item in data['tablespaces']:
                output['tablespaces'].append(dict(
                    name=item.name,
                    location=item.location,
                    oid=item.oid
                ))
        # Disk usage of the base backup, with and without WALs
        output['base_backup_information'] = dict(
            disk_usage=pretty_size(data['size']),
            disk_usage_bytes=data['size'],
            disk_usage_with_wals=pretty_size(
                data['size'] + data['wal_size']),
            disk_usage_with_wals_bytes=data['size'] + data['wal_size']
        )
        # Deduplication ratio, only when a deduplicated size is known
        if data['deduplicated_size'] is not None and data['size'] > 0:
            deduplication_ratio = (1 - (
                float(data['deduplicated_size']) / data['size']))
            output['base_backup_information'].update(dict(
                incremental_size=pretty_size(data['deduplicated_size']),
                incremental_size_bytes=data['deduplicated_size'],
                incremental_size_ratio='-{percent:.2%}'.format(
                    percent=deduplication_ratio)
            ))
        output['base_backup_information'].update(dict(
            timeline=data['timeline'],
            begin_wal=data['begin_wal'],
            end_wal=data['end_wal']
        ))
        if data['wal_compression_ratio'] > 0:
            output['base_backup_information'].update(dict(
                wal_compression_ratio='{percent:.2%}'.format(
                    percent=data['wal_compression_ratio'])
            ))
        # NOTE(review): strftime('%s') (epoch seconds) is a
        # platform-specific extension, not portable C strftime —
        # presumably fine on the supported platforms; verify
        output['base_backup_information'].update(dict(
            begin_time_timestamp=data['begin_time'].strftime('%s'),
            begin_time=data['begin_time'].isoformat(sep=' '),
            end_time_timestamp=data['end_time'].strftime('%s'),
            end_time=data['end_time'].isoformat(sep=' ')
        ))
        copy_stats = data.get('copy_stats')
        if copy_stats:
            copy_time = copy_stats.get('copy_time')
            analysis_time = copy_stats.get('analysis_time', 0)
            if copy_time:
                output['base_backup_information'].update(dict(
                    copy_time=human_readable_timedelta(
                        datetime.timedelta(seconds=copy_time)),
                    copy_time_seconds=copy_time,
                    analysis_time=human_readable_timedelta(
                        datetime.timedelta(seconds=analysis_time)),
                    analysis_time_seconds=analysis_time
                ))
                # Throughput is computed on the deduplicated size when
                # available
                size = data['deduplicated_size'] or data['size']
                output['base_backup_information'].update(dict(
                    throughput="%s/s" % pretty_size(size / copy_time),
                    throughput_bytes=size / copy_time,
                    number_of_workers=copy_stats.get(
                        'number_of_workers', 1)
                ))
        output['base_backup_information'].update(dict(
            begin_offset=data['begin_offset'],
            end_offset=data['end_offset'],
            begin_lsn=data['begin_xlog'],
            end_lsn=data['end_xlog']
        ))
        wal_output = output['wal_information'] = dict(
            no_of_files=data['wal_until_next_num'],
            disk_usage=pretty_size(data['wal_until_next_size']),
            disk_usage_bytes=data['wal_until_next_size'],
            wal_rate=0,
            wal_rate_per_second=0,
            compression_ratio=0,
            last_available=data['wal_last'],
            timelines=[]
        )
        # TODO: move the following calculations in a separate function
        # or upstream (backup_ext_info?) so that they are shared with
        # console output.
        if data['wals_per_second'] > 0:
            wal_output['wal_rate'] = \
                "%0.2f/hour" % (data['wals_per_second'] * 3600)
            wal_output['wal_rate_per_second'] = data['wals_per_second']
        if data['wal_until_next_compression_ratio'] > 0:
            wal_output['compression_ratio'] = '{percent:.2%}'.format(
                percent=data['wal_until_next_compression_ratio'])
        if data['children_timelines']:
            # The continuation lines start at column zero on purpose:
            # a backslash inside the literal keeps the string
            # single-spaced
            wal_output['_WARNING'] = "WAL information is inaccurate \
due to multiple timelines interacting with \
this backup"
            for history in data['children_timelines']:
                wal_output['timelines'].append(str(history.tli))
        # setdefault also covers the case where the key is absent
        previous_backup_id = data.setdefault(
            'previous_backup_id', 'not available')
        next_backup_id = data.setdefault('next_backup_id', 'not available')
        output['catalog_information'] = {
            'retention_policy':
                data['retention_policy_status'] or 'not enforced',
            'previous_backup':
                previous_backup_id or '- (this is the oldest base backup)',
            'next_backup':
                next_backup_id or '- (this is the latest base backup)'}
    else:
        if data['error']:
            output['error'] = data['error']
def init_status(self, server_name):
    """Prepare the JSON structure for a status command.

    :param str server_name: the server whose status follows
    """
    # Guard against being invoked before __init__ created json_output
    if not hasattr(self, 'json_output'):
        self.json_output = {}
    self.json_output[server_name] = {}
def result_status(self, server_name, status, description, message):
    """Store one line of a server status command in the JSON output.

    :param str server_name: the server being checked
    :param str status: machine-readable status code, used as key
    :param str description: human-readable status description
    :param str,object message: status message; coerced to ``str``
    """
    self.json_output[server_name][status] = {
        'description': description,
        'message': str(message),
    }
def init_replication_status(self, server_name, minimal=False):
    """Prepare the 'replication-status' JSON output.

    :param str server_name: the server about to be listed
    :param bool minimal: when True, record the compact format
    """
    # Guard against being invoked before __init__ created json_output
    if not hasattr(self, 'json_output'):
        self.json_output = {}
    self.json_output[server_name] = {}
    self.minimal = minimal
def result_replication_status(self, server_name, target, server_lsn,
                              standby_info):
    """
    Record a result line of a replication-status command
    in the JSON output

    :param str server_name: the replication server
    :param str target: all|hot-standby|wal-streamer
    :param str server_lsn: server's current lsn
    :param StatReplication standby_info: status info of a standby
    """
    # Human readable title for the requested class of clients
    if target == 'hot-standby':
        title = 'hot standby servers'
    elif target == 'wal-streamer':
        title = 'WAL streamers'
    else:
        title = 'streaming clients'
    title_key = self._mangle_key(title)
    if title_key not in self.json_output[server_name]:
        self.json_output[server_name][title_key] = []
    self.json_output[server_name]['server_lsn'] = \
        server_lsn if server_lsn else None
    if standby_info is not None and not len(standby_info):
        self.json_output[server_name]['standby_info'] = \
            "No %s attached" % title
        return
    self.json_output[server_name][title_key] = []
    # Minimal output
    if self.minimal:
        for idx, standby in enumerate(standby_info):
            if not standby.replay_lsn:
                # WAL streamer
                self.json_output[server_name][title_key].append(dict(
                    user_name=standby.usename,
                    client_addr=standby.client_addr or 'socket',
                    sent_lsn=standby.sent_lsn,
                    write_lsn=standby.write_lsn,
                    sync_priority=standby.sync_priority,
                    application_name=standby.application_name
                ))
            else:
                # Standby
                self.json_output[server_name][title_key].append(dict(
                    sync_state=standby.sync_state[0].upper(),
                    user_name=standby.usename,
                    client_addr=standby.client_addr or 'socket',
                    sent_lsn=standby.sent_lsn,
                    flush_lsn=standby.flush_lsn,
                    replay_lsn=standby.replay_lsn,
                    sync_priority=standby.sync_priority,
                    application_name=standby.application_name
                ))
    else:
        for idx, standby in enumerate(standby_info):
            self.json_output[server_name][title_key].append({})
            json_output = self.json_output[server_name][title_key][idx]
            # Calculate differences in bytes
            lsn_diff = dict(
                sent=diff_lsn(standby.sent_lsn, standby.current_lsn),
                write=diff_lsn(standby.write_lsn, standby.current_lsn),
                flush=diff_lsn(standby.flush_lsn, standby.current_lsn),
                replay=diff_lsn(standby.replay_lsn, standby.current_lsn)
            )
            # Determine the sync stage of the client
            sync_stage = None
            if not standby.replay_lsn:
                client_type = 'WAL streamer'
                max_level = 3
            else:
                client_type = 'standby'
                max_level = 5
                # Only standby can replay WAL info
                if lsn_diff['replay'] == 0:
                    sync_stage = '5/5 Hot standby (max)'
                elif lsn_diff['flush'] == 0:
                    sync_stage = '4/5 2-safe'  # remote flush
            # If not yet done, set the sync stage
            if not sync_stage:
                if lsn_diff['write'] == 0:
                    sync_stage = '3/%s Remote write' % max_level
                elif lsn_diff['sent'] == 0:
                    sync_stage = '2/%s WAL Sent (min)' % max_level
                else:
                    sync_stage = '1/%s 1-safe' % max_level
            # Synchronous standby. A missing or NULL sync_priority is
            # treated as asynchronous: the previous code compared the
            # getattr() result (possibly None) with 0, which raises
            # TypeError on Python 3.
            if (getattr(standby, 'sync_priority', 0) or 0) > 0:
                json_output['name'] = "#%s %s %s" % (
                    standby.sync_priority,
                    standby.sync_state.capitalize(),
                    client_type)
            # Asynchronous standby
            else:
                json_output['name'] = "%s %s" % (
                    standby.sync_state.capitalize(),
                    client_type)
            json_output['application_name'] = standby.application_name
            json_output['sync_stage'] = sync_stage
            if getattr(standby, 'client_addr', None):
                json_output.update(dict(
                    communication="TCP/IP",
                    ip_address=standby.client_addr,
                    port=standby.client_port,
                    host=standby.client_hostname or None
                ))
            else:
                json_output['communication'] = "Unix domain socket"
            json_output.update(dict(
                user_name=standby.usename,
                current_state=standby.state,
                current_sync_state=standby.sync_state
            ))
            if getattr(standby, 'slot_name', None):
                json_output['replication_slot'] = standby.slot_name
            json_output.update(dict(
                wal_sender_pid=standby.pid,
                started_at=standby.backend_start.isoformat(sep=' '),
            ))
            if getattr(standby, 'backend_xmin', None):
                json_output['standbys_xmin'] = standby.backend_xmin or None
            # Record every LSN position (and its lag) the standby exposes
            for lsn in lsn_diff.keys():
                standby_key = lsn + '_lsn'
                if getattr(standby, standby_key, None):
                    json_output.update({
                        lsn + '_lsn': getattr(standby, standby_key),
                        lsn + '_lsn_diff': pretty_size(lsn_diff[lsn]),
                        lsn + '_lsn_diff_bytes': lsn_diff[lsn]
                    })
def init_list_server(self, server_name, minimal=False):
    """Prepare the list-server JSON output.

    :param str server_name: the server we are listing
    :param bool minimal: when True, only the names are relevant
    """
    self.json_output[server_name] = {}
    self.minimal = minimal
def result_list_server(self, server_name, description=None):
    """Record one server entry for the list-server command.

    :param str server_name: the server being listed
    :param str,None description: server description, if any
    """
    self.json_output[server_name] = {'description': description}
def init_show_server(self, server_name):
    """Prepare the show-server JSON output.

    :param str server_name: the server we are displaying
    """
    self.json_output[server_name] = {}
def result_show_server(self, server_name, server_info):
    """Store the show-server results, coercing exotic values to str.

    :param str server_name: the server we are displaying
    :param dict server_info: attribute name -> value mapping
    """
    # Values that can be serialized to JSON as-is
    json_safe = (int, str, bool, list, dict, type(None))
    for key in sorted(server_info):
        value = server_info[key]
        if not isinstance(value, json_safe):
            value = str(value)
        self.json_output[server_name][key] = value
class NagiosOutputWriter(ConsoleOutputWriter):
    """
    Nagios output writer.

    This writer doesn't output anything to console.
    On close it writes a nagios-plugin compatible status
    """

    def _out(self, message, args):
        """
        Do not print anything on standard output
        """

    def _err(self, message, args):
        """
        Do not print anything on standard error
        """

    def close(self):
        """
        Display the result of a check run as expected by Nagios.

        Also set the exit code as 2 (CRITICAL) in case of errors
        """
        # error_occurred / error_exit_code are module-level flags shared
        # with the rest of this output module
        global error_occurred, error_exit_code
        # List of all servers that have been checked
        servers = []
        # List of servers reporting issues
        issues = []
        # result_check_list entries are dicts with server_name, check,
        # status and hint keys (populated by result_check)
        for item in self.result_check_list:
            # Keep track of all the checked servers
            if item['server_name'] not in servers:
                servers.append(item['server_name'])
            # Keep track of the servers with issues
            if not item['status'] and item['server_name'] not in issues:
                issues.append(item['server_name'])
        # Global error (detected at configuration level)
        if len(issues) == 0 and error_occurred:
            print("BARMAN CRITICAL - Global configuration errors")
            error_exit_code = 2
            return
        if len(issues) > 0 and error_occurred:
            fail_summary = []
            details = []
            for server in issues:
                # Join all the issues for a server. Output format is in the
                # form:
                # "<server_name> FAILED: <failed_check1>, <failed_check2> ... "
                # All strings will be concatenated into the $SERVICEOUTPUT$
                # macro of the Nagios output
                server_fail = "%s FAILED: %s" % (
                    server,
                    ", ".join([
                        item['check']
                        for item in self.result_check_list
                        if item['server_name'] == server and not item['status']
                    ]))
                fail_summary.append(server_fail)
                # Prepare an array with the detailed output for
                # the $LONGSERVICEOUTPUT$ macro of the Nagios output
                # line format:
                # <servername>.<failed_check1>: FAILED
                # <servername>.<failed_check2>: FAILED (Hint if present)
                # <servername2.<failed_check1>: FAILED
                # .....
                for issue in self.result_check_list:
                    if issue['server_name'] == server and not issue['status']:
                        fail_detail = "%s.%s: FAILED" % (server,
                                                         issue['check'])
                        if issue['hint']:
                            fail_detail += " (%s)" % issue['hint']
                        details.append(fail_detail)
            # Append the summary of failures to the first line of the output
            # using * as delimiter
            if len(servers) == 1:
                print("BARMAN CRITICAL - server %s has issues * %s" %
                      (servers[0], " * ".join(fail_summary)))
            else:
                print("BARMAN CRITICAL - %d server out of %d have issues * "
                      "%s" % (len(issues), len(servers),
                              " * ".join(fail_summary)))
            # add the detailed list to the output
            for issue in details:
                print(issue)
            error_exit_code = 2
        elif len(issues) > 0 and not error_occurred:
            # Some issues, but only in skipped server
            good = [item for item in servers if item not in issues]
            # Display the output message for a single server check
            if len(good) == 0:
                print("BARMAN OK - No server configured * IGNORING: %s" %
                      (" * IGNORING: ".join(issues)))
            elif len(good) == 1:
                print("BARMAN OK - Ready to serve the Espresso backup "
                      "for %s * IGNORING: %s" %
                      (good[0], " * IGNORING: ".join(issues)))
            else:
                # Display the output message for several servers, using
                # '*' as delimiter
                print("BARMAN OK - Ready to serve the Espresso backup "
                      "for %d servers * %s * IGNORING: %s" % (
                          len(good),
                          " * ".join(good),
                          " * IGNORING: ".join(issues)))
        else:
            # No issues, all good!
            # Display the output message for a single server check
            if not len(servers):
                print("BARMAN OK - No server configured")
            elif len(servers) == 1:
                print("BARMAN OK - Ready to serve the Espresso backup "
                      "for %s" %
                      (servers[0]))
            else:
                # Display the output message for several servers, using
                # '*' as delimiter
                print("BARMAN OK - Ready to serve the Espresso backup "
                      "for %d servers * %s" % (
                          len(servers), " * ".join(servers)))
#: This dictionary acts as a registry of available OutputWriters
AVAILABLE_WRITERS = {
    'console': ConsoleOutputWriter,
    'json': JsonOutputWriter,
    # nagios is not registered as it isn't a general purpose output writer
    # 'nagios': NagiosOutputWriter,
}

#: The default OutputWriter
DEFAULT_WRITER = 'console'

#: the current active writer. Initialized according to DEFAULT_WRITER on load
_writer = AVAILABLE_WRITERS[DEFAULT_WRITER]()
| 2ndquadrant-it/barman | barman/output.py | Python | gpl-3.0 | 65,249 | [
"ESPResSo"
] | d1549a26efc2bfa159ed14e541d2471311362379d30b70d3ef804fbab0f3610f |
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
import ast
import os
import re
from hacking import core
# Files where a bare underscore import has been seen (shared state used
# by the translation related checks)
UNDERSCORE_IMPORT_FILES = []

# Pre-compiled regular expressions used by the N3xx checks below
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
# Excludes oslo.config OptGroup objects
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
rule_default_re = re.compile(r".*RuleDefault\(")
policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
    r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
    r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
asse_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    r"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    r"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(
    r"assertEqual\("
    r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(
    r"assertEqual\("
    r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
# asse_true_false_with_in_or_not_in regex checks
# assertTrue/False(A in B) cases where B argument has no spaces
# asse_true_false_with_in_or_not_in_spaces regex checks cases
# where B argument has spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# We have to separate these regexes to escape a false positives
# results. B argument should have spaces only if it starts
# with [, ", '. Otherwise checking of string
# "assertFalse(A in B and C in D)" will be false positives.
# In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(
    r"assert(True|False)\("
    r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(
    r"assert(True|False)"
    r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
    r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
translated_log = re.compile(r"(.)*LOG\.\w+\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
    r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*\bapi_version\b")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
    r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
    r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
log_remove_context = re.compile(
    r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
return_not_followed_by_space = re.compile(r"^\s*return(?:\(|{|\"|'|#).*$")
uuid4_re = re.compile(r"uuid4\(\)($|[^\.]|\.hex)")
redundant_import_alias_re = re.compile(r"import (?:.*\.)?(.+) as \1$")
yield_not_followed_by_space = re.compile(r"^\s*yield(?:\(|{|\[|\"|').*$")
asse_regexpmatches = re.compile(
    r"(assertRegexpMatches|assertNotRegexpMatches)\(")
privsep_file_re = re.compile('^nova/privsep[./]')
privsep_import_re = re.compile(
    r"^(?:import|from).*\bprivsep\b")
# Redundant parenthetical masquerading as a tuple, used with ``in``:
# Space, "in", space, open paren
# Optional single or double quote (so we match strings or symbols)
# A sequence of the characters that can make up a symbol. (This is weak: a
# string can contain other characters; and a numeric symbol can start with a
# minus, and a method call has a param list, and... Not sure this gets better
# without a lexer.)
# The same closing quote
# Close paren
disguised_as_tuple_re = re.compile(r''' in \((['"]?)[a-zA-Z0-9_.]+\1\)''')
# NOTE(takashin): The patterns of non-existent mock assertion methods and
# attributes do not cover all cases. If you find a new pattern,
# add the pattern in the following regex patterns.
mock_assert_method_re = re.compile(
    r"\.((called_once(_with)*|has_calls)|"
    r"mock_assert_(called(_(once|with|once_with))?"
    r"|any_call|has_calls|not_called)|"
    r"(asser|asset|asssert|assset)_(called(_(once|with|once_with))?"
    r"|any_call|has_calls|not_called))\(")
mock_attribute_re = re.compile(r"[\.\(](retrun_value)[,=\s]")
# Regex for useless assertions
useless_assertion_re = re.compile(
    r"\.((assertIsNone)\(None|(assertTrue)\((True|\d+|'.+'|\".+\")),")
# Regex for misuse of assert_has_calls
mock_assert_has_calls_re = re.compile(r"\.assert_has_calls\s?=")
class BaseASTChecker(ast.NodeVisitor):
    """Small framework for writing AST-based checks.

    Subclasses implement ``visit_*`` methods like any other
    ``ast.NodeVisitor`` and call ``self.add_error(offending_node)`` when a
    node violates the check; the error location is pulled from the node
    itself.  Subclasses should also provide a class variable named
    ``CHECK_DESC`` holding the human readable error message.
    """

    def __init__(self, tree, filename):
        """This object is created automatically by pycodestyle.

        :param tree: an AST tree
        :param filename: name of the file being analyzed
                         (ignored by our checks)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Called automatically by pycodestyle."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error attributed to ``node``."""
        text = message or self.CHECK_DESC
        self._errors.append(
            (node.lineno, node.col_offset, text, self.__class__))

    def _check_call_names(self, call_node, names):
        # True only for a plain-name call whose callee is one of ``names``.
        return (isinstance(call_node, ast.Call) and
                isinstance(call_node.func, ast.Name) and
                call_node.func.id in names)
@core.flake8ext
def import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt

    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.

    N307
    """
    in_virt = "nova/virt" in filename and not filename.endswith("fake.py")
    if in_virt and logical_line.startswith("from nova.db import api"):
        yield (0, "N307: nova.db.api import not allowed in nova/virt/*")
@core.flake8ext
def no_db_session_in_public_api(logical_line, filename):
    """Public db api methods must not accept a session argument (N309)."""
    if "db/api.py" in filename and session_check.match(logical_line):
        yield (0, "N309: public db api methods may not accept session")
@core.flake8ext
def use_timeutils_utcnow(logical_line, filename):
    """Require timeutils.utcnow() instead of the datetime module (N310)."""
    # tools are OK to use the standard datetime module
    if "/tools/" in filename:
        return
    msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
    for func in ('now', 'utcnow'):
        pos = logical_line.find('datetime.%s' % func)
        if pos != -1:
            yield (pos, msg % func)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "disk", "api", "imagecache",
"cpu", "hardware", "image"]:
return None
return driver
@core.flake8ext
def import_no_virt_driver_import_deps(physical_line, filename):
    """Check virt drivers' modules aren't imported by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not access those drivers. Any code that
    is to be shared should be refactored into a common
    module

    N311
    """
    importing_driver = _get_virt_name(virt_file_re, filename)
    imported_driver = _get_virt_name(virt_import_re, physical_line)
    if (importing_driver is not None and imported_driver is not None and
            importing_driver != imported_driver):
        return (0, "N311: importing code from other virt drivers forbidden")
@core.flake8ext
def import_no_virt_driver_config_deps(physical_line, filename):
    """Check virt drivers' config vars aren't used by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not use their config vars. Any config vars
    that are to be shared should be moved into a common module

    N312
    """
    using_driver = _get_virt_name(virt_file_re, filename)
    owning_driver = _get_virt_name(virt_config_re, physical_line)
    if (using_driver is not None and owning_driver is not None and
            using_driver != owning_driver):
        return (0, "N312: using config vars from other virt drivers forbidden")
@core.flake8ext
def capital_cfg_help(logical_line, tokens):
    """Config option help strings should start with a capital letter (N313)."""
    if not cfg_re.match(logical_line):
        return
    for index in range(len(tokens)):
        if tokens[index][1] == "help":
            # The help string literal sits two tokens past 'help'.
            txt = tokens[index + 2][1]
            if len(txt) > 1 and txt[1].islower():
                yield (0, "N313: capitalize help string")
@core.flake8ext
def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    N316
    """
    match = asse_trueinst_re.match(logical_line)
    if match:
        yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
@core.flake8ext
def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences

    N317
    """
    match = asse_equal_type_re.match(logical_line)
    if match:
        yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
@core.flake8ext
def no_translate_logs(logical_line, filename):
    """Check for 'LOG.foo(_('

    As per our translation policy, we shouldn't translate logs.
    This check assumes that 'LOG' is a logger.

    N319
    """
    match = translated_log.match(logical_line)
    if match:
        yield (0, "N319 Don't translate logs")
@core.flake8ext
def no_import_translation_in_tests(logical_line, filename):
    """Check for 'from nova.i18n import _'

    N337
    """
    if 'nova/tests/' not in filename:
        return
    if import_translation_for_log_or_exception.match(logical_line):
        yield (0, "N337 Don't import translation in tests")
@core.flake8ext
def no_setting_conf_directly_in_tests(logical_line, filename):
    """Check for setting CONF.* attributes directly in tests

    The value can leak out of tests affecting how subsequent tests run.
    Using self.flags(option=value) is the preferred method to temporarily
    set config options in tests.

    N320
    """
    if 'nova/tests/' not in filename:
        return
    if conf_attribute_set_re.match(logical_line):
        yield (0, "N320: Setting CONF.* attributes directly in tests is "
                  "forbidden. Use self.flags(option=value) instead")
@core.flake8ext
def no_mutable_default_args(logical_line):
    """Forbid mutable default arguments (N322)."""
    if mutable_default_args.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
@core.flake8ext
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function

    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function.  We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.
    """
    # Files already known to import _ need no further checking.
    if filename in UNDERSCORE_IMPORT_FILES:
        return
    if (underscore_import_check.match(logical_line) or
            custom_underscore_check.match(logical_line)):
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif string_translation.match(logical_line):
        yield (0, "N323: Found use of _() without explicit import of _ !")
@core.flake8ext
def use_jsonutils(logical_line, filename):
    """Require oslo jsonutils instead of the stdlib json module (N324)."""
    # tools are OK to use the standard json module
    if "/tools/" in filename:
        return
    if "json." not in logical_line:
        return
    msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        pos = logical_line.find('json.%s' % func)
        if pos != -1:
            yield (pos, msg % {'fun': func[:-1]})
@core.flake8ext
def check_api_version_decorator(logical_line, previous_logical, blank_before,
                                filename):
    """api_version must be the outermost decorator on a method (N332)."""
    if (blank_before == 0 and re.match(api_version_re, logical_line) and
            re.match(decorator_re, previous_logical)):
        yield (0, "N332: the api_version decorator must be the first decorator"
                  " on a method.")
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.

    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """

    name = 'check_for_trans_add'
    version = '0.1'

    CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')

    TRANS_FUNC = ['_']

    def visit_BinOp(self, node):
        # Only flag ``+`` where either operand is a direct ``_()`` call.
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if (isinstance(operand, ast.Call) and
                        isinstance(operand.func, ast.Name) and
                        operand.func.id == '_'):
                    self.add_error(operand)
        super(CheckForTransAdd, self).generic_visit(node)
class _FindVariableReferences(ast.NodeVisitor):
def __init__(self):
super(_FindVariableReferences, self).__init__()
self._references = []
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
# This means the value of a variable was loaded. For example a
# variable 'foo' was used like:
# mocked_thing.bar = foo
# foo()
# self.assertRaises(exception, foo)
self._references.append(node.id)
super(_FindVariableReferences, self).generic_visit(node)
class CheckForUncalledTestClosure(BaseASTChecker):
    """Look for closures that are never called in tests.

    A recurring pattern when using multiple mocks is to create a closure
    decorated with mocks like:

        def test_thing(self):
            @mock.patch.object(self.compute, 'foo')
            @mock.patch.object(self.compute, 'bar')
            def _do_test(mock_bar, mock_foo):
                # Test things
            _do_test()

    However it is easy to leave off the _do_test() and have the test pass
    because nothing runs. This check looks for methods defined within a test
    method and ensures that there is a reference to them. Only methods defined
    one level deep are checked. Something like:

        def test_thing(self):
            class FakeThing:
                def foo(self):

    would not ensure that foo is referenced.

    N349
    """

    name = 'check_for_uncalled_test_closure'
    version = '0.1'

    def __init__(self, tree, filename):
        super(CheckForUncalledTestClosure, self).__init__(tree, filename)
        # Remember the filename so visit_FunctionDef can skip non-test files.
        self._filename = filename

    def visit_FunctionDef(self, node):
        # Only inspect test modules.
        # self._filename is 'stdin' in the unit test for this check.
        if (not os.path.basename(self._filename).startswith('test_') and
                os.path.basename(self._filename) != 'stdin'):
            return

        closures = []
        references = []
        # Walk just the direct nodes of the test method
        for child_node in ast.iter_child_nodes(node):
            if isinstance(child_node, ast.FunctionDef):
                closures.append(child_node.name)

        # Walk all nodes to find references
        find_references = _FindVariableReferences()
        find_references.generic_visit(node)
        references = find_references._references

        # Any closure never loaded anywhere within the test body was
        # defined but never called.
        missed = set(closures) - set(references)
        if missed:
            self.add_error(node, 'N349: Test closures not called: %s'
                    % ','.join(missed))
@core.flake8ext
def assert_true_or_false_with_in(logical_line):
    """Check for assertTrue/False(A in B), assertTrue/False(A not in B),
    assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
    sentences.

    N334
    """
    match = (asse_true_false_with_in_or_not_in.search(logical_line) or
             asse_true_false_with_in_or_not_in_spaces.search(logical_line))
    if match:
        yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
                  "assertTrue/False(A in/not in B) when checking collection "
                  "contents.")
@core.flake8ext
def assert_raises_regexp(logical_line):
    """Check for usage of deprecated assertRaisesRegexp

    N335
    """
    if asse_raises_regexp.search(logical_line):
        yield (0, "N335: assertRaisesRegex must be used instead "
                  "of assertRaisesRegexp")
@core.flake8ext
def dict_constructor_with_list_copy(logical_line):
    """Require dict comprehensions over dict(sequence-of-pairs) (N336)."""
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, "N336: Must use a dict comprehension instead of a dict "
                  "constructor with a sequence of key-value pairs.")
@core.flake8ext
def assert_equal_in(logical_line):
    """Check for assertEqual(A in B, True), assertEqual(True, A in B),
    assertEqual(A in B, False) or assertEqual(False, A in B) sentences

    N338
    """
    match = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
             asse_equal_in_end_with_true_or_false_re.search(logical_line))
    if match:
        yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
                  "assertEqual(A in B, True/False) when checking collection "
                  "contents.")
@core.flake8ext
def check_http_not_implemented(logical_line, filename, noqa):
    """HTTPNotImplemented must use raise_feature_not_supported() (N339)."""
    if noqa:
        return
    if "nova/api/openstack/compute" not in filename:
        return
    if re.match(http_not_implemented_re, logical_line):
        yield (0, "N339: HTTPNotImplemented response must be implemented with"
                  " common raise_feature_not_supported().")
@core.flake8ext
def check_greenthread_spawns(logical_line, filename):
    """Check for use of greenthread.spawn(), greenthread.spawn_n(),
    eventlet.spawn(), and eventlet.spawn_n()

    N340
    """
    # nova.utils wraps these calls; tests may use them directly.
    if "nova/utils.py" in filename or "nova/tests/" in filename:
        return
    match = re.match(spawn_re, logical_line)
    if match:
        yield (0, ("N340: Use nova.utils.%(spawn)s() rather than "
                   "greenthread.%(spawn)s() and eventlet.%(spawn)s()")
               % {'spawn': match.group('spawn_part')})
@core.flake8ext
def check_no_contextlib_nested(logical_line, filename):
    """Flag deprecated contextlib.nested usage (N341)."""
    if contextlib_nested.match(logical_line):
        yield (0, "N341: contextlib.nested is deprecated. With Python 2.7 "
                  "and later "
                  "the with-statement supports multiple nested objects. "
                  "See https://"
                  "docs.python.org/2/library/contextlib.html"
                  "#contextlib.nested for "
                  "more information. nova.test.nested() is an alternative "
                  "as well.")
@core.flake8ext
def check_config_option_in_central_place(logical_line, filename):
    """Config options must be declared under /nova/conf/* (N342)."""
    # That's the correct location
    if "nova/conf/" in filename:
        return
    # (macsz) All config options (with exceptions that are clarified
    # in the list below) were moved to the central place. List below is for
    # all options that were impossible to move without doing a major impact
    # on code. Add full path to a module or folder.
    conf_exceptions = (
        # CLI opts are allowed to be outside of nova/conf directory
        'nova/cmd/manage.py',
        'nova/cmd/policy.py',
        'nova/cmd/status.py',
        # config options should not be declared in tests, but there is
        # another checker for it (N320)
        'nova/tests',
    )
    for allowed in conf_exceptions:
        if allowed in filename:
            return
    if cfg_opt_re.match(logical_line):
        yield (0, "N342: Config options should be in the central location "
                  "'/nova/conf/*'. Do not declare new config options outside "
                  "of that folder.")
@core.flake8ext
def check_policy_registration_in_central_place(logical_line, filename):
    """Policies must be registered under /nova/policies/* (N350)."""
    # This is where registration should happen
    if "nova/policies/" in filename:
        return
    # A couple of policy tests register rules
    if "nova/tests/unit/test_policy.py" in filename:
        return
    if rule_default_re.match(logical_line):
        yield (0, 'N350: Policy registration should be in the central '
                  'location(s) "/nova/policies/*"')
@core.flake8ext
def check_policy_enforce(logical_line, filename):
    """Look for uses of nova.policy._ENFORCER.enforce()

    Now that policy defaults are registered in code the _ENFORCER.authorize
    method should be used. That ensures that only registered policies are used.
    Uses of _ENFORCER.enforce could allow unregistered policies to be used, so
    this check looks for uses of that method.

    N351
    """
    if policy_enforce_re.match(logical_line):
        yield (0, 'N351: nova.policy._ENFORCER.enforce() should not be used. '
                  'Use the authorize() method instead.')
@core.flake8ext
def check_doubled_words(physical_line, filename):
    """Check for the common doubled-word typos

    N343
    """
    match = doubled_words_re.search(physical_line)
    if match:
        return (0, "N343: Doubled word '%(word)s' typo found"
                   % {'word': match.group(1)})
@core.flake8ext
def no_os_popen(logical_line):
    """Disallow 'os.popen('

    Deprecated library function os.popen() Replace it using subprocess
    https://bugs.launchpad.net/tempest/+bug/1529836

    N348
    """
    if 'os.popen(' in logical_line:
        yield (0, 'N348 Deprecated library function os.popen(). '
                  'Replace it using subprocess module. ')
@core.flake8ext
def no_log_warn(logical_line):
    """Disallow 'LOG.warn('

    Deprecated LOG.warn(), instead use LOG.warning
    https://bugs.launchpad.net/senlin/+bug/1508442

    N352
    """
    if "LOG.warn(" in logical_line:
        yield (0, "N352: LOG.warn is deprecated, please use LOG.warning!")
@core.flake8ext
def check_context_log(logical_line, filename, noqa):
    """check whether context is being passed to the logs

    Not correct: LOG.info("Rebooting instance", context=context)
    Correct:     LOG.info("Rebooting instance")

    https://bugs.launchpad.net/nova/+bug/1500896

    N353
    """
    if noqa or "nova/tests" in filename:
        return
    if log_remove_context.match(logical_line):
        yield (0,
               "N353: Nova is using oslo.context's RequestContext "
               "which means the context object is in scope when "
               "doing logging using oslo.log, so no need to pass it as "
               "kwarg.")
@core.flake8ext
def no_assert_equal_true_false(logical_line):
    """Enforce use of assertTrue/assertFalse.

    Prevent use of assertEqual(A, True|False), assertEqual(True|False, A),
    assertNotEqual(A, True|False), and assertNotEqual(True|False, A).

    N355
    """
    _start_re = re.compile(r'assert(Not)?Equal\((True|False),')
    _end_re = re.compile(r'assert(Not)?Equal\(.*,\s+(True|False)\)$')

    if _start_re.search(logical_line) or _end_re.search(logical_line):
        # Bug fix: the fourth listed form is assertNotEqual(True|False, A);
        # the message previously repeated assertEqual(True|False, A), which
        # contradicted the docstring above.
        yield (0, "N355: assertEqual(A, True|False), "
               "assertEqual(True|False, A), assertNotEqual(A, True|False), "
               "or assertNotEqual(True|False, A) sentences must not be used. "
               "Use assertTrue(A) or assertFalse(A) instead")
@core.flake8ext
def no_assert_true_false_is_not(logical_line):
    """Enforce use of assertIs/assertIsNot.

    Prevent use of assertTrue(A is|is not B) and assertFalse(A is|is not B).

    N356
    """
    pattern = re.compile(r'assert(True|False)\(.+\s+is\s+(not\s+)?.+\)$')
    if pattern.search(logical_line):
        yield (0, "N356: assertTrue(A is|is not B) or "
                  "assertFalse(A is|is not B) sentences must not be used. "
                  "Use assertIs(A, B) or assertIsNot(A, B) instead")
@core.flake8ext
def check_uuid4(logical_line):
    """Generating UUID

    Use oslo_utils.uuidutils or uuidsentinel(in case of test cases) to generate
    UUID instead of uuid4().

    N357
    """
    if uuid4_re.search(logical_line):
        yield (0, "N357: Use oslo_utils.uuidutils or uuidsentinel(in case "
                  "of test cases) to generate UUID instead of uuid4().")
@core.flake8ext
def return_followed_by_space(logical_line):
    """Return should be followed by a space.

    Return should be followed by a space to clarify that return is
    not a function. Adding a space may force the developer to rethink
    if there are unnecessary parentheses in the written code.

    Not correct: return(42), return(a, b)
    Correct: return, return 42, return (a, b), return a, b

    N358
    """
    if return_not_followed_by_space.match(logical_line):
        yield (0, "N358: Return keyword should be followed by a space.")
@core.flake8ext
def no_redundant_import_alias(logical_line):
    """Check for redundant import aliases.

    Imports should not be in the forms below.

    from x import y as y
    import x as x
    import x.y as y

    N359
    """
    if redundant_import_alias_re.search(logical_line):
        yield (0, "N359: Import alias should not be redundant.")
@core.flake8ext
def yield_followed_by_space(logical_line):
    """Yield should be followed by a space.

    Yield should be followed by a space to clarify that yield is
    not a function. Adding a space may force the developer to rethink
    if there are unnecessary parentheses in the written code.

    Not correct: yield(x), yield(a, b)
    Correct: yield x, yield (a, b), yield a, b

    N360
    """
    if yield_not_followed_by_space.match(logical_line):
        yield (0, "N360: Yield keyword should be followed by a space.")
@core.flake8ext
def assert_regexpmatches(logical_line):
    """Check for usage of deprecated assertRegexpMatches/assertNotRegexpMatches

    N361
    """
    if asse_regexpmatches.search(logical_line):
        yield (0, "N361: assertRegex/assertNotRegex must be used instead "
                  "of assertRegexpMatches/assertNotRegexpMatches.")
@core.flake8ext
def privsep_imports_not_aliased(logical_line, filename):
    """Do not abbreviate or alias privsep module imports.

    When accessing symbols under nova.privsep in code or tests, the full
    module path (e.g. nova.privsep.path.readfile(...)) should be used
    explicitly rather than importing and using an alias/abbreviation such as:

        from nova.privsep import path
        ...
        path.readfile(...)

    See Ief177dbcb018da6fbad13bb0ff153fc47292d5b9.

    N362
    """
    # Give modules under nova.privsep a pass.
    if privsep_file_re.match(filename):
        return
    # Any style of privsep import that isn't 'import nova.privsep[.foo...]'
    # contains more than one space on the logical line.
    if privsep_import_re.match(logical_line) and logical_line.count(' ') > 1:
        yield (0, "N362: always import privsep modules so that the use of "
               "escalated permissions is obvious to callers. For example, "
               "use 'import nova.privsep.path' instead of "
               "'from nova.privsep import path'.")
@core.flake8ext
def did_you_mean_tuple(logical_line):
    """Disallow ``(not_a_tuple)`` because you meant ``(a_tuple_of_one,)``.

    N363
    """
    if disguised_as_tuple_re.search(logical_line):
        yield (0, "N363: You said ``in (not_a_tuple)`` when you almost "
                  "certainly meant ``in (a_tuple_of_one,)``.")
@core.flake8ext
def nonexistent_assertion_methods_and_attributes(logical_line, filename):
    """Check non-existent mock assertion methods and attributes.

    The following assertion methods do not exist.

    - called_once()
    - called_once_with()
    - has_calls()
    - mock_assert_*()

    The following typos were found in the past cases.

    - asser_*
    - asset_*
    - assset_*
    - asssert_*
    - retrun_value

    N364
    """
    if 'nova/tests/' not in filename:
        return
    msg = ("N364: Non existent mock assertion method or attribute (%s) is "
           "used. Check a typo or whether the assertion method should begin "
           "with 'assert_'.")
    for pattern in (mock_assert_method_re, mock_attribute_re):
        match = pattern.search(logical_line)
        if match:
            yield (0, msg % match.group(1))
@core.flake8ext
def useless_assertion(logical_line, filename):
    """Check useless assertions in tests.

    The following assertions are useless.

    - assertIsNone(None, ...)
    - assertTrue(True, ...)
    - assertTrue(2, ...)                # Constant number
    - assertTrue('Constant string', ...)
    - assertTrue("Constant string", ...)

    They are usually misuses of assertIsNone or assertTrue.

    N365
    """
    if 'nova/tests/' not in filename:
        return
    match = useless_assertion_re.search(logical_line)
    if match:
        # Group 2 holds 'assertIsNone', group 3 'assertTrue'.
        yield (0, "N365: Misuse of %s." % (match.group(2) or match.group(3)))
@core.flake8ext
def check_assert_has_calls(logical_line, filename):
    """Check misuse of assert_has_calls.

    Not correct: mock_method.assert_has_calls = [mock.call(0)]
    Correct:     mock_method.assert_has_calls([mock.call(0)])

    N366
    """
    if 'nova/tests/' not in filename:
        return
    if mock_assert_has_calls_re.search(logical_line):
        yield (0, "N366: The assert_has_calls is a method rather than a "
                  "variable.")
| klmitch/nova | nova/hacking/checks.py | Python | apache-2.0 | 32,078 | [
"VisIt"
] | 14c3ef46bda7b430dcce20833de11793c8e5f746fb72166270d70c15b5dd3784 |
# Revision bookkeeping: register this module's revision string with the
# site-wide 'version' tracker, then drop the name from our namespace.
import version
version.append ('$Revision: 91660 $')
del version
# This file is 'kImage' instead of 'Image' because of the potential
# conflict with the Python Imaging Library.
try:
import Image
import ImageColor
import ImageDraw
except ImportError:
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import ktl
import Color
import Images
import Stage
default_size = 18
class Simple:
    ''' Base class for Image objects used within the GUI. This is not
        subclassed from Image.Image, because the PIL objects do not
        necessarily behave in an object-centric fashion.
    '''

    def __init__ (self, size=default_size, background='#ffffff'):

        # Base image for this object.
        self.base = None

        # 'Rendered' image for this object.
        self.image = None

        self.scale = 1
        self.background = None
        self.icon_size = None

        self.setBackground (background)
        self.setSize (size)


    def setBackground (self, color):
        ''' Store the (fully transparent) background color.
            Returns True if the stored color changed.
        '''

        if color is None:
            # Full transparency.
            new_color = (0, 0, 0, 0)
        else:
            rgb = ImageColor.getrgb (color)
            # Add transparency.
            new_color = (rgb[0], rgb[1], rgb[2], 0)

        if self.background == new_color:
            return False

        self.background = new_color
        return True


    def setSize (self, size):
        ''' Set the icon size in pixels (truncated to an integer).
            Returns True if the size changed.
        '''

        new_size = int (size)

        if new_size == self.icon_size:
            return False

        self.icon_size = new_size
        return True


    def setScale (self, scale):
        ''' Set the scaling factor applied at redraw time.
            Returns True if the factor changed.
        '''

        new_scale = float (scale)

        if new_scale == self.scale:
            return False

        self.scale = new_scale
        return True


    def setBase (self, image):
        ''' Install a new base image. Returns True if it changed. '''

        if image == self.base:
            return False

        self.base = image
        return True


    def redraw (self):
        ''' Update our image content based on status changes.
        '''

        size = int (self.icon_size * self.scale)

        new_image = Image.new ('RGBA', (size, size), self.background)

        # Image coordinate (0,0) is the upper-left corner.

        # The image being pasted is also specified as the mask,
        # the third argument to Image.paste (). This eliminates
        # unsightly black haloes around pasted images with alpha
        # channels. From the PIL documentation:

        #       Note that if you paste an "RGBA" image, the alpha
        #       band is ignored. You can work around this by using
        #       the same image as both source image and mask.

        if self.base is not None:
            resized = self.base.resize ((size, size), Image.ANTIALIAS)
            new_image.paste (resized, (0, 0), resized)

        self.image = new_image

        # Return the Image object for readability's sake,
        # though the original object can be referred to
        # directly.

        return new_image

# end of class Simple
class Path (Simple):
    ''' Light-path element icon. Drawn at twice the base icon size, and
        rendered from one of two base images depending on whether this
        element is currently blocking or passing light.
    '''

    def __init__ (self, size=default_size, background='#ffffff'):

        Simple.__init__ (self, size * 2, background)

        # If a Path image is transparent, it never blocks.
        #
        # If a Path image is lit, it is receiving light
        # from something further up the light path.
        #
        # If a Path image is blocking, it is in a state
        # such that elements further down the path will
        # not be lit.

        self.transparent = False
        self.blocking = False
        self.lit = False

        # Which image is displayed depends on the above
        # state bits.

        self.blocked = None
        self.passed = None


    def setSize (self, size):
        ''' Path icons are always drawn at twice the requested size. '''
        return Simple.setSize (self, size * 2)


    def setBlockingImage (self, image):
        ''' Set the image used while blocking light.
            Returns True if it changed.
        '''
        changed = False
        if self.blocked != image:
            changed = True
            self.blocked = image
        return changed


    def setPassingImage (self, image):
        ''' Set the image used while passing light.
            Returns True if it changed.
        '''
        changed = False
        if self.passed != image:
            changed = True
            self.passed = image
        return changed


    def interpret (self, keyword=None, slice=None):
        ''' Figure out whether this specific Path instance is
            blocking light. Return None if there was no change;
            return True if we are newly blocking light, return
            False if we are newly passing light.
        '''

        if keyword == None or slice == None:
            suffix = None
            ascii = None
            binary = None
        else:
            # Keyword names end with a three-letter type suffix
            # (ORD, NAM, ...).
            suffix = keyword['name'][-3:]
            ascii = slice['ascii']
            binary = slice['binary']

        blocking = None

        # For stages where we monitor the ordinal position
        # (filter wheels, for example), light is not (acceptably)
        # passing through a given stage if our position is not a
        # positive ordinal value (greater than zero).

        if suffix == 'ORD':
            value = binary

            if value < 1:
                if self.blocking == False:
                    blocking = True
            elif self.blocking == True:
                blocking = False

        # Likewise, if a callback is registered for a NAM
        # keyword, assume that the positions 'Unknown' and
        # 'Home' are blocking light.

        elif suffix == 'NAM':
            value = ascii

            if value == 'unknown' or \
               value == 'home' or \
               value == 'irregular':

                if self.blocking == False:
                    blocking = True
            elif self.blocking == True:
                blocking = False

        # Inspect our current state and see whether our
        # base image should be changed.  NOTE(review): self.blocking
        # itself is not updated here; presumably the caller acts on
        # the returned transition value -- confirm against callers.

        if self.lit == False:
            if self.base != None:
                self.base = None

        elif self.blocking == True and self.transparent == False:
            if self.base != self.blocked:
                self.base = self.blocked

        else:
            if self.base != self.passed:
                self.base = self.passed

        return blocking

# end of class Path
class Motion (Simple):
    ''' Icon reflecting the motion state of a stage: the 'maintenance'
        image while maintenance-type activity is underway, or a pie-chart
        progress image while the stage is moving.
    '''

    def __init__ (self, size=default_size, background='#ffffff'):

        Simple.__init__ (self, size, background)

        # Which states will prompt the appearance of the
        # 'maintenance' icon rather than a pie chart?

        self.maintenance = ('calibrating', 'jogging', 'tracking', 'acquiring', 'slewing')

        # Helper data to render the pie-chart progress icon.

        self.pie = True
        self.type = None
        self.pie_image = None

        # The following values are used to compute the completion
        # progress using REL and TRL keywords.

        self.progress = None
        self.target = None

        # The following value is used with a PCT keyword, which
        # eliminates the need for asynchronous computation of the
        # progress.

        self.completion = None


    def interpret (self, keyword=None, slice=None):
        ''' Digest a keyword broadcast (PCT/REL/TRL/STA suffixes) and
            update the base image. Returns True if the image changed.
        '''

        if keyword == None or slice == None:
            return False

        suffix = keyword['name'][-3:]
        ascii = slice['ascii']
        binary = slice['binary']

        changed = False

        if suffix == 'PCT':
            # Percentage completion of the move in progress.
            # PCT keyword is a fraction of 100, not a fraction
            # of 1. We want to use it later as a fraction of 1.

            self.completion = float (binary) / 100.0

        elif suffix == 'REL':
            # How far the stage has moved since the beginning
            # of this specific motion.

            self.progress = float (binary)

        elif suffix == 'TRL':
            # How far the stage is expected to move-- the
            # absolute difference between the position at
            # the start of the move, and the target position.

            self.target = float (binary)

        elif suffix == 'STA':
            ascii = ascii.lower ()

            if self.type != ascii:
                self.type = ascii

                # Any state change invalidates progress data from
                # the previous move.
                if ascii != 'moving':
                    self.completion = None
                    self.progress = None
                    self.target = None

        if self.type in self.maintenance:
            changed = self.setBase (Images.get ('maintenance'))
        else:
            changed = self.setBase (None)

        if self.type == 'moving':
            if self.pie == True:
                # Two different conditions might be used to
                # prompt the creation of a progress image.
                # We are either receiving direct broadcasts
                # of the completion percentage, or we are
                # building our own notion of completion
                # based on multiple keywords.

                if self.completion != None or \
                   (self.progress != None and self.target != None):

                    new_image = self.buildImage ()
                    changed = self.setBase (new_image)
            else:
                changed = self.setBase (Images.get ('maintenance'))

        return changed


    def buildImage (self, size=64, complete=Color.progress, pending='black'):
        ''' Populate self.motion with a pie-chart progress icon
            indicating the relative progress on the current move.
            The 'complete' color is used to fill in the portion
            of the move that is complete. The 'pending' color is
            used to indicate the portion that is yet to come.
        '''

        if self.target == 0:
            # Not moving at all? Then you're already there.
            completion = 1

        elif self.progress != None and self.target != None:
            completion = abs (self.progress / self.target)

        elif self.completion != None:
            completion = self.completion

        else:
            # This should not occur.
            raise RuntimeError, 'not enough information to build a Motion pie icon'

        if completion > 1:
            # Probably the result of mixing REL and TRL
            # values from distinct move requests. If
            # REL and TRL are broadcast before STA,
            # this should never happen; but, if we got
            # here, it probably did.

            # Assert a sane value.

            completion = 1

        # Time to build an icon.

        background = ImageColor.getrgb (pending)
        complete = ImageColor.getrgb (complete)
        pending = ImageColor.getrgb (pending)

        # Full background transparency.
        background = (background[0], background[1], background[2], 0)

        new_image = Image.new ('RGBA', (size, size), background)

        # Leave 5% on either side as a whitespace buffer surrounding
        # the actual progress indicator.

        diameter = int (size * 0.9)
        buffer = size - diameter

        if buffer < 1:
            buffer = 1

        bounding_box = (buffer, buffer, size - buffer, size - buffer)

        draw = ImageDraw.Draw (new_image)

        # For the pie slice, zero degrees is a line from the center
        # of the circle to the right-most edge (3 o'clock). We want
        # zero to be from the center to the top edge (12 o'clock).
        # Thus, apply an offset of 270 degrees.

        zero = 270
        angle = int (completion * 360)

        if angle == 360:
            # Full. Just draw a 'complete' circle, no pie slice.
            draw.ellipse (bounding_box,
                    outline='black', fill=complete)
        else:
            # Draw a 'pending' circle, with a slice
            # representing 'complete'.

            angle = (angle + zero) % 360

            draw.ellipse (bounding_box,
                    outline='black', fill=pending)
            draw.pieslice (bounding_box,
                    outline='black', fill=complete,
                    start=zero, end=angle)

        # Done building the image.

        return (new_image)

# end of class Motion
class Status (Simple):
    def __init__ (self, size=default_size, background='#ffffff'):
        ''' Status icon for a stage. Keyword broadcasts are folded into
            self.state, and per-keyword status images into self.results.
        '''

        Simple.__init__ (self, size, background)

        # Keep track of prior status information.
        self.results = {}
        self.state = {}

        # Stage states regarded as nominal.
        self.ok_values = ('ready', 'moving', 'ok', 'tracking')

        # Allow for per-Status image variation in whether
        # the motor is expected to remain powered after
        # a move.
        self.motor_hold = None
    def setMotorHold (self, hold):
        ''' Declare whether this stage's motor stays powered after a
            move. Returns True if the setting changed.
        '''

        if hold != True and hold != False:
            raise ValueError, "the sole argument to setMotorHold is a boolean"

        changed = False

        if self.motor_hold != hold:
            self.motor_hold = hold
            changed = True

        return changed
def interpret (self, keyword=None, slice=None):
''' Update the local image components per the received
keyword.
'''
if keyword == None or slice == None:
return False
suffix = keyword['name'][-3:]
ascii = slice['ascii']
binary = slice['binary']
changed = False
results = self.results
state = self.state
if ascii != None:
ascii = ascii.lower ()
# Record relevant keywords for the generation of
# the overall status icon.
if suffix == 'ERR' or suffix == 'LIM' or suffix == 'MOE':
state[suffix] = binary
else:
state[suffix] = ascii
# That's it for processing the keyword values directly.
# Build a status icon based on the contents of self.state.
if 'LCK' in state:
value = state['LCK']
if value != 'unlocked' and value != '':
status = Images.get ('locked')
else:
status = None
results['LCK'] = status
if 'ERR' in state:
if state['ERR'] < 0:
status = Images.get ('error')
else:
status = None
results['ERR'] = status
# 'Not in a limit' corresponds to binary value 0.
# If we are in a limit, display it as a warning.
if 'LIM' in state:
if state['LIM'] != 0:
status = Images.get ('warning')
else:
status = None
results['LIM'] = status
# If the stage is in a 'ready' state, confirm that the motor is
# in the correct state.
if 'MOO' in state and 'STA' in state:
if 'MOE' in state:
motor_hold = self.state['MOE']
elif self.motor_hold != None:
motor_hold = self.motor_hold
else:
motor_hold = Stage.servo_hold
if state['STA'] == 'ready' and \
((motor_hold == False and state['MOO'] == 'on') or \
(motor_hold == True and state['MOO'] == 'off')):
status = Images.get ('warning')
else:
status = None
results['MOO'] = status
# Consider the MOO keyword separately if it is not paired
# with a STA keyword.
elif 'MOO' in state:
value = state['MOO']
if 'MOE' in state:
motor_hold = self.state['MOE']
elif self.motor_hold != None:
motor_hold = self.motor_hold
else:
motor_hold = Stage.servo_hold
if motor_hold == False and value == 'on':
status = Images.get ('warning')
elif motor_hold == True and value == 'off':
status = Images.get ('warning')
else:
status = None
results['MOO'] = status
# If the status keyword is not in an "acceptable" state, it
# should be flagged as a warning, or critical.
if 'STA' in state:
value = state['STA']
# The 'OK' values trump all other states, as they
# imply everything is A-OK to request a move or
# that the stage is otherwise operating in a
# completely normal mode.
if value in self.ok_values:
status = Images.get ('ok')
elif value == 'not calibrated':
status = Images.get ('cant_proceed')
elif value == 'fault' or \
value == 'disabled':
status = Images.get ('error')
elif value == 'locked':
status = Images.get ('locked')
else:
status = Images.get ('warning')
results['STA'] = status
if 'MSG' in state:
if state['MSG'] != '':
status = Images.get ('warning')
else:
status = None
results['MSG'] = status
if 'XMV' in state:
if state['XMV'] != '':
status = Images.get ('error')
else:
status = None
results['XMV'] = status
# Start off with results that should be prioritized,
# or may depend on other results.
if 'LCK' in results and results['LCK'] != None:
status = results['LCK']
elif 'ERR' in results and results['ERR'] != None:
status = results['ERR']
elif 'LIM' in results and results['LIM'] != None:
if 'STA' in results and results['STA'] == Images.get ('error'):
status = results['STA']
else:
status = results['LIM']
elif 'MOO' in results and results['MOO'] != None:
if 'STA' in results and results['STA'] == Images.get ('error'):
status = results['STA']
else:
status = results['MOO']
elif 'STA' in results and results['STA'] != None:
status = results['STA']
# Any states after this point are generally only rendered
# for a 'standalone' Status image, one that is associated
# with a single keyword.
elif len (results) == 1:
key = results.keys ()[0]
status = results[key]
else:
# Insufficient state to build an image. We either
# don't have enough broadcasts, or there are no
# recognized (or actionable) suffixes associated
# with this Status image.
status = None
self.results = results
self.state = state
# After all those checks, did anything actually change?
if self.base != status:
self.base = status
changed = True
return changed
# end of class Status
class Temperature (Status):
    ''' Status variant rendering a heater-bulb icon, driven by a
        power keyword and a setpoint keyword.
    '''
    def __init__ (self, *arguments, **keyword_arguments):
        Status.__init__ (self, *arguments, **keyword_arguments)
        self.state = {}
        self.power = None
        self.setpoint = None

    def setPowerKeyword (self, keyword):
        ''' Remember the power keyword; True if it changed. '''
        changed = self.power != keyword
        self.power = keyword
        return changed

    def setSetpointKeyword (self, keyword):
        ''' Remember the setpoint keyword; True if it changed. '''
        changed = self.setpoint != keyword
        self.setpoint = keyword
        return changed

    def interpret (self, keyword=None, slice=None):
        ''' Fold a broadcast into local state and rebuild the base
            image.  Returns True if the image changed.
        '''
        if keyword == None or slice == None:
            return False
        if keyword != self.power and keyword != self.setpoint:
            ktl.log (ktl.LOG_ERROR, "callback for %s not handled by Temperature image" % (keyword['name']))
            return False
        self.state[keyword] = slice['binary']
        # Now analyze the current state, and select an
        # appropriate base image.
        state = self.state
        base = None
        if self.power in state and state[self.power] != True:
            # Heater power is off: no bulb displayed.
            base = None
        elif self.setpoint in state:
            if state[self.setpoint] <= 0:
                base = Images.get ('bulb_off')
            else:
                base = Images.get ('bulb_on')
        if self.base == base:
            return False
        self.base = base
        return True
# end of class Temperature
class OmegaStatus (Status):
    ''' Status image for an Omega temperature controller: combines a
        power keyword with the controller's DISPSTA/SETPOINT/TEMP
        keywords to select an ok/warning/error base image.
    '''
    def __init__ (self, *arguments, **keyword_arguments):
        Status.__init__ (self, *arguments, **keyword_arguments)
        self.button = None
        self.service = None
        self.power = None
        # Omega keywords this object understands, and will attempt
        # to monitor when self.setService () is called.
        self.known = ('DISPSTA', 'SETPOINT', 'TEMP')

    def setButton (self, button):
        ''' Remember the associated button; True if it changed. '''
        changed = self.button != button
        self.button = button
        return changed

    def setPowerKeyword (self, keyword):
        ''' Remember the power keyword; True if it changed. '''
        changed = self.power != keyword
        self.power = keyword
        return changed

    def setService (self, service):
        ''' Bind to a KTL service, subscribing the button to every
            known Omega keyword present in the service.
        '''
        if self.service == service:
            return False
        if self.button == None:
            raise RuntimeError ("you must call OmegaStatus.setButton first")
        self.service = service
        for name in self.known:
            if name in service:
                self.button.setStatusKeyword (service[name])
        return True

    def interpret (self, keyword=None, slice=None):
        ''' Fold a broadcast into local state and rebuild the base
            image.  Returns True if the image changed.
        '''
        if keyword == None or slice == None:
            return False
        if keyword == self.power:
            self.state[self.power] = slice['binary']
        else:
            name = keyword['name']
            if name not in self.known:
                ktl.log (ktl.LOG_ERROR, "callback for %s not handled by OmegaStatus image" % (name))
                return False
            self.state[name] = slice['ascii'].lower ()
        # Now analyze the current state, and select an
        # appropriate base image.
        state = self.state
        if self.power in state and state[self.power] != True:
            base = Images.get ('error')
        elif 'DISPSTA' in state and state['DISPSTA'] != 'ready':
            base = Images.get ('error')
        elif 'SETPOINT' in state and 'TEMP' in state:
            setpoint = float (state['SETPOINT'])
            temp = float (state['TEMP'])
            if setpoint == 0:
                base = Images.get ('ok')
            elif temp > (setpoint + 5):
                # More than five degrees over the setpoint: warn.
                base = Images.get ('warning')
            else:
                base = Images.get ('ok')
        else:
            base = Images.get ('ok')
        if self.base == base:
            return False
        self.base = base
        return True
# end of class OmegaStatus
| alexrudy/Cauldron | Cauldron/bundled/GUI/kImage.py | Python | bsd-3-clause | 18,912 | [
"MOE"
] | 8a89c286f63ea80c72ccdd3952273a6b252087de9357ae2a2c0fdba02dc0cce8 |
''' DIRAC.ResourceStatusSystem.Policy package
'''
# Python 2/3 compatibility for this package.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Version-control id, expanded by the VCS keyword substitution.
__RCSID__ = '$Id$'
| yujikato/DIRAC | src/DIRAC/ResourceStatusSystem/Policy/__init__.py | Python | gpl-3.0 | 179 | [
"DIRAC"
] | dfb9380fe4a10570c3e7b23b795c6e545a03df1c4caf5ac0c40defada2ff653f |
from sys import exit
from random import randint
class Scene(object):
    """Base class for all game scenes; subclasses must implement enter()."""

    def enter(self):
        # Abstract method: reaching this means a subclass forgot to
        # override enter(), so terminate the process.
        print "This scene is not yet configured. Subclass it and implement enter()."
        exit(1)
class Engine(object):
    """Drives the game loop over a scene map until the final scene."""

    def __init__(self, scene_map):
        self.scene_map = scene_map

    def play(self):
        """Run each scene in turn; a scene's enter() names its successor."""
        scene = self.scene_map.opening_scene()
        final = self.scene_map.next_scene('finished')
        while scene != final:
            scene = self.scene_map.next_scene(scene.enter())
        # The loop exits before entering the final scene, so enter it here.
        scene.enter()
class Death(Scene):
    """Terminal scene: prints a random taunt and exits the process."""

    # Pool of snarky death messages; one is picked at random on enter().
    quips = [
        "You died. You kinda suck at this.",
        "Your mom would be proud...if she were smarter.",
        "Such a luser.",
        "I have a small puppy that's better at this."
    ]

    def enter(self):
        # randint is inclusive on both ends, hence the len-1 upper bound.
        print Death.quips[randint(0, len(self.quips)-1)]
        exit(1)
class CentralCorridor(Scene):
    """Opening scene: a Gothon blocks the way to the Weapons Armory.

    Returns the name of the next scene: 'death' on a wrong choice,
    'laser_weapon_armory' on success, or itself on unknown input.
    """

    def enter(self):
        print "The Gothons of Planet Percal #25 have invaded your ship and destroyed"
        print "your entire crew. You are the last surviving member and your last"
        print "mission is to get the neutron destruct bomb from the Weapons Armory,"
        print "put it in the bridge, and blow the ship up after getting into an "
        print "escape pod."
        print "\n"
        print "You're running down the central corridor to the Weapons Armory when"
        print "a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume"
        print "flowing around his hate filled body. He's blocking the door to the"
        print "Armory and about to pull a weapon to blast you."

        action = raw_input("> ")

        # Only "tell a joke" survives; the other two recognized actions die.
        if action == "shoot!":
            print "Quick on the draw you yank out your blaster and fire it at the Gothon."
            print "His clown costume is flowing and moving around his body, which throws"
            print "off your aim. Your laser hits his costume but misses him entirely. This"
            print "completely ruins his brand new costume his mother bought him, which"
            print "makes him fly into an insane rage and blast you repeatedly in the face until"
            print "you are dead. Then he eats you."
            return 'death'

        elif action == "dodge!":
            print "Like a world class boxer you dodge, weave, slip and slide right"
            print "as the Gothon's blaster cranks a laser past your head."
            print "In the middle of your artful dodge your foot slips and you"
            print "bang your head on the metal wall and pass out."
            print "You wake up shortly after only to die as the Gothon stomps on"
            print "your head and eats you."
            return 'death'

        elif action == "tell a joke":
            print "Lucky for you they made you learn Gothon insults in the academy."
            print "You tell the one Gothon joke you know:"
            print "Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr."
            print "The Gothon stops, tries not to laugh, then busts out laughing and can't move."
            print "While he's laughing you run up and shoot him square in the head"
            print "putting him down, then jump through the Weapon Armory door."
            return 'laser_weapon_armory'

        else:
            # Unrecognized input: replay this scene.
            print "DOES NOT COMPUTE!"
            return 'central_corridor'
class LaserWeaponArmory(Scene):
    """Keypad puzzle scene: guess the bomb container's 3-digit code.

    Returns 'the_bridge' on a correct guess, 'death' otherwise.
    """

    def enter(self):
        print "You do a dive roll into the Weapon Armory, crouch and scan the room"
        print "for more Gothons that might be hiding. It's dead quiet, too quiet."
        print "You stand up and run to the far side of the room and find the"
        print "neutron bomb in its container. There's a keypad lock on the box"
        print "and you need the code to get the bomb out. If you get the code"
        print "wrong 10 times then the lock closes forever and you can't"
        print "get the bomb. The code is 3 digits."
        # NOTE(review): the random code is commented out and replaced with a
        # hard-coded "123" -- looks like a debug leftover; confirm before release.
        # code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9))
        code = "123"
        guess = raw_input("[keypad]> ")
        guesses = 0

        # NOTE(review): the loop allows the initial guess plus 10 retries
        # (11 total attempts), while the prose says 10 -- verify intent.
        while guess != code and guesses < 10:
            print "BZZZZEDDD!"
            guesses += 1
            guess = raw_input("[keypad]> ")

        if guess == code:
            print "The container clicks open and the seal breaks, letting gas out."
            print "You grab the neutron bomb and run as fast as you can to the"
            print "bridge where you must place it in the right spot."
            return 'the_bridge'
        else:
            print "The lock buzzes one last time and then you hear a sickening"
            print "melting sound as the mechanism is fused together."
            print "You decide to sit there, and finally the Gothons blow up the"
            print "ship from their ship and you die."
            return 'death'
class TheBridge(Scene):
    """Bridge scene: place the bomb without getting shot.

    Returns 'death', 'escape_pod', or itself on unknown input.
    """

    def enter(self):
        print "You burst onto the Bridge with the netron destruct bomb"
        print "under your arm and surprise 5 Gothons who are trying to"
        print "take control of the ship. Each of them has an even uglier"
        print "clown costume than the last. They haven't pulled their"
        print "weapons out yet, as they see the active bomb under your"
        print "arm and don't want to set it off."

        action = raw_input("> ")

        if action == "throw the bomb":
            print "In a panic you throw the bomb at the group of Gothons"
            print "and make a leap for the door. Right as you drop it a"
            print "Gothon shoots you right in the back killing you."
            print "As you die you see another Gothon frantically try to disarm"
            print "the bomb. You die knowing they will probably blow up when"
            print "it goes off."
            return 'death'

        elif action == "slowly place the bomb":
            print "You point your blaster at the bomb under your arm"
            print "and the Gothons put their hands up and start to sweat."
            print "You inch backward to the door, open it, and then carefully"
            print "place the bomb on the floor, pointing your blaster at it."
            print "You then jump back through the door, punch the close button"
            print "and blast the lock so the Gothons can't get out."
            print "Now that the bomb is placed you run to the escape pod to"
            print "get off this tin can."
            return 'escape_pod'

        else:
            # Unrecognized input: replay this scene.
            print "DOES NOT COMPUTE!"
            return "the_bridge"
class EscapePod(Scene):
    """Final gamble: pick the one good escape pod out of five.

    Returns 'finished' on a lucky pick, 'death' otherwise.
    """

    def enter(self):
        print "You rush through the ship desperately trying to make it to"
        print "the escape pod before the whole ship explodes. It seems like"
        print "hardly any Gothons are on the ship, so your run is clear of"
        print "interference. You get to the chamber with the escape pods, and"
        print "now need to pick one to take. Some of them could be damaged"
        print "but you don't have time to look. There's 5 pods, which one"
        print "do you take?"

        # The winning pod is chosen at random each time the scene runs.
        good_pod = randint(1,5)
        guess = raw_input("[pod #]> ")

        if int(guess) != good_pod:
            print "You jump into pod %s and hit the eject button." % guess
            print "The pod escapes out into the void of space, then"
            print "implodes as the hull ruptures, crushing your body"
            print "into jam jelly."
            return 'death'
        else:
            print "You jump into pod %s and hit the eject button." % guess
            print "The pod easily slides out into space heading to"
            print "the planet below. As it flies to the planet, you look"
            print "back and see your ship implode then explode like a"
            print "bright star, taking out the Gothon ship at the same"
            print "time. You won!"
            return 'finished'
class Finished(Scene):
    """Victory scene; the Engine stops once this scene is reached."""

    def enter(self):
        print "You won! Good job."
        return 'finished'
class Map(object):
    """Registry of every scene in the game, keyed by scene name."""

    scenes = {
        'central_corridor': CentralCorridor(),
        'laser_weapon_armory': LaserWeaponArmory(),
        'the_bridge': TheBridge(),
        'escape_pod': EscapePod(),
        'death': Death(),
        'finished': Finished(),
    }

    def __init__(self, start_scene):
        self.start_scene = start_scene

    def next_scene(self, scene_name):
        """Look up a scene instance by name (None if unknown)."""
        return Map.scenes.get(scene_name)

    def opening_scene(self):
        """Return the scene the game starts in."""
        return self.next_scene(self.start_scene)
# Wire up the game: start in the central corridor and run the loop.
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
| liggettla/python | ex43.py | Python | mit | 8,807 | [
"BLAST"
] | 07c56b711cb1c687479e50dffd7027d1635494c4488465cd00f0a43231cc65bc |
"""Implements the graph generation for computation of gradients."""
import collections
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_grad
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import linalg_grad
from tensorflow.python.ops import math_grad
# pylint: enable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s"
% (dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.ConstantValue(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
# Let the framework transparently densify IndexedSlices wherever a
# Tensor is expected.
ops.register_tensor_conversion_function(ops.IndexedSlices, _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _GetGradsDevice(op, colocate_gradients_with_ops):
"""Gets the device to which to assign gradients of "op".
Args:
op: an Operation.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
Returns:
A device string.
"""
if colocate_gradients_with_ops and op.device:
return op.device
else:
return op.graph.get_default_device()
def _PendingCount(graph, to_ops, from_ops):
    """Initialize the pending count for ops between two lists of Operations.

    'pending_count[op._id]' indicates the number of backprop inputs
    to this operation.

    Args:
      graph: a Graph.
      to_ops: list of Operations.
      from_ops: list of Operations.

    Returns:
      A tuple containing: (1) a list of integers indexed by operation id,
      indicating the number of backprop inputs to this operation, and (2)
      a boolean which is True if any of the ops in between from_ops and to_ops
      contain control flow loops.
    """
    # Mark reachable ops from from_ops.
    reached_ops = [False] * (graph._last_id + 1)
    for op in to_ops:
        reached_ops[op._id] = True
    _MarkReachedOps(from_ops, reached_ops)
    # Mark between ops: walk backwards from to_ops, keeping only ops
    # that were also reached forward from from_ops.
    between_ops = [False] * (graph._last_id + 1)
    between_op_list = []
    queue = collections.deque()
    queue.extend(to_ops)
    while queue:
        op = queue.popleft()
        # We are interested in this op.
        if reached_ops[op._id]:
            between_ops[op._id] = True
            between_op_list.append(op)
            # Clear the boolean so we won't add the inputs again.
            reached_ops[op._id] = False
            for inp in op.inputs:
                queue.append(inp.op)
    # Initialize pending count for between ops: each data or control
    # edge whose source is a between-op contributes one pending input.
    pending_count = [0] * (graph._last_id + 1)
    has_control_flow = False
    for op in between_op_list:
        for x in op.inputs:
            if between_ops[x.op._id]:
                pending_count[x.op._id] += 1
        for x in op.control_inputs:
            if between_ops[x._id]:
                pending_count[x._id] += 1
        # An "Exit" op marks the presence of a control-flow loop.
        if op.type == "Exit":
            has_control_flow = True
    return pending_count, has_control_flow
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
    """Fill in default values for grad_ys.

    Args:
      grad_ys: List of gradients, can contain None.
      ys: List of tensors.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.

    Returns:
      A list of gradients to use, without None.

    Raises:
      ValueError: If one of the grad_ys is invalid.
    """
    if len(grad_ys) != len(ys):
        raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
    grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
    for i in xrange(len(grad_ys)):
        grad_y = grad_ys[i]
        y = ys[i]
        if grad_y is None:
            # No gradient supplied for this y: default to a tensor of
            # ones with y's shape and dtype, placed on y's device.
            with ops.device(_GetGradsDevice(y.op, colocate_gradients_with_ops)):
                grad_ys[i] = array_ops.fill(array_ops.shape(y),
                                            constant_op.constant(1, dtype=y.dtype))
        else:
            # A supplied gradient must match its y's dtype exactly.
            if grad_y.dtype != y.dtype:
                raise ValueError("Y and ys_grad must be of the same type, "
                                 "not y: %s, ys_grad: %s " %
                                 (types.as_dtype(y.dtype).name,
                                  types.as_dtype(grad_y.dtype).name))
    return grad_ys
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if the gradients are invalid.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is not None:
if not grad.dtype.is_compatible_with(inp.dtype):
raise ValueError(
"Gradient type %s generated for op %s does "
"not match input type %s" %
(types.as_dtype(grad.dtype).name, op.node_def,
types.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
def gradients(ys, xs, grad_ys=None, name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None):
    """Constructs symbolic partial derivatives of `ys` w.r.t. x in `xs`.

    `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
    is a list of `Tensor`, holding the gradients received by the
    `ys`. The list must be the same length as `ys`.

    `gradients()` adds ops to the graph to output the partial
    derivatives of `ys` with respect to `xs`. It returns a list of
    `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
    for y in `ys`.

    `grad_ys` is a list of tensors of the same length as `ys` that holds
    the initial gradients for each y in `ys`. When `grad_ys` is None,
    we fill in a tensor of '1's of the shape of y for each y in `ys`. A
    user can provide their own initial 'grad_ys` to compute the
    derivatives using a different initial gradient for each y (e.g., if
    one wanted to weight the gradient differently for each value in
    each y).

    Args:
      ys: A `Tensor` or list of tensors to be differentiated.
      xs: A `Tensor` or list of tensors to be used for differentiation.
      grad_ys: Optional. A `Tensor` or list of tensors the same size as
        `ys` and holding the gradients computed for each y in `ys`.
      name: Optional name to use for grouping all the gradient ops together.
        defaults to 'gradients'.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      gate_gradients: If True, add a tuple around the gradients returned
        for an operations. This avoids some race conditions.
      aggregation_method: Specifies the method used to combine gradient terms.
        Accepted values are constants defined in the class `AggregationMethod`.

    Returns:
      A list of `sum(dy/dx)` for each x in `xs`.

    Raises:
      LookupError: if one of the operations between `x` and `y` does not
        have a registered gradient function.
      ValueError: if the arguments are invalid.
    """
    ys = _AsList(ys)
    xs = _AsList(xs)
    if grad_ys is None:
        grad_ys = [None] * len(ys)
    else:
        grad_ys = _AsList(grad_ys)
    with ops.op_scope(ys + xs + grad_ys, name, "gradients"):
        ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
        xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
        grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

        # The approach we take here is as follows: Create a list of all ops in
        # the subgraph between the ys and xs. Visit these ops in reverse order
        # of ids to ensure that when we visit an op the gradients w.r.t its
        # outputs have been collected. Then aggregate these gradients if
        # needed, call the op's gradient function, and add the generated
        # gradients to the gradients for its input.

        # Initialize the pending count for ops in the connected subgraph
        # from ys to the xs.
        to_ops = [t.op for t in ys]
        from_ops = [t.op for t in xs]
        pending_count, has_control_flow = _PendingCount(
            ops.get_default_graph(), to_ops, from_ops)

        # Iterate over the collected ops.
        #
        # grads: op => list of gradients received on each output endpoint of
        # the op. The gradients for each endpoint are initially collected as a
        # list. When it is time to call the op's gradient function, for each
        # endpoint we aggregate the list of received gradients into a Add()
        # Operation if there is more than one.
        grads = {}

        # Add the initial gradients for the ys.
        for y, grad_y in zip(ys, grad_ys):
            _SetGrad(grads, y, grad_y)

        # Initialize queue with to_ops.
        queue = collections.deque()
        # Add the ops in 'to_ops' into the queue, deduplicating by id.
        to_ops_set = set()
        for op in to_ops:
            if op._id not in to_ops_set:
                to_ops_set.add(op._id)
                queue.append(op)
        # The set of 'from_ops'.
        stop_ops = _StopOps(from_ops, pending_count)
        while queue:
            # generate gradient subgraph for op.
            op = queue.popleft()
            with ops.device(_GetGradsDevice(op, colocate_gradients_with_ops)):
                if has_control_flow:
                    control_flow_ops.EnterGradWhileContext(op)
                out_grads = _AggregatedGrads(grads, op, has_control_flow,
                                             aggregation_method)
                grad_fn = None
                if any(out_grads) and op._id not in stop_ops:
                    # A grad_fn must be defined, either as a function or as None
                    # for ops that do not have gradients.
                    try:
                        grad_fn = ops.get_gradient_function(op)
                    except LookupError:
                        raise LookupError(
                            "No gradient defined for operation '%s' (op type: %s)" %
                            (op.name, op.type))
                if grad_fn and any(out_grads):
                    # NOTE: If _AggregatedGrads didn't compute a value for the i'th
                    # output, it means that the cost does not depend on output[i],
                    # therefore dC/doutput[i] is 0.
                    for i, out_grad in enumerate(out_grads):
                        if (not out_grad
                            and types.as_dtype(op.outputs[i].dtype).base_dtype in (
                                types.float32, types.float64)):
                            # Only floating-point outputs get a zero gradient. Gradient
                            # functions should ignore the gradient for other outputs.
                            out_grads[i] = array_ops.zeros_like(op.outputs[i])
                    with ops.name_scope(op.name + "_grad"):
                        # pylint: disable=protected-access
                        with ops.get_default_graph()._original_op(op):
                            # pylint: enable=protected-access
                            op_wrapper = op
                            if has_control_flow:
                                op_wrapper = control_flow_ops.MakeWrapper(op)
                            in_grads = _AsList(grad_fn(op_wrapper, *out_grads))
                            _VerifyGeneratedGradients(in_grads, op)
                            if gate_gradients and len(in_grads) > 1:
                                in_grads = control_flow_ops.tuple(in_grads)
                    logging.vlog(1, "Gradient for '" + op.name + "'")
                    logging.vlog(1, " in --> %s",
                                 ", ".join([x.name for x in out_grads if x]))
                    logging.vlog(1, " out --> %s",
                                 ", ".join([x.name for x in in_grads if x]))
                else:
                    # If no grad_fn is defined or none of out_grads is available,
                    # just propagates a list of None backwards.
                    in_grads = [None] * len(op.inputs)
                for t_in, in_grad in zip(op.inputs, in_grads):
                    if in_grad:
                        _SetGrad(grads, t_in, in_grad)
                if has_control_flow:
                    control_flow_ops.ExitGradWhileContext(op)

            # update pending count for the inputs of op.
            for x in op.inputs:
                pending_count[x.op._id] -= 1
                ready = (pending_count[x.op._id] == 0)
                if has_control_flow and not ready:
                    ready = (pending_count[x.op._id] > 0 and
                             control_flow_ops.IsLoopSwitch(x.op))
                if ready:
                    queue.append(x.op)
            for x in op.control_inputs:
                pending_count[x._id] -= 1
                # BUG FIX: was `pending_count[x._id] is 0`, an identity
                # comparison with an int that only works due to CPython's
                # small-integer caching; equality is the correct check.
                if pending_count[x._id] == 0:
                    queue.append(x)
    return [_GetGrad(grads, x) for x in xs]
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert op.type == "Switch"
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads: return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
    """Flatten arbitrarily nested IndexedSlices into a single level.

    Recursively resolves IndexedSlices whose `values` are themselves
    IndexedSlices by composing the index lookups with a gather.
    """
    assert isinstance(grad, ops.IndexedSlices)
    if isinstance(grad.values, ops.Tensor):
        # Already flat: values is a dense Tensor.
        return grad
    else:
        assert isinstance(grad.values, ops.IndexedSlices)
        g = _HandleNestedIndexedSlices(grad.values)
        # Compose indices: the inner indices select into the outer ones.
        return ops.IndexedSlices(
            g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
    """Merge the static shapes of all `ops.Tensor` entries in `inputs`.

    Starts from a fully-unknown shape and merges in the static shape of
    every Tensor among `inputs`; non-Tensor entries are ignored.
    """
    merged = tensor_shape.unknown_shape()
    for item in inputs:
        if isinstance(item, ops.Tensor):
            merged = merged.merge_with(item.get_shape())
    return merged
class AggregationMethod(object):
    """A class listing aggregation methods used to combine gradients.

    Computing partial derivatives can require aggregating gradient
    contributions. This class lists the various methods that can
    be used to combine gradients in the graph:

    *  `ADD_N`: All of the gradient terms are summed as part of one
       operation using the "AddN" op. It has the property that all
       gradients must be ready before any aggregation is performed.
    *  `DEFAULT`: The system-chosen default aggregation method.
    """
    ADD_N = 0
    DEFAULT = ADD_N
    # The following are experimental and may not be supported in future
    # releases: tree-structured AddN, and AccumulateN aggregation.
    EXPERIMENTAL_TREE = 1
    EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, has_control_flow, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    has_control_flow: True iff the graph contains control flow ops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
      for a particular output is a list, this function aggregates it
      before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [AggregationMethod.ADD_N,
                                AggregationMethod.EXPERIMENTAL_TREE,
                                AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:
    raise ValueError("Invalid aggregation_method specified.")
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if has_control_flow:
      # With control flow, a "Switch" op may store a single gradient
      # (not a contribution list); leave it untouched.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert op.type == "Switch"
        continue
    # Grads have to be Tensors or IndexedSlices
    if not all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
                for g in out_grad if g]):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if all([isinstance(g, ops.Tensor) for g in out_grad if g]):
        # NOTE: this local deliberately shadows the imported `tensor_shape`
        # module for the rest of this branch.
        tensor_shape = _AccumulatorShape(out_grad)
        if len(out_grad) < 2:
          # Single contribution: nothing to aggregate.
          used = "nop"
          out_grads[i] = out_grad[0]
        elif (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
              and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint.  When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable.  If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE,
                                    AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
                                   ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = math_ops.add_n(out_grad)
        logging.vlog(2, "  _AggregatedGrads %d x %s using %s", len(out_grad),
                     tensor_shape, used)
      else:
        # At least one contribution is sparse: aggregate everything as
        # IndexedSlices by concatenating values and indices.
        out_grad = math_ops._as_indexed_slices_list([g for g in out_grad if g])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat(0, [x.values for x in out_grad]),
            array_ops.concat(0, [x.indices for x in out_grad]),
            out_grad[0].dense_shape)
    else:
      # No contributions for this output.
      out_grads[i] = []
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  This is an efficient construction that uses a backprop-like approach
  to compute the product between the Hessian and another vector. The
  Hessian is usually too large to be explicitly computed or even
  represented, but this method allows us to at least multiply by it
  for the same big-O cost as backprop.

  Implicit Hessian-vector products are the main practical, scalable way
  of using second derivatives with neural networks. They allow us to
  do things like construct Krylov subspaces and approximate conjugate
  gradient descent.

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as (A + A.T) `v`.

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.
  """
  # Validate the input.
  if len(v) != len(xs):
    raise ValueError("xs and v must have the same length.")
  # First backprop: d(ys)/d(xs).
  first_grads = gradients(ys, xs)
  assert len(first_grads) == len(xs)
  # Dot the first-order gradients with v; v is held constant via
  # stop_gradient so the second backprop differentiates only the grads.
  products = [math_ops.mul(g, array_ops.stop_gradient(u))
              for g, u in zip(first_grads, v)
              if g is not None]
  # Second backprop yields the Hessian-vector product.
  return gradients(products, xs)
| liyu1990/tensorflow | tensorflow/python/ops/gradients.py | Python | apache-2.0 | 24,255 | [
"VisIt"
] | edbb5460e2e3b6ca69df83bfe1c151f0659330559d48e4e3160cdd7345cb94be |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os, shutil, plistlib, subprocess, glob, zipfile, tempfile, \
py_compile, stat, operator, time
from functools import partial
from contextlib import contextmanager
from itertools import repeat
abspath, join, basename = os.path.abspath, os.path.join, os.path.basename
from setup import (
__version__ as VERSION, __appname__ as APPNAME, basenames, modules as
main_modules, Command, SRC, functions as main_functions)
from setup.build_environment import sw as SW, QT_FRAMEWORKS, QT_PLUGINS, PYQT_MODULES
from setup.installer.osx.app.sign import current_dir, sign_app
LICENSE = open('LICENSE', 'rb').read()
MAGICK_HOME='@executable_path/../Frameworks/ImageMagick'
ENV = dict(
FONTCONFIG_PATH='@executable_path/../Resources/fonts',
FONTCONFIG_FILE='@executable_path/../Resources/fonts/fonts.conf',
MAGICK_CONFIGURE_PATH=MAGICK_HOME+'/config-Q16',
MAGICK_CODER_MODULE_PATH=MAGICK_HOME+'/modules-Q16/coders',
MAGICK_CODER_FILTER_PATH=MAGICK_HOME+'/modules-Q16/filters',
QT_PLUGIN_PATH='@executable_path/../MacOS/qt-plugins',
PYTHONIOENCODING='UTF-8',
)
info = warn = None
@contextmanager
def timeit():
    """Yield a two-item list that is filled with [minutes, seconds]
    elapsed inside the ``with`` block once the block exits."""
    elapsed = [0, 0]
    start = time.time()
    yield elapsed
    total = time.time() - start
    elapsed[0], elapsed[1] = total // 60, total % 60
class OSX32_Freeze(Command):
    """setup.py command that freezes calibre into an OS X .app bundle."""

    description = 'Freeze OSX calibre installation'

    def add_options(self, parser):
        # Register this command's switches on the optparse parser.
        parser.add_option('--test-launchers', default=False,
                action='store_true',
                help='Only build launchers')
        # Another command may have registered --dont-strip already.
        if not parser.has_option('--dont-strip'):
            parser.add_option('-x', '--dont-strip', default=False,
                action='store_true', help='Dont strip the generated binaries')

    def run(self, opts):
        # Export this command's loggers so the module-level helpers can
        # report progress via info()/warn().
        global info, warn
        info, warn = self.info, self.warn
        main(opts.test_launchers, opts.dont_strip)
def compile_launcher_lib(contents_dir, gcc, base):
    """Compile util.c into calibre-launcher.dylib inside the bundle's
    Frameworks directory and return the path of the built dylib."""
    info('\tCompiling calibre_launcher.dylib')
    fd = join(contents_dir, 'Frameworks')
    dest = join(fd, 'calibre-launcher.dylib')
    src = join(base, 'util.c')
    # Link against the bundled Python framework and CoreFoundation.
    # -headerpad_max_install_names leaves room for later
    # install_name_tool rewrites of the load commands.
    cmd = [gcc] + '-Wall -dynamiclib -std=gnu99'.split() + [src] + \
        ['-I'+base] + \
        ['-I%s/python/Python.framework/Versions/Current/Headers' % SW] + \
        '-current_version 1.0 -compatibility_version 1.0'.split() + \
        '-fvisibility=hidden -o'.split() + [dest] + \
        ['-install_name',
            '@executable_path/../Frameworks/'+os.path.basename(dest)] + \
        [('-F%s/python' % SW), '-framework', 'Python', '-framework', 'CoreFoundation', '-headerpad_max_install_names']
    # info('\t'+' '.join(cmd))
    sys.stdout.flush()
    subprocess.check_call(cmd)
    return dest
def compile_launchers(contents_dir, xprograms, pyver):
    """Compile one small C launcher executable per calibre program.

    xprograms maps program name -> (python module, function, 'gui'|'console').
    Returns the list of built binaries, with the shared launcher dylib first.
    """
    gcc = os.environ.get('CC', 'gcc')
    base = os.path.dirname(__file__)
    lib = compile_launcher_lib(contents_dir, gcc, base)
    src = open(join(base, 'launcher.c'), 'rb').read()
    # Build the C string tables of environment variables that every
    # launcher exports before embedding Python.
    env, env_vals = [], []
    for key, val in ENV.items():
        env.append('"%s"'% key)
        env_vals.append('"%s"'% val)
    env = ', '.join(env)+', '
    env_vals = ', '.join(env_vals)+', '
    src = src.replace('/*ENV_VARS*/', env)
    src = src.replace('/*ENV_VAR_VALS*/', env_vals)
    programs = [lib]
    for program, x in xprograms.iteritems():  # Python 2 dict iteration
        module, func, ptype = x
        info('\tCompiling', program)
        out = join(contents_dir, 'MacOS', program)
        programs.append(out)
        # Specialise the launcher template for this program.
        psrc = src.replace('**PROGRAM**', program)
        psrc = psrc.replace('**MODULE**', module)
        psrc = psrc.replace('**FUNCTION**', func)
        psrc = psrc.replace('**PYVER**', pyver)
        psrc = psrc.replace('**IS_GUI**', ('1' if ptype == 'gui' else '0'))
        fsrc = '/tmp/%s.c'%program
        with open(fsrc, 'wb') as f:
            f.write(psrc)
        cmd = [gcc, '-Wall', '-I'+base, fsrc, lib, '-o', out,
                '-headerpad_max_install_names']
        # info('\t'+' '.join(cmd))
        sys.stdout.flush()
        subprocess.check_call(cmd)
    return programs
def flipwritable(fn, mode=None):
    """
    Flip the writability of a file and return the old mode. Returns None
    if the file is already writable.

    When *mode* is given, restore the file's permissions to *mode* (the
    second half of a flip/restore pair) and return None.
    """
    if mode is not None:
        # BUG FIX: the original implementation ignored *mode*, so calls
        # like flipwritable(path, old_mode) never restored the saved
        # permissions (the file was already writable, so the function
        # returned early without doing anything).
        os.chmod(fn, mode)
        return None
    if os.access(fn, os.W_OK):
        return None
    old_mode = os.stat(fn).st_mode
    os.chmod(fn, stat.S_IWRITE | old_mode)
    return old_mode
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
def strip_files(files, argv_max=(256 * 1024)):
    """
    Strip a list of files
    """
    # Make every file writable first, remembering the original mode so it
    # can be put back afterwards.
    tostrip = [(fn, flipwritable(fn)) for fn in files if os.path.exists(fn)]
    while tostrip:
        cmd = list(STRIPCMD)
        flips = []
        # NOTE: `reduce` is the Python 2 builtin here. Sums len+1 per
        # argument (path plus separator) to track command-line length.
        pathlen = reduce(operator.add, [len(s) + 1 for s in cmd])
        # Batch paths onto one strip invocation while staying under the
        # OS argv length limit.
        while pathlen < argv_max:
            if not tostrip:
                break
            added, flip = tostrip.pop()
            pathlen += len(added) + 1
            cmd.append(added)
            flips.append((added, flip))
        else:
            # Inner loop exhausted the length budget without `break`:
            # push the last path back for the next batch.
            cmd.pop()
            tostrip.append(flips.pop())
        os.spawnv(os.P_WAIT, cmd[0], cmd)
        # Attempt to restore the modes saved above.
        for args in flips:
            flipwritable(*args)
def flush(func):
    """Decorator that flushes stdout/stderr before and after each call to
    *func*, keeping interleaved subprocess output in order."""
    from functools import wraps

    # BUG FIX: without functools.wraps the decorated function lost its
    # __name__/__doc__, which hurts debugging and introspection.
    @wraps(func)
    def ff(*args, **kwargs):
        sys.stdout.flush()
        sys.stderr.flush()
        ret = func(*args, **kwargs)
        sys.stdout.flush()
        sys.stderr.flush()
        return ret
    return ff
class Py2App(object):

    """Assemble the complete calibre OS X application bundle.

    Copies the private Python framework, Qt frameworks, site-packages,
    the standard library and all native libraries into the .app,
    rewrites their install names to be @executable_path relative,
    compiles the C launchers, creates the helper .app clones and finally
    packages everything into a dmg.
    """

    # Install-name prefix used for every bundled library.
    FID = '@executable_path/../Frameworks'

    def __init__(self, build_dir, test_launchers=False, dont_strip=False):
        self.build_dir = build_dir
        self.dont_strip = dont_strip
        self.contents_dir = join(self.build_dir, 'Contents')
        self.resources_dir = join(self.contents_dir, 'Resources')
        self.frameworks_dir = join(self.contents_dir, 'Frameworks')
        self.version_info = '.'.join(map(str, sys.version_info[:2]))
        self.site_packages = join(self.resources_dir, 'Python', 'site-packages')
        self.to_strip = []
        self.warnings = []
        # The build starts immediately on construction.
        self.run(test_launchers)

    def warn(self, *args):
        warn(*args)

    def run(self, test_launchers):
        """Execute all build steps and return the path of the created dmg."""
        ret = 0
        if not test_launchers:
            if os.path.exists(self.build_dir):
                shutil.rmtree(self.build_dir)
            os.makedirs(self.build_dir)
            self.create_skeleton()
            self.create_plist()
            self.add_python_framework()
            self.add_site_packages()
            self.add_stdlib()
            self.add_qt_frameworks()
            self.add_calibre_plugins()
            self.add_podofo()
            self.add_poppler()
            self.add_imaging_libs()
            self.add_fontconfig()
            self.add_imagemagick()
            self.add_misc_libraries()
            self.add_resources()
            self.compile_py_modules()
            self.copy_site()
        self.create_exe()
        if not test_launchers and not self.dont_strip:
            self.strip_files()
        if not test_launchers:
            self.create_console_app()
            self.create_gui_apps()
        ret = self.makedmg(self.build_dir, APPNAME+'-'+VERSION)
        return ret

    @flush
    def add_resources(self):
        """Copy the calibre resources directory into the bundle."""
        shutil.copytree('resources', os.path.join(self.resources_dir,
            'resources'))

    @flush
    def strip_files(self):
        """Strip debug symbols from every binary collected in to_strip."""
        info('\nStripping files...')
        strip_files(self.to_strip)

    @flush
    def create_exe(self):
        """Compile all C launchers and fix their library install names."""
        info('\nCreating launchers')
        programs = {}
        progs = []
        for x in ('console', 'gui'):
            progs += list(zip(basenames[x], main_modules[x], main_functions[x], repeat(x)))
        for program, module, func, ptype in progs:
            programs[program] = (module, func, ptype)
        programs = compile_launchers(self.contents_dir, programs,
                self.version_info)
        for out in programs:
            self.fix_dependencies_in_lib(out)

    @flush
    def set_id(self, path_to_lib, new_id):
        """Set the install name (id) of a dylib via install_name_tool."""
        old_mode = flipwritable(path_to_lib)
        subprocess.check_call(['install_name_tool', '-id', new_id, path_to_lib])
        if old_mode is not None:
            flipwritable(path_to_lib, old_mode)

    @flush
    def get_dependencies(self, path_to_lib):
        """Yield (dependency_path, is_install_name_id) parsed from otool."""
        install_name = subprocess.check_output(['otool', '-D', path_to_lib]).splitlines()[-1].strip()
        raw = subprocess.check_output(['otool', '-L', path_to_lib])
        for line in raw.splitlines():
            # Dependency lines contain a "(compatibility ...)" suffix;
            # header lines end with ':'.
            if 'compatibility' not in line or line.strip().endswith(':'):
                continue
            idx = line.find('(')
            path = line[:idx].strip()
            yield path, path == install_name

    @flush
    def get_local_dependencies(self, path_to_lib):
        """Yield dependencies that point into the build prefix (SW) and
        therefore must be rewritten to @executable_path form."""
        for x, is_id in self.get_dependencies(path_to_lib):
            for y in (SW+'/lib/', SW+'/qt/lib/', SW+'/python/Python.framework/',):
                if x.startswith(y):
                    if y == SW+'/python/Python.framework/':
                        y = SW+'/python/'
                    yield x, x[len(y):], is_id
                    break

    @flush
    def change_dep(self, old_dep, new_dep, is_id, path_to_lib):
        """Rewrite one load command (or the id) of a dylib."""
        cmd = ['-id', new_dep] if is_id else ['-change', old_dep, new_dep]
        subprocess.check_call(['install_name_tool'] + cmd + [path_to_lib])

    @flush
    def fix_dependencies_in_lib(self, path_to_lib):
        """Rewrite all local dependencies of a dylib to @executable_path
        form and queue the file for stripping."""
        self.to_strip.append(path_to_lib)
        old_mode = flipwritable(path_to_lib)
        for dep, bname, is_id in self.get_local_dependencies(path_to_lib):
            ndep = self.FID+'/'+bname
            self.change_dep(dep, ndep, is_id, path_to_lib)
        # Sanity check: nothing local may remain after rewriting.
        ldeps = list(self.get_local_dependencies(path_to_lib))
        if ldeps:
            info('\nFailed to fix dependencies in', path_to_lib)
            info('Remaining local dependencies:', ldeps)
            raise SystemExit(1)
        if old_mode is not None:
            flipwritable(path_to_lib, old_mode)

    @flush
    def add_python_framework(self):
        """Copy the minimal Python.framework into the bundle."""
        info('\nAdding Python framework')
        src = join(SW + '/python', 'Python.framework')
        x = join(self.frameworks_dir, 'Python.framework')
        curr = os.path.realpath(join(src, 'Versions', 'Current'))
        currd = join(x, 'Versions', basename(curr))
        rd = join(currd, 'Resources')
        os.makedirs(rd)
        shutil.copy2(join(curr, 'Resources', 'Info.plist'), rd)
        shutil.copy2(join(curr, 'Python'), currd)
        self.set_id(join(currd, 'Python'),
            self.FID+'/Python.framework/Versions/%s/Python'%basename(curr))
        # The following is needed for codesign in OS X >= 10.9.5
        with current_dir(x):
            os.symlink(basename(curr), 'Versions/Current')
            for y in ('Python', 'Resources'):
                os.symlink('Versions/Current/%s'%y, y)

    @flush
    def add_qt_frameworks(self):
        """Copy the required Qt frameworks and plugins into the bundle."""
        info('\nAdding Qt Frameworks')
        for f in QT_FRAMEWORKS:
            self.add_qt_framework(f)
        pdir = join(SW, 'qt', 'plugins')
        ddir = join(self.contents_dir, 'MacOS', 'qt-plugins')
        os.mkdir(ddir)
        for x in QT_PLUGINS:
            shutil.copytree(join(pdir, x), join(ddir, x))
        for l in glob.glob(join(ddir, '*/*.dylib')):
            self.fix_dependencies_in_lib(l)
            x = os.path.relpath(l, ddir)
            self.set_id(l, '@executable_path/'+x)

    def add_qt_framework(self, f):
        """Copy a single Qt framework, stripping headers and fixing ids."""
        libname = f
        f = f+'.framework'
        src = join(SW, 'qt', 'lib', f)
        ignore = shutil.ignore_patterns('Headers', '*.h', 'Headers/*')
        dest = join(self.frameworks_dir, f)
        shutil.copytree(src, dest, symlinks=True,
                ignore=ignore)
        lib = os.path.realpath(join(dest, libname))
        rpath = os.path.relpath(lib, self.frameworks_dir)
        self.set_id(lib, self.FID+'/'+rpath)
        self.fix_dependencies_in_lib(lib)
        # The following is needed for codesign in OS X >= 10.9.5
        # The presence of the .prl file in the root of the framework causes
        # codesign to fail.
        with current_dir(dest):
            for x in os.listdir('.'):
                if x != 'Versions' and not os.path.islink(x):
                    os.remove(x)

    @flush
    def create_skeleton(self):
        """Create the bare Contents/{Frameworks,MacOS,Resources} layout
        and compile the .iconset sources into .icns files."""
        c = join(self.build_dir, 'Contents')
        for x in ('Frameworks', 'MacOS', 'Resources'):
            os.makedirs(join(c, x))
        for x in glob.glob(join('icons', 'icns', '*.iconset')):
            subprocess.check_call([
                'iconutil', '-c', 'icns', x, '-o', join(
                    self.resources_dir, basename(x).partition('.')[0] + '.icns')])

    @flush
    def add_calibre_plugins(self):
        """Copy calibre's compiled extension modules into Frameworks/plugins."""
        dest = join(self.frameworks_dir, 'plugins')
        os.mkdir(dest)
        for f in glob.glob('src/calibre/plugins/*.so'):
            shutil.copy2(f, dest)
            self.fix_dependencies_in_lib(join(dest, basename(f)))

    @flush
    def create_plist(self):
        """Write the bundle's Info.plist, including document types for all
        supported e-book extensions."""
        from calibre.ebooks import BOOK_EXTENSIONS
        env = dict(**ENV)
        env['CALIBRE_LAUNCHED_FROM_BUNDLE']='1'
        docs = [{'CFBundleTypeName':'E-book',
            'CFBundleTypeExtensions':list(BOOK_EXTENSIONS),
            'CFBundleTypeIconFile':'book.icns',
            'CFBundleTypeRole':'Viewer',
            }]
        pl = dict(
                CFBundleDevelopmentRegion='English',
                CFBundleDisplayName=APPNAME,
                CFBundleName=APPNAME,
                CFBundleIdentifier='net.kovidgoyal.calibre',
                CFBundleVersion=VERSION,
                CFBundleShortVersionString=VERSION,
                CFBundlePackageType='APPL',
                CFBundleSignature='????',
                CFBundleExecutable='calibre',
                CFBundleDocumentTypes=docs,
                LSMinimumSystemVersion='10.7.2',
                LSRequiresNativeExecution=True,
                NSAppleScriptEnabled=False,
                NSHumanReadableCopyright=time.strftime('Copyright %Y, Kovid Goyal'),
                CFBundleGetInfoString=('calibre, an E-book management '
                    'application. Visit http://calibre-ebook.com for details.'),
                CFBundleIconFile='calibre.icns',
                NSHighResolutionCapable=True,
                LSApplicationCategoryType='public.app-category.productivity',
                LSEnvironment=env
        )
        plistlib.writePlist(pl, join(self.contents_dir, 'Info.plist'))

    @flush
    def install_dylib(self, path, set_id=True):
        """Copy a dylib into Frameworks, optionally set its id, and fix
        its dependencies."""
        shutil.copy2(path, self.frameworks_dir)
        if set_id:
            self.set_id(join(self.frameworks_dir, basename(path)),
                    self.FID+'/'+basename(path))
        self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))

    @flush
    def add_podofo(self):
        info('\nAdding PoDoFo')
        pdf = join(SW, 'lib', 'libpodofo.0.9.3.dylib')
        self.install_dylib(pdf)

    @flush
    def add_poppler(self):
        info('\nAdding poppler')
        for x in ('libpoppler.46.dylib',):
            self.install_dylib(os.path.join(SW, 'lib', x))
        for x in ('pdftohtml', 'pdftoppm', 'pdfinfo'):
            self.install_dylib(os.path.join(SW, 'bin', x), False)

    @flush
    def add_imaging_libs(self):
        info('\nAdding libjpeg, libpng and libwebp')
        for x in ('jpeg.8', 'png16.16', 'webp.5'):
            self.install_dylib(os.path.join(SW, 'lib', 'lib%s.dylib' % x))

    @flush
    def add_fontconfig(self):
        """Bundle fontconfig (and related libs) plus a fonts.conf that also
        searches the standard OS X font directories."""
        info('\nAdding fontconfig')
        for x in ('fontconfig.1', 'freetype.6', 'expat.1',
                  'plist.3', 'usbmuxd.4', 'imobiledevice.5'):
            src = os.path.join(SW, 'lib', 'lib'+x+'.dylib')
            self.install_dylib(src)
        dst = os.path.join(self.resources_dir, 'fonts')
        if os.path.exists(dst):
            shutil.rmtree(dst)
        src = os.path.join(SW, 'etc', 'fonts')
        shutil.copytree(src, dst, symlinks=False)
        fc = os.path.join(dst, 'fonts.conf')
        # Close the file handles deterministically instead of relying on
        # refcounting (the original used bare open().read()/write()).
        with open(fc, 'rb') as f:
            raw = f.read()
        raw = raw.replace('<dir>/usr/share/fonts</dir>', '''\
        <dir>/Library/Fonts</dir>
        <dir>/System/Library/Fonts</dir>
        <dir>/usr/X11R6/lib/X11/fonts</dir>
        <dir>/usr/share/fonts</dir>
        <dir>/var/root/Library/Fonts</dir>
        <dir>/usr/share/fonts</dir>
        ''')
        with open(fc, 'wb') as f:
            f.write(raw)

    @flush
    def add_imagemagick(self):
        """Bundle the ImageMagick libraries and coder/filter modules."""
        info('\nAdding ImageMagick')
        for x in ('Wand-6', 'Core-6'):
            self.install_dylib(os.path.join(SW, 'lib', 'libMagick%s.Q16.2.dylib'%x))
        idir = glob.glob(os.path.join(SW, 'lib', 'ImageMagick-*'))[-1]
        dest = os.path.join(self.frameworks_dir, 'ImageMagick')
        if os.path.exists(dest):
            shutil.rmtree(dest)
        shutil.copytree(idir, dest, True)
        for x in os.walk(dest):
            for f in x[-1]:
                if f.endswith('.so'):
                    f = join(x[0], f)
                    self.fix_dependencies_in_lib(f)

    @flush
    def add_misc_libraries(self):
        for x in ('usb-1.0.0', 'mtp.9', 'ltdl.7',
                  'chm.0', 'sqlite3.0', 'icudata.53', 'icui18n.53', 'icuio.53', 'icuuc.53'):
            info('\nAdding', x)
            x = 'lib%s.dylib'%x
            shutil.copy2(join(SW, 'lib', x), self.frameworks_dir)
            dest = join(self.frameworks_dir, x)
            self.set_id(dest, self.FID+'/'+x)
            self.fix_dependencies_in_lib(dest)

    @flush
    def add_site_packages(self):
        """Copy all site-packages/eggs on sys.path into the bundle, then
        prune PyQt5 modules that calibre does not use."""
        info('\nAdding site-packages')
        os.makedirs(self.site_packages)
        # NOTE: py2 semantics — map() returns a list here; this would need
        # list(map(...)) under py3.
        paths = reversed(map(abspath, [x for x in sys.path if x.startswith('/')]))
        upaths = []
        for x in paths:
            if x not in upaths and (x.endswith('.egg') or
                    x.endswith('/site-packages')):
                upaths.append(x)
        upaths.append(os.path.expanduser('~/build/calibre/src'))
        for x in upaths:
            info('\t', x)
            tdir = None
            try:
                if not os.path.isdir(x):
                    try:
                        zf = zipfile.ZipFile(x)
                    except:
                        self.warn(x, 'is neither a directory nor a zipfile')
                        continue
                    tdir = tempfile.mkdtemp()
                    zf.extractall(tdir)
                    x = tdir
                self.add_modules_from_dir(x)
                self.add_packages_from_dir(x)
            finally:
                if tdir is not None:
                    shutil.rmtree(tdir)
        # calibre's own compiled plugins are bundled separately.
        shutil.rmtree(os.path.join(self.site_packages, 'calibre', 'plugins'))
        sp = join(self.resources_dir, 'Python', 'site-packages')
        for x in os.listdir(join(sp, 'PyQt5')):
            if x.endswith('.so') and x.rpartition('.')[0] not in PYQT_MODULES:
                os.remove(join(sp, 'PyQt5', x))
        os.remove(join(sp, 'PyQt5', 'uic/port_v3/proxy_base.py'))
        self.remove_bytecode(sp)

    @flush
    def add_modules_from_dir(self, src):
        """Copy top-level .py/.so modules from *src* into site-packages."""
        for x in glob.glob(join(src, '*.py'))+glob.glob(join(src, '*.so')):
            shutil.copy2(x, self.site_packages)
            if x.endswith('.so'):
                # NOTE(review): this fixes install names in the *source*
                # .so, not the copy just made — presumably intentional for
                # the build environment, but verify.
                self.fix_dependencies_in_lib(x)

    @flush
    def add_packages_from_dir(self, src):
        """Copy every python package found directly under *src*."""
        for x in os.listdir(src):
            x = join(src, x)
            if os.path.isdir(x) and os.path.exists(join(x, '__init__.py')):
                if self.filter_package(basename(x)):
                    continue
                self.add_package_dir(x)

    @flush
    def add_package_dir(self, x, dest=None):
        """Copy a package tree, keeping only .py/.so files, and fix the
        dependencies of all copied .so files."""
        def ignore(root, files):
            ans = []
            for y in files:
                ext = os.path.splitext(y)[1]
                if ext not in ('', '.py', '.so') or \
                        (not ext and not os.path.isdir(join(root, y))):
                    ans.append(y)
            return ans
        if dest is None:
            dest = self.site_packages
        dest = join(dest, basename(x))
        shutil.copytree(x, dest, symlinks=True, ignore=ignore)
        self.postprocess_package(x, dest)
        for x in os.walk(dest):
            for f in x[-1]:
                if f.endswith('.so'):
                    f = join(x[0], f)
                    self.fix_dependencies_in_lib(f)

    @flush
    def filter_package(self, name):
        """Return True for build-time-only packages that must not ship."""
        return name in ('Cython', 'modulegraph', 'macholib', 'py2app',
                'bdist_mpkg', 'altgraph')

    @flush
    def postprocess_package(self, src_path, dest_path):
        """Hook for subclasses; no-op by default."""
        pass

    @flush
    def add_stdlib(self):
        """Copy the python standard library (minus unneeded pieces) plus
        the config/include files needed to build extensions."""
        info('\nAdding python stdlib')
        src = SW + '/python/Python.framework/Versions/Current/lib/python'
        src += self.version_info
        dest = join(self.resources_dir, 'Python', 'lib', 'python')
        dest += self.version_info
        os.makedirs(dest)
        for x in os.listdir(src):
            if x in ('site-packages', 'config', 'test', 'lib2to3', 'lib-tk',
                    'lib-old', 'idlelib', 'plat-mac', 'plat-darwin', 'site.py'):
                continue
            x = join(src, x)
            if os.path.isdir(x):
                self.add_package_dir(x, dest)
            elif os.path.splitext(x)[1] in ('.so', '.py'):
                shutil.copy2(x, dest)
                dest2 = join(dest, basename(x))
                if dest2.endswith('.so'):
                    self.fix_dependencies_in_lib(dest2)
        self.remove_bytecode(join(self.resources_dir, 'Python', 'lib'))
        confdir = join(self.resources_dir, 'Python',
                'lib/python%s/config'%self.version_info)
        os.makedirs(confdir)
        shutil.copy2(join(src, 'config/Makefile'), confdir)
        incdir = join(self.resources_dir, 'Python',
                'include/python'+self.version_info)
        os.makedirs(incdir)
        shutil.copy2(join(src.replace('/lib/', '/include/'), 'pyconfig.h'),
                incdir)

    @flush
    def remove_bytecode(self, dest):
        """Delete all .pyc/.pyo files under *dest*."""
        for x in os.walk(dest):
            root = x[0]
            for f in x[-1]:
                # BUG FIX: os.path.splitext() returns a (stem, ext) tuple,
                # so the original comparison of the whole tuple against
                # '.pyc'/'.pyo' was never true and stale bytecode was
                # silently left in the bundle.
                if os.path.splitext(f)[1] in ('.pyc', '.pyo'):
                    os.remove(join(root, f))

    @flush
    def compile_py_modules(self):
        """Byte-compile every bundled .py file and remove the source."""
        info('\nCompiling Python modules')
        base = join(self.resources_dir, 'Python')
        for x in os.walk(base):
            root = x[0]
            for f in x[-1]:
                if f.endswith('.py'):
                    y = join(root, f)
                    rel = os.path.relpath(y, base)
                    try:
                        py_compile.compile(y, dfile=rel, doraise=True)
                        os.remove(y)
                    except:
                        self.warn('WARNING: Failed to byte-compile', y)

    def create_app_clone(self, name, specialise_plist):
        """Create a nested helper .app whose Contents symlink back into the
        main bundle, with its own (specialised) Info.plist."""
        info('\nCreating ' + name)
        cc_dir = os.path.join(self.contents_dir, name, 'Contents')
        exe_dir = join(cc_dir, 'MacOS')
        os.makedirs(exe_dir)
        for x in os.listdir(self.contents_dir):
            if x.endswith('.app'):
                continue
            if x == 'Info.plist':
                plist = plistlib.readPlist(join(self.contents_dir, x))
                specialise_plist(plist)
                plist.pop('CFBundleDocumentTypes')
                exe = plist['CFBundleExecutable']
                # We cannot symlink the bundle executable as if we do,
                # codesigning fails
                nexe = plist['CFBundleExecutable'] = exe + '-placeholder-for-codesigning'
                shutil.copy2(join(self.contents_dir, 'MacOS', exe), join(exe_dir, nexe))
                exe = join(exe_dir, plist['CFBundleExecutable'])
                plistlib.writePlist(plist, join(cc_dir, x))
            elif x == 'MacOS':
                for item in os.listdir(join(self.contents_dir, 'MacOS')):
                    os.symlink('../../../MacOS/'+item, join(exe_dir, item))
            else:
                os.symlink(join('../..', x), join(cc_dir, x))

    @flush
    def create_console_app(self):
        """Create the background console helper app and bundle the
        terminal-notifier helper."""
        def specialise_plist(plist):
            plist['LSBackgroundOnly'] = '1'
            plist['CFBundleIdentifier'] = 'com.calibre-ebook.console'
            plist['CFBundleExecutable'] = 'calibre-parallel'
        self.create_app_clone('console.app', specialise_plist)
        # Comes from the terminal-notifier project:
        # https://github.com/alloy/terminal-notifier
        shutil.copytree(join(SW, 'build/notifier.app'), join(
            self.contents_dir, 'calibre-notifier.app'))

    @flush
    def create_gui_apps(self):
        """Create the viewer/editor/debug helper apps."""
        def specialise_plist(launcher, plist):
            plist['CFBundleDisplayName'] = plist['CFBundleName'] = {
                'ebook-viewer':'E-book Viewer', 'ebook-edit':'Edit Book', 'calibre-debug': 'calibre (debug)',
            }[launcher]
            plist['CFBundleExecutable'] = launcher
            if launcher != 'calibre-debug':
                plist['CFBundleIconFile'] = launcher + '.icns'
            plist['CFBundleIdentifier'] = 'com.calibre-ebook.' + launcher
        for launcher in ('ebook-viewer', 'ebook-edit', 'calibre-debug'):
            self.create_app_clone(launcher + '.app', partial(specialise_plist, launcher))

    @flush
    def copy_site(self):
        """Install calibre's custom site.py into the bundled stdlib."""
        base = os.path.dirname(__file__)
        shutil.copy2(join(base, 'site.py'), join(self.resources_dir, 'Python',
            'lib', 'python'+self.version_info))

    @flush
    def makedmg(self, d, volname,
                destdir='dist',
                internet_enable=True,
                format='UDBZ'):
        ''' Copy a directory d into a dmg named volname '''
        info('\nSigning...')
        sys.stdout.flush()
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        dmg = os.path.join(destdir, volname+'.dmg')
        if os.path.exists(dmg):
            os.unlink(dmg)
        tdir = tempfile.mkdtemp()
        appdir = os.path.join(tdir, os.path.basename(d))
        shutil.copytree(d, appdir, symlinks=True)
        with timeit() as times:
            sign_app(appdir)
        info('Signing completed in %d minutes %d seconds' % tuple(times))
        os.symlink('/Applications', os.path.join(tdir, 'Applications'))
        size_in_mb = int(subprocess.check_output(['du', '-s', '-k', tdir]).decode('utf-8').split()[0]) / 1024.
        cmd = ['/usr/bin/hdiutil', 'create', '-srcfolder', tdir, '-volname', volname, '-format', format]
        if 190 < size_in_mb < 250:
            # We need -size 255m because of a bug in hdiutil. When the size of
            # srcfolder is close to 200MB hdiutil fails with
            # diskimages-helper: resize request is above maximum size allowed.
            cmd += ['-size', '255m']
        info('\nCreating dmg...')
        with timeit() as times:
            subprocess.check_call(cmd + [dmg])
            if internet_enable:
                subprocess.check_call(['/usr/bin/hdiutil', 'internet-enable', '-yes', dmg])
        info('dmg created in %d minutes and %d seconds' % tuple(times))
        shutil.rmtree(tdir)
        size = os.stat(dmg).st_size/(1024*1024.)
        info('\nInstaller size: %.2fMB\n'%size)
        return dmg
def test_exe():
    """Exercise only the launcher compilation against an existing build tree."""
    bundle = abspath(join('build', APPNAME+'.app'))
    app = Py2App(bundle)
    app.create_exe()
    return 0
def main(test=False, dont_strip=False):
    """Freeze calibre into an OS X bundle.

    Passing 'test_exe' on the command line builds only the launchers.
    """
    if 'test_exe' in sys.argv:
        return test_exe()
    bundle = abspath(join(os.path.dirname(SRC), 'build', APPNAME+'.app'))
    Py2App(bundle, test_launchers=test, dont_strip=dont_strip)
    return 0
# Script entry point: exit status comes from main().
if __name__ == '__main__':
    sys.exit(main())
| ashang/calibre | setup/installer/osx/app/main.py | Python | gpl-3.0 | 27,912 | [
"VisIt"
] | d1f6493b7e7dd50bf4d6ecdc6e9ba96b2a3fed0ab0d0f50fca12a78a5d29e001 |
# Plot the dense-gas tracer ratio (HCN/CO) versus molecular gas surface
# density for every DEGAS DR1 galaxy, one (marker, color) pair per galaxy.

import matplotlib.pyplot as plt
import os
from astropy.table import Table
import numpy as np

# setup information sources
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'], 'degas_base.fits'))
stack = Table.read('/lustre/cv/users/akepley/degas/stack_test/stack_IR6p0_mom1.fits')

plotDir = os.path.join(os.environ['ANALYSISDIR'], 'plots', 'fdense_plots')
if not os.path.exists(plotDir):
    os.mkdir(plotDir)

# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
# number of DR1 galaxies (the original assigned this twice; the duplicate
# assignment has been removed)
ndr1 = np.sum(dr1)

# setup plot style: markers and colors are tiled so that every galaxy
# gets its own (marker, color) combination
markers = ['o', 'v', '^', 's', '*', 'D']  # 6 items
colors = ['royalblue', 'forestgreen', 'darkorange', 'royalblue', 'crimson',
          'rebeccapurple', 'darkcyan', 'darkmagenta']

markerlist = np.tile(markers, int(np.ceil(ndr1 / len(markers))))
markerlist = markerlist[0:ndr1]
colorlist = np.tile(colors, int(np.ceil(ndr1 / len(colors))))
colorlist = colorlist[0:ndr1]

# set up plot
fig = plt.figure(figsize=(8, 6), facecolor='white', edgecolor='white')
fig.subplots_adjust(left=0.1, right=0.8, bottom=0.1, top=0.9)
ax = fig.add_subplot(1, 1, 1)

# for each dr1 galaxy, show radial trends for each line.
for (galaxy, color, marker) in zip(degas[dr1], colorlist, markerlist):
    idx = ((stack['galaxy'] == galaxy['NAME'])
           & (stack['bin_type'] == 'intensity'))

    # unit of molmass is solMass/pc^2; ALPHA_CO metadata is "<value> <units>"
    # (the unused alpha_co_units local from the original has been dropped)
    alpha_co = float(stack.meta['ALPHA_CO'].split()[0])
    molmass = stack[idx]['bin_mean'] * alpha_co

    lolims = stack[idx]['ratio_HCN_CO_lolim']
    fdense = stack[idx]['ratio_HCN_CO']
    fdense_err = stack[idx]['ratio_HCN_CO_err']
    # limit points get a nominal 30% error bar so the limit arrows render
    fdense_err[lolims] = fdense[lolims] * 0.3

    ax.errorbar(molmass, fdense,
                yerr=fdense_err,
                uplims=lolims,
                marker=marker,
                markerfacecolor='none',
                markeredgecolor=color,
                linestyle='--',
                color=color)

    # overplot filled symbols for the detections only
    ax.scatter(molmass[~lolims], fdense[~lolims],
               marker=marker,
               color=color,
               label=galaxy['NAME'])

ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
ax.set_xlabel(r'$\Sigma_{mol}$ (M$_{\odot}$ pc$^{-2}$)')
ax.set_ylabel(r'log$_{10}$ (HCN-to-CO)')

fig.show()
fig.savefig(os.path.join(plotDir, 'fdense_vs_molmass_combined.pdf'))
fig.savefig(os.path.join(plotDir, 'fdense_vs_molmass_combined.png'))
plt.close()
| low-sky/degas | scripts/plot_fdense_vs_molmass_combined.py | Python | gpl-3.0 | 2,487 | [
"Galaxy"
] | b858d5ba32bfd64fa6d81b720480e7006dab090908f18f0b41d32d59ffe351bd |
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from scipy.linalg import pinvh
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_bayesian_ridge.py
<sphx_glr_auto_examples_linear_model_plot_bayesian_ridge.py>`.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
    def fit(self, X, y):
        """Fit the model
        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        # Center/scale according to fit_intercept/normalize; the offsets are
        # kept on self so predict() can undo the normalization later.
        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        self.X_offset_ = X_offset_
        self.X_scale_ = X_scale_
        n_samples, n_features = X.shape
        # Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = 1.
        verbose = self.verbose
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        self.scores_ = list()
        coef_old_ = None
        XT_y = np.dot(X.T, y)
        # The SVD is computed once; the squared singular values are reused on
        # every iteration of the update loop below.
        U, S, Vh = linalg.svd(X, full_matrices=False)
        eigen_vals_ = S ** 2
        # Convergence loop of the bayesian ridge regression
        for iter_ in range(self.n_iter):
            # Compute mu and sigma
            # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
            # coef_ = sigma_^-1 * XT * y
            if n_samples > n_features:
                coef_ = np.dot(Vh.T,
                               Vh / (eigen_vals_ +
                                     lambda_ / alpha_)[:, np.newaxis])
                coef_ = np.dot(coef_, XT_y)
                if self.compute_score:
                    logdet_sigma_ = - np.sum(
                        np.log(lambda_ + alpha_ * eigen_vals_))
            else:
                # Underdetermined case (n_features >= n_samples): solve in
                # the sample space via U instead of the feature space.
                coef_ = np.dot(X.T, np.dot(
                    U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
                coef_ = np.dot(coef_, y)
                if self.compute_score:
                    logdet_sigma_ = lambda_ * np.ones(n_features)
                    logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
                    logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
            # Preserve the alpha and lambda values that were used to
            # calculate the final coefficients
            self.alpha_ = alpha_
            self.lambda_ = lambda_
            # Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = (np.sum((alpha_ * eigen_vals_) /
                      (lambda_ + alpha_ * eigen_vals_)))
            lambda_ = ((gamma_ + 2 * lambda_1) /
                       (np.sum(coef_ ** 2) + 2 * lambda_2))
            alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
                      (rmse_ + 2 * alpha_2))
            # Compute the objective function
            if self.compute_score:
                s = lambda_1 * log(lambda_) - lambda_2 * lambda_
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (n_features * log(lambda_) +
                            n_samples * log(alpha_) -
                            alpha_ * rmse_ -
                            (lambda_ * np.sum(coef_ ** 2)) -
                            logdet_sigma_ -
                            n_samples * log(2 * np.pi))
                self.scores_.append(s)
            # Check for convergence
            if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Convergence after ", str(iter_), " iterations")
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        # Posterior covariance of the weights, scaled by the noise precision.
        sigma_ = np.dot(Vh.T,
                        Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
        self.sigma_ = (1. / alpha_) * sigma_
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
    """Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be in Gaussian distributions.
    Also estimate the parameters lambda (precisions of the distributions of the
    weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedures (Evidence Maximization)
    Read more in the :ref:`User Guide <bayesian_regression>`.
    Parameters
    ----------
    n_iter : int, optional
        Maximum number of iterations. Default is 300
    tol : float, optional
        Stop the algorithm if w has converged. Default is 1.e-3.
    alpha_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the alpha parameter. Default is 1.e-6.
    alpha_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the alpha parameter. Default is 1.e-6.
    lambda_1 : float, optional
        Hyper-parameter : shape parameter for the Gamma distribution prior
        over the lambda parameter. Default is 1.e-6.
    lambda_2 : float, optional
        Hyper-parameter : inverse scale parameter (rate parameter) for the
        Gamma distribution prior over the lambda parameter. Default is 1.e-6.
    compute_score : boolean, optional
        If True, compute the objective function at each step of the model.
        Default is False.
    threshold_lambda : float, optional
        threshold for removing (pruning) weights with high precision from
        the computation. Default is 1.e+4.
    fit_intercept : boolean, optional
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
        Default is True.
    normalize : boolean, optional, default False
        This parameter is ignored when ``fit_intercept`` is set to False.
        If True, the regressors X will be normalized before regression by
        subtracting the mean and dividing by the l2-norm.
        If you wish to standardize, please use
        :class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
        on an estimator with ``normalize=False``.
    copy_X : boolean, optional, default True.
        If True, X will be copied; else, it may be overwritten.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (mean of distribution)
    alpha_ : float
        estimated precision of the noise.
    lambda_ : array, shape = (n_features)
        estimated precisions of the weights.
    sigma_ : array, shape = (n_features, n_features)
        estimated variance-covariance matrix of the weights
    scores_ : float
        if computed, value of the objective function (to be maximized)
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.ARDRegression()
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    ... # doctest: +NORMALIZE_WHITESPACE
    ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
            copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
            n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
            verbose=False)
    >>> clf.predict([[1, 1]])
    array([ 1.])
    Notes
    -----
    For an example, see :ref:`examples/linear_model/plot_ard.py
    <sphx_glr_auto_examples_linear_model_plot_ard.py>`.
    References
    ----------
    D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
    competition, ASHRAE Transactions, 1994.
    R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
    http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
    Their beta is our self.alpha_
    Their alpha is our self.lambda_
    ARD is a little different than the slide: only dimensions/features for
    which self.lambda_ < self.threshold_lambda are kept and the rest are
    discarded.
    """
    def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
                 lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
                 threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
                 copy_X=True, verbose=False):
        # Store parameters as given (scikit-learn estimator convention:
        # no validation in __init__).
        self.n_iter = n_iter
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.alpha_1 = alpha_1
        self.alpha_2 = alpha_2
        self.lambda_1 = lambda_1
        self.lambda_2 = lambda_2
        self.compute_score = compute_score
        self.threshold_lambda = threshold_lambda
        self.copy_X = copy_X
        self.verbose = verbose
    def fit(self, X, y):
        """Fit the ARDRegression model according to the given training data
        and parameters.
        Iterative procedure to maximize the evidence
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
        n_samples, n_features = X.shape
        coef_ = np.zeros(n_features)
        X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        # Launch the convergence loop
        # keep_lambda marks the features that have not been pruned yet.
        keep_lambda = np.ones(n_features, dtype=bool)
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        alpha_1 = self.alpha_1
        alpha_2 = self.alpha_2
        verbose = self.verbose
        # Initialization of the values of the parameters
        alpha_ = 1. / np.var(y)
        lambda_ = np.ones(n_features)
        self.scores_ = list()
        coef_old_ = None
        # Iterative procedure of ARDRegression
        for iter_ in range(self.n_iter):
            # Compute mu and sigma (using Woodbury matrix identity)
            sigma_ = pinvh(np.eye(n_samples) / alpha_ +
                           np.dot(X[:, keep_lambda] *
                           np.reshape(1. / lambda_[keep_lambda], [1, -1]),
                           X[:, keep_lambda].T))
            sigma_ = np.dot(sigma_, X[:, keep_lambda] *
                            np.reshape(1. / lambda_[keep_lambda], [1, -1]))
            sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
                              X[:, keep_lambda].T, sigma_)
            # Add 1/lambda on the diagonal without forming an explicit eye().
            sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
            coef_[keep_lambda] = alpha_ * np.dot(
                sigma_, np.dot(X[:, keep_lambda].T, y))
            # Update alpha and lambda
            rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
            gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
            lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
                                    ((coef_[keep_lambda]) ** 2 +
                                     2. * lambda_2))
            alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
                      (rmse_ + 2. * alpha_2))
            # Prune the weights with a precision over a threshold
            keep_lambda = lambda_ < self.threshold_lambda
            coef_[~keep_lambda] = 0
            # Compute the objective function
            if self.compute_score:
                s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
                s += alpha_1 * log(alpha_) - alpha_2 * alpha_
                s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
                            np.sum(np.log(lambda_)))
                s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
                self.scores_.append(s)
            # Check for convergence
            if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
                if verbose:
                    print("Converged after %s iterations" % iter_)
                break
            coef_old_ = np.copy(coef_)
        self.coef_ = coef_
        self.alpha_ = alpha_
        self.sigma_ = sigma_
        self.lambda_ = lambda_
        self._set_intercept(X_offset_, y_offset_, X_scale_)
        return self
    def predict(self, X, return_std=False):
        """Predict using the linear model.
        In addition to the mean of the predictive distribution, also its
        standard deviation can be returned.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Samples.
        return_std : boolean, optional
            Whether to return the standard deviation of posterior prediction.
        Returns
        -------
        y_mean : array, shape = (n_samples,)
            Mean of predictive distribution of query points.
        y_std : array, shape = (n_samples,)
            Standard deviation of predictive distribution of query points.
        """
        y_mean = self._decision_function(X)
        if return_std is False:
            return y_mean
        else:
            if self.normalize:
                X = (X - self.X_offset_) / self.X_scale_
            # Only the un-pruned features contribute to the variance.
            X = X[:, self.lambda_ < self.threshold_lambda]
            sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
            y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
            return y_mean, y_std
| MechCoder/scikit-learn | sklearn/linear_model/bayes.py | Python | bsd-3-clause | 19,650 | [
"Gaussian"
] | 92c54dbc0c5cdc6c1c9efc739832fa32cfb5abea69cfb55782fafb24809fd2c2 |
# Common English given names, used as both first and last names to
# generate the phone-book entries for the sorting benchmark below.
words=[
    u"James", u"John", u"Robert", u"Michael", u"William", u"David", u"Richard", u"Joseph",
    u"Charles", u"Thomas", u"Christopher", u"Daniel", u"Matthew", u"Donald", u"Anthony",
    u"Paul", u"Mark", u"George", u"Steven", u"Kenneth", u"Andrew", u"Edward", u"Brian",
    u"Joshua", u"Kevin", u"Ronald", u"Timothy", u"Jason", u"Jeffrey", u"Gary", u"Ryan",
    u"Nicholas", u"Eric", u"Stephen", u"Jacob", u"Larry", u"Frank", u"Jonathan", u"Scott",
    u"Justin", u"Raymond", u"Brandon", u"Gregory", u"Samuel", u"Patrick", u"Benjamin",
    u"Jack", u"Dennis", u"Jerry", u"Alexander", u"Tyler", u"Douglas", u"Henry", u"Peter",
    u"Walter", u"Aaron", u"Jose", u"Adam", u"Harold", u"Zachary", u"Nathan", u"Carl",
    u"Kyle", u"Arthur", u"Gerald", u"Lawrence", u"Roger", u"Albert", u"Keith", u"Jeremy",
    u"Terry", u"Joe", u"Sean", u"Willie", u"Jesse", u"Ralph", u"Billy", u"Austin", u"Bruce",
    u"Christian", u"Roy", u"Bryan", u"Eugene", u"Louis", u"Harry", u"Wayne", u"Ethan",
    u"Jordan", u"Russell", u"Alan", u"Philip", u"Randy", u"Juan", u"Howard", u"Vincent",
    u"Bobby", u"Dylan", u"Johnny", u"Phillip", u"Craig"]
# This is a phone book record.
class Record:
    """A (first, last) name pair, ordered by last name then first name."""
    def __init__(self, firstname, lastname):
        self.first = firstname
        self.last = lastname
    def __lt__(self, other):
        # Compare by last name first; ties are broken by first name.
        return (self.last, self.first) < (other.last, other.first)
# Build every (first, last) combination as the phone book to sort.
Records = []
for first in words:
    for last in words:
        Records.append(Record(first, last))

# Benchmark: repeatedly sort the full phone book.
# FIX: sorted() already returns a new list, so the explicit copy the
# original made first (y = Records[:]) was redundant work outside the
# operation being measured.
for i in xrange(100):
    y = sorted(Records)
    #for w in y:
    #    print w.first, w.last
| Ivacker/swift | utils/benchmark/Phonebook/Phonebook.py | Python | apache-2.0 | 1,595 | [
"Brian"
] | 39988a41020106409708e3da4aa8604bb5e03198dbb4f487052bc29be722bd88 |
#!/usr/bin/env python
"""Collect daily reference desk statistics in a database
Display the stats in a useful way with charts and download links"""
from flask import Flask, abort, request, redirect, url_for, \
render_template, make_response, g, session
from flask_babelex import Babel
from flask_login import LoginManager, login_required, current_user, \
login_user, logout_user, AnonymousUserMixin
from os.path import abspath, dirname
from data import lists
from conf import ConfigFile
import ldap
import sys
import datetime
import psycopg2
import StringIO
import copy
import csv
import random
import ConfigParser
from optparse import OptionParser
class LocalCGIRootFix(object):
    """Wrap the application in this middleware if you are using FastCGI or CGI
    and you have problems with your app root being set to the cgi script's path
    instead of the path users are going to visit
    .. versionchanged:: 0.9
       Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.
    :param app: the WSGI application
    :param app_root: Defaulting to ``'/'``, you can set this to something else
       if your app is mounted somewhere else.
    Clone of workzeug.contrib.fixers.CGIRootFix, but doesn't strip leading '/'
    """
    def __init__(self, app, app_root='/'):
        self.app = app
        self.app_root = app_root
    def __call__(self, environ, start_response):
        # only set PATH_INFO for older versions of Lighty or if no
        # server software is provided. That's because the test was
        # added in newer Werkzeug versions and we don't want to break
        # people's code if they are using this fixer in a test that
        # does not set the SERVER_SOFTWARE key.
        if 'SERVER_SOFTWARE' not in environ or \
                environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
            # Fold SCRIPT_NAME back into PATH_INFO (without stripping
            # the leading '/'), then re-root the app at app_root.
            environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
                environ.get('PATH_INFO', '')
        environ['SCRIPT_NAME'] = self.app_root.rstrip('/')
        return self.app(environ, start_response)
app = Flask(__name__)
app.root_path = abspath(dirname(__file__))
# Global options dict: populated from the config file, then overridden
# by command-line flags below.
opt = {}
parser = OptionParser()
parser.add_option('-d', '--debug', dest='DEBUG', action='store_true',
                  help='Provides debug output when unhandled exceptions occur.')
parser.add_option('-v', '--verbose', dest='VERBOSE', action='store_true',
                  help='Provides verbose output for what is being done.')
parser.add_option('-s', '--student', dest='STUDENT', action='store_true',
                  help='Connects to the student LDAP instead of the staff.')
cmd_opt, junk = parser.parse_args()
# Load the [Refdesk] section of config.ini into opt.
c = ConfigFile(app.root_path + '/config.ini')
keys = c.getsection('Refdesk')
for key in keys:
    opt[key] = keys[key]
opt['DEBUG'] = cmd_opt.DEBUG
opt['VERBOSE'] = cmd_opt.VERBOSE
opt['STUDENT'] = cmd_opt.STUDENT
if opt['VERBOSE']:
    print('Root path: ' + app.root_path)
if opt['VERBOSE']:
    print(app.root_path + '/config.ini')
# Signed session cookies require a secret key; refuse to start without one.
if opt['SECRET']:
    app.secret_key = opt['SECRET']
else:
    print('No secret key. Aborting.')
    exit()
app.wsgi_app = LocalCGIRootFix(app.wsgi_app, app_root=opt['APP_ROOT'])
babel = Babel(app)
login_manager = LoginManager()
login_manager.init_app(app)
def get_db():
    """
    Get a database connection
    With a host attribute in the mix, you could connect to a remote
    database, but then you would have to set up .pgpass or add a
    password parameter, so let's keep it simple.
    """
    try:
        return psycopg2.connect(
            database=opt['DB_NAME'],
            host=opt['DB_HOST'],
            user=opt['DB_USER'],
            password=opt['DB_PASS']
        )
    # On failure this logs (when verbose) and implicitly returns None;
    # callers are expected to cope with a missing connection.
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_ldap_connection():
    """Open an (unbound) connection to the configured LDAP host."""
    return ldap.initialize('ldap://' + opt['LDAP_HOST'])
class User():
__tablename__ = 'users'
def __init__(self, username, session_id = None):
self.uname = username
if not session_id:
self.id = random.SystemRandom().randint(-0xFFFFFF, 0xFFFFFF)
else:
self.id = session_id
if opt['VERBOSE']:
print(self.id)
@staticmethod
def try_login(username, password):
conn = get_ldap_connection()
if opt['STUDENT']:
conn.simple_bind_s('cn=%s,ou=STD,o=LUL' % username, password)
else:
conn.simple_bind_s('cn=%s,ou=Empl,o=LUL' % username, password)
@staticmethod
def get_by_id(id):
dbh = get_db()
cur = dbh.cursor()
cur.execute("SELECT id, uname FROM users where id = %s" % id)
row = cur.fetchone()
try:
if row[0]:
i = row[0]
u = row[1]
dbh.close()
return User(u, i)
else:
dbh.close()
return None
except Exception, ex:
if opt['VERBOSE']:
print(ex)
dbh.close()
# Executes is query returns no rows.
return None
@staticmethod
def get_by_uname(uname):
dbh = get_db()
cur = dbh.cursor()
cur.execute("SELECT id, uname FROM users")
for row in cur.fetchall():
if uname == row[1]:
dbh.close()
return User(row[1], row[0])
else:
return None
dbh.close()
return None
def add_to_db(self):
dbh = get_db()
cur = dbh.cursor()
cur.execute("""
INSERT INTO users (id, uname)
VALUES (%s, %s)""", (self.id, self.uname))
dbh.commit()
dbh.close()
def logout(self):
dbh = get_db()
cur = dbh.cursor()
cur.execute("""
DELETE FROM users WHERE uname = '%s'""" % self.uname)
dbh.commit()
dbh.close()
def expired(self):
dbh = get_db()
cur = dbh.cursor()
cur.execute("""SELECT expires FROM users WHERE uname = %s AND id = %s""", (self.uname, self.id))
row = cur.fetchone()
if row[0] < datetime.datetime.now():
dbh.close()
return True
dbh.close()
return False
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
@babel.localeselector
def get_locale():
    # Babel locale: whatever pre_request() stashed in g, defaulting to English.
    return g.get('current_lang','en');
@app.before_request
def pre_request():
    # Expire stale sessions before every request.  Anonymous visitors have
    # no 'uid' in the session, which raises and falls through harmlessly.
    try:
        if opt['VERBOSE']:
            print(session['uid'])
        current_user = User.get_by_id(session['uid'])
        if current_user.expired():
            current_user.logout()
            logout_user()
    except Exception, ex:
        if opt['VERBOSE']:
            print('User is anonymous.')
    try:
        g.user = current_user
    except Exception, ex:
        if opt['VERBOSE']:
            print('No user object set for session.')
    # Validate the /<lang>/ URL prefix and strip it from the view args so
    # handlers don't need a lang parameter.
    if request.view_args and 'lang' in request.view_args:
        lang = request.view_args['lang']
        if lang in ['en', 'fr']:
            g.current_lang = lang
            request.view_args.pop('lang')
        else:
            return abort(404)
@login_manager.user_loader
def load_user(id):
    # Flask-Login callback: map a stored session id back to a User (or None).
    return User.get_by_id(id)
@app.route('/<lang>/', methods=['GET', 'POST'])
@login_required
def submit(date=None):
    "Either show the form, or process the form"
    # NOTE(review): edit_data() below registers the same '/<lang>/' rule —
    # confirm which handler Flask actually dispatches to.
    if request.method == 'POST':
        return eat_stat_form()
    else:
        #return show_stat_form()
        if opt['VERBOSE']:
            print('Before queueing data edit.')
        return edit_data(date)
@app.errorhandler(401)
def page_forbidden(err):
    "Redirect to the login page."
    # NOTE(review): serves the login page with status 200, not 401.
    return render_template('login.html'), 200
@app.errorhandler(404)
def page_not_found(err):
    "Give a simple 404 error page."
    return render_template('404.html'), 404
@app.errorhandler(500)
def page_broken(err):
    """
    Let people know something went wrong
    This could be a duplicate entry for the same day, or a lost database
    connection, or pretty much anything. Leave it up to the brainiac
    devops person to suss it out.
    """
    return render_template('500.html'), 500
def login():
    """
    Attempt to log into the LDAP server with the authentication
    provided by a form.
    """
    try:
        # Already logged in: skip straight to the edit page.
        if current_user.is_authenticated():
            if opt['VERBOSE']:
                print(current_user)
            return redirect(url_for('edit_data', lang='en')), 302
        form = request.form
        username = form['user']
        password = form['pass']
        if opt['VERBOSE']:
            print(username)
        # Raises on bad credentials; caught below and shown as a 401 page.
        User.try_login(username, password)
        # First successful login for this user creates the session row.
        user = User.get_by_uname(username)
        if not user:
            user = User(username)
            user.add_to_db()
        session['uid'] = user.id
        login_user(user)
        return redirect(url_for('edit_data', lang='en')), 302
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
        return render_template('login_fail.html'), 401
def eat_stat_form():
    "Shove the form data into the database"
    try:
        dbh = get_db()
        cur = dbh.cursor()
        form = request.form
        fdate = form['refdate']
        if opt['VERBOSE']:
            print('reached data insertion...')
        # One row per (time slot, stat type) pair, carrying both the
        # English and French counts from the form.
        for time in lists['timelist']:
            for stat in lists['helplist']:
                if opt['VERBOSE']:
                    print(time, stat)
                val_en = form[time+stat+'_en']
                val_fr = form[time+stat+'_fr']
                cur.execute("""INSERT INTO refdeskstats (refdate, reftime, reftype, refcount_en, refcount_fr)
                    VALUES (%s, %s, %s, %s, %s)""", (fdate, time, stat, val_en, val_fr))
        dbh.commit()
        dbh.close()
        message = "Your form was successfully submitted."
        return render_template('menu_interface.html', message=message)
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
        return abort(500)
def get_stats(date):
    "Get the stats from the database"
    # Returns the distinct dates with data matching the given prefix
    # (a day or a 'YYYY-MM' month), newest first, as [{'refdate': d}, ...].
    try:
        dbase = get_db()
        cur = dbase.cursor()
        monthdate = str(date) + '%'
        cur.execute("""
            SELECT DISTINCT refdate
            FROM refstatview
            WHERE refdate::text LIKE %s
            ORDER BY refdate desc""",
            (str(monthdate),))
        #cur.execute('SELECT DISTINCT refdate FROM refview ORDER BY refdate desc')
        dates = [dict(refdate=row[0]) for row in cur.fetchall()]
        if dbase.closed:
            return "I was closed!"
        dbase.commit()
        dbase.close()
        return dates
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_months():
    "Get the months that have data"
    # Returns [{'month': 'YYYY-M'}, ...], newest first.
    try:
        dbase = get_db()
        cur = dbase.cursor()
        cur.execute("""SELECT DISTINCT date_part('year',refdate)||
            '-' ||date_part('month',refdate) AS date_piece,
            (date_part('year',refdate)|| '-' ||date_part('month',refdate)||
            '-01')::date AS date
            FROM refstatview GROUP BY date_piece
            ORDER BY date desc""")
        months = []
        for row in cur.fetchall():
            # Reuse parse_date to pull year/month out of the first-of-month date.
            year, month = parse_date(row[1])
            months.append({'month': year + '-' + month})
        #months = [dict(month=row[0]) for row in cur.fetchall()]
        dbase.commit()
        dbase.close()
        # print(months)
        return months
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_csv(filename):
    "Get the data in CSV format"
    # filename is either a date string or the literal 'allstats' for a
    # dump of every row.  Returns the CSV text (no header row).
    try:
        data = get_db()
        cur = data.cursor()
        #print(cur.mogrify("SELECT refdate, refstat, refcount FROM refstats WHERE refdate = %s", (str(filename),)))
        if str(filename) == 'allstats':
            cur.execute("SELECT refdate, reftime, reftype, refcount_en, refcount_fr FROM refstatview ORDER BY refdate, reftime, reftype")
        else:
            cur.execute("""SELECT refdate, reftime, reftype, refcount_en, refcount_fr
                FROM refstatview WHERE refdate=%s""",
                (str(filename),))
        # Build the CSV in memory rather than on disk.
        csvgen = StringIO.StringIO()
        csvfile = csv.writer(csvgen)
        for row in cur.fetchall():
            csvfile.writerow([row[0], row[1], row[2], row[3], row[4]])
        csv_result = csvgen.getvalue()
        csvgen.close()
        data.commit()
        data.close()
        return csv_result
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_data_array(date):
    "Put the data into an array for Google charts"
    try:
        data = get_db()
        cur = data.cursor()
        cur.execute("""SELECT refdate, reftime, reftype, refcount_en,
            refcount_fr FROM refstatview WHERE refdate=%s""",
            (str(date),))
        # Deep-copy the templates from data.lists so the shared module-level
        # prototypes are never mutated between requests.
        stack = copy.deepcopy(lists['stack_a'])
        array = copy.deepcopy(lists['array'])
        for row in cur.fetchall():
            timeslot = str(row[1])
            stat = row[2]
            # helpcodes/timecodes map stat names and time slots to chart
            # row/column positions.
            array[lists['helpcodes'][stat+'_en']-1][lists['timecodes'][timeslot]] = row[3]
            array[lists['helpcodes'][stat+'_fr']-1][lists['timecodes'][timeslot]] = row[4]
        data.commit()
        data.close()
        for stat_data in array:
            stack.append(stat_data)
        return stack
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_time_array(date):
    "Put the data into an array for Google charts"
    try:
        data = get_db()
        cur = data.cursor()
        #cur.execute("SELECT refdate, refstat, refcount FROM refstats WHERE refdate=%s", (str(date),))
        #"""If we want everyday in the month"""
        # A 7-character date ('YYYY-MM') means a whole month: aggregate the
        # counts per time slot; otherwise fetch the single day's rows.
        if len(str(date)) == 7:
            date_year, date_month = parse_date(str(date))
            if opt['VERBOSE']:
                print('viewing:'+ str((date_year, date_month)))
            cur.execute("""SELECT reftime, reftype,
                sum(refcount_en), sum(refcount_fr)
                FROM refstatview
                WHERE date_part('year',refdate) = %s
                AND date_part('month',refdate) = %s
                GROUP BY reftime, reftype""",
                (str(date_year), str(date_month)))
        else:
            cur.execute("""SELECT reftime, reftype, refcount_en, refcount_fr, refdate
                FROM refstatview WHERE refdate=%s""",
                (str(date),))
        # Deep-copy the chart templates so module-level data stays pristine.
        stack = copy.deepcopy(lists['stack_b'])
        times = copy.deepcopy(lists['times'])
        if opt['VERBOSE']:
            print(times)
        for row in cur.fetchall():
            timeslot = str(row[0])
            stat = row[1]
            #print(helpcodes[stat])
            #if timeslot in lists['timelist']:
            times[lists['timecodes'][timeslot]-1][lists['helpcodes'][stat+'_en']] = row[2]
            times[lists['timecodes'][timeslot]-1][lists['helpcodes'][stat+'_fr']] = row[3]
        if opt['VERBOSE']:
            print(times)
        data.commit()
        data.close()
        for time in times:
            stack.append(time)
            #print(time)
        #print(stack)
        return stack
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_weekday_array(date):
    """Put the data into an array for google charts"""
    # Totals per weekday (0-6) for every stat type in the given month.
    try:
        data = get_db()
        cur = data.cursor()
        month = str(date) + '%'
        cur.execute("""
            SELECT reftime, reftype, refcount_en, refcount_fr, day_of_week
            FROM refstatview_day_of_week
            WHERE refdate::text LIKE %s
            ORDER BY day_of_week""", (str(month),))
        stack = copy.deepcopy(lists['stack_b'])
        days = copy.deepcopy(lists['days'])
        for row in cur.fetchall():
            """Get the data for each day of the month and do something useful with it"""
            timeslot = row[0]
            stat = row[1]
            # Accumulate counts into the weekday bucket (0=Sunday per Postgres).
            if row[4] >= 0 and row[4] <= 6:
                days[int(row[4])][lists['helpcodes'][stat+'_en']] += row[2]
                days[int(row[4])][lists['helpcodes'][stat+'_fr']] += row[3]
        data.commit()
        data.close()
        for day in days:
            stack.append(day)
        #print(stack)
        return stack
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def parse_date(date):
    """Split a '-'-separated date string and return (year, month)."""
    year, month = str(date).split('-')[:2]
    return year, month
def parse_stat(stat):
    "Returns the type of stat and the time slot"
    # Field names look like '<time><stattype>'; split on the first known
    # stat type.  Implicitly returns None when nothing matches.
    for helptype in lists['helplist']:
        if opt['VERBOSE']:
            print(stat)
        idx = stat.find(helptype)
        if idx > -1:
            return stat[:idx], helptype
def get_missing(date):
    "Find the dates that are missing stats"
    # Compares a generated series of every day in the month against the
    # days that actually have rows; returns [{'refdate': d}, ...].
    try:
        data = get_db()
        cur = data.cursor()
        month = str(date) + '%'
        day = str(date) + '-01'
        cur.execute("""
            With x AS (SELECT DISTINCT refdate from refstatview
                WHERE refdate::text LIKE %s),
            y AS (SELECT generate_series(date %s,
                date %s + '1 month'::interval - '1 day'::interval,
                '1 day'::interval) AS missingdate)
            SELECT missingdate::date from y
            WHERE missingdate NOT IN(
                SELECT refdate from x)
            """, (str(month), str(day), str(day)))
        missing = []
        for row in cur.fetchall():
            missing.append({'refdate': row[0]})
        data.commit()
        data.close()
        return missing
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
def get_current_data(date):
    "Pull out the current data for a given day"
    # Returns a flat dict keyed '<time><stat>_en' / '<time><stat>_fr',
    # matching the form field names used by the edit template.
    try:
        data = get_db()
        cur = data.cursor()
        cur.execute("""SELECT reftime, reftype,
            refcount_en, refcount_fr
            FROM refstatview WHERE refdate=%s""",
            (str(date),))
        stats = {}
        for row in cur.fetchall():
            time = str(row[0])
            stat = row[1]
            stats[time+stat+'_en'] = row[2]
            stats[time+stat+'_fr'] = row[3]
        data.commit()
        data.close()
        #print(stats)
        return stats
    except Exception, ex:
        if opt['VERBOSE']:
            print(ex)
@app.route('/<lang>/login/', methods=['GET', 'POST'])
def login_form():
    # GET shows the form; POST hands the credentials to login().
    if request.method == 'POST':
        return login()
    else:
        return render_template('login.html');
@app.route('/<lang>/logout/', methods=['GET'])
@login_required
def logout():
    # Remove the session row, then clear the Flask-Login session.
    current_user.logout()
    logout_user()
    return redirect(url_for('login_form', lang='en')), 302
@app.route('/<lang>/view/', methods=['GET'])
@app.route('/<lang>/view/<date>', methods=['GET'])
@login_required
def show_stats(date=None):
    "Lets try to get all dates with data input"
    # With no date: list available dates/months.  With a 'YYYY-MM' month:
    # month charts plus missing-day list.  With a full date: day charts.
    try:
        dates = get_stats(date)
        months = get_months()
        if date:
            tarray = get_time_array(date)
            #If the date specified is a full month. len(YYYY-MM) == 7.
            if len(str(date)) == 7:
                wdarray = get_weekday_array(date)
                missing = get_missing(date)
                return render_template('show_mchart.html', dates=dates, \
                    tarray=tarray, date=date, wdarray=wdarray, months=months, \
                    missing=missing \
                    )
            else:
                array = get_data_array(date)
                return render_template('show_chart.html', dates=dates, \
                    array=array, tarray=tarray, date=date, months=months \
                    )
        else:
            return render_template('show_stats.html', dates=dates, months=months)
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors before returning a 500.
        return abort(500)
@app.route('/<lang>/', methods=['GET','POST'])
@app.route('/<lang>/edit/<date>', methods=['GET','POST'])
@login_required
def edit_data(date):
"Add data to missing days or edit current data"
if request.method == 'POST':
return eat_stat_form()
try:
if date:
stats = get_current_data(date)
#if opt['VERBOSE']:
# print(date + 'stats:' + stats)
#if stats:
if opt['VERBOSE']:
print ('before page render: stats found')
return render_template('stat_form.html', today=date, stats=stats)
#else:
#return render_template('stat_form.html', today=date)
#return render_template('edit_stat_form.html', today=date, stats=stats)
else:
if opt['VERBOSE']:
print ('before page render: no stats')
date = datetime.datetime.now().strftime("%Y-%m-%d")
if opt['VERBOSE']:
print(date)
stats = get_current_data(date)
return render_template('stat_form.html', today=((datetime.datetime.now() + datetime.timedelta(hours=-2)).date().isoformat()), stats=stats)
except Exception, ex:
if opt['VERBOSE']:
print(ex)
return abort(500)
@app.route('/<lang>/download/')
@app.route('/<lang>/download/<filename>')
@login_required
def download_file(filename='allstats'):
"Downloads a file in CSV format"
try:
filename = str(filename)
csv_data = get_csv(filename)
csv_file = filename + ".csv"
response = make_response(csv_data)
response_header = "attachment; fname=" + csv_file
response.headers["Content-Type"] = 'text/csv'
response.headers["Content-Disposition"] = response_header
return response
except Exception, ex:
if opt['VERBOSE']:
print(ex)
return abort(500)
@app.route('/favicon.ico')
def favicon():
    # No favicon is shipped; answer with an empty 404 body.
    return '', 404
if __name__ == '__main__':
    #app.run(debug=opt['DEBUG'], host=opt['HOST'], port=opt['PORT'])
    # Serve through Twisted's WSGI container instead of Flask's dev server.
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.wsgi import WSGIResource
    resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    site = Site(resource)
    reactor.listenTCP(opt['PORT'], site, interface=opt['HOST'])
    reactor.run()
| dbs/refdeskstats | refdesk.py | Python | gpl-3.0 | 22,461 | [
"VisIt"
] | fe8eb08de4638843d245070187fc4264e992b5e16403dbc0c80d579378d0f109 |
"""
The B{0install} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
from optparse import OptionParser
import logging
from zeroinstall import SafeException, DryRun
# Sub-commands accepted on the 0install command line; used for dispatch
# and shell completion.
valid_commands = ['add', 'select', 'show', 'download', 'run', 'update', 'whatchanged', 'destroy',
                  'config', 'import', 'list', 'search', 'add-feed', 'remove-feed', 'list-feeds',
                  'man', 'digest']

# Raised when the command line is malformed; the caller prints usage help.
class UsageError(Exception): pass
def _ensure_standard_fds():
"""Ensure stdin, stdout and stderr FDs exist, to avoid confusion."""
for std in (0, 1, 2):
try:
os.fstat(std)
except OSError:
fd = os.open(os.devnull, os.O_RDONLY)
if fd != std:
os.dup2(fd, std)
os.close(fd)
class NoCommand(object):
    """Command object used when no sub-command was given; handles --help and --version."""

    def add_options(self, parser):
        """Register the options that are valid without a sub-command."""
        parser.add_option("-V", "--version", help=_("display version information"), action='store_true')

    def handle(self, config, options, args):
        """Print version information for --version; otherwise request usage help."""
        if not options.version:
            raise UsageError()
        import zeroinstall
        print("0install (zero-install) " + zeroinstall.version)
        print("Copyright (C) 2013 Thomas Leonard")
        print(_("This program comes with ABSOLUTELY NO WARRANTY,"
            "\nto the extent permitted by law."
            "\nYou may redistribute copies of this program"
            "\nunder the terms of the GNU Lesser General Public License."
            "\nFor more information about these matters, see the file named COPYING."))
        sys.exit(0)
class _Completion(object):
    """Implements shell tab-completion for zsh, bash and fish.

    Parses the partially-typed command line (passed via argv and the
    COMP_CWORD environment variable), works out what is being completed,
    and prints candidate completions to stdout in the form
    "<type> <value>" understood by the shell-side completion scripts.
    """

    def __init__(self, config, command_args, shell):
        """@type command_args: [str]
        @type shell: str"""
        assert shell in ('zsh', 'bash', 'fish'), shell
        self.shell = shell
        self.config = config
        # Index of the word being completed (0-based, relative to command_args).
        self.cword = int(os.environ['COMP_CWORD']) - 1
        self.response_prefix = ''
        if shell == 'zsh':
            self.cword -= 1
        if shell == 'bash':
            # Bash does crazy splitting (e.g. "http://foo" becomes "http" ":" "//foo")
            # Do our best to reverse that splitting here (inspired by Git completion code)
            command_args = command_args[:]
            while ':' in command_args[1:]:
                i = command_args.index(':', 1)
                combined = command_args[i - 1] + command_args[i]
                if i + 1 < len(command_args):
                    combined += command_args[i + 1]
                command_args = command_args[:i - 1] + [combined] + command_args[i + 2:]
                if self.cword > i:
                    self.cword -= 2
                elif self.cword == i:
                    self.cword -= 1
            # For --opt=value, we get ['--opt', '=', value]. Just get rid of the '='.
            if self.cword > 0 and command_args[self.cword - 1] == '=':
                del command_args[self.cword - 1]
                self.cword -= 1
            elif command_args[self.cword] == '=':
                command_args[self.cword] = ''
        #print(command_args, self.cword, file = sys.stderr)
        if self.cword < len(command_args):
            self.current = command_args[self.cword]
        else:
            self.current = ''
        if shell in ('zsh', 'fish'):
            if self.current.startswith('--') and '=' in self.current:
                # Split "--foo=bar" into "--foo", "bar"
                name, value = self.current.split('=', 1)
                command_args[self.cword:self.cword + 1] = [name, value]
                self.cword += 1
                self.current = command_args[self.cword]
                # Re-attach "--foo=" to each candidate when responding.
                self.response_prefix = name + '='
            else:
                self.response_prefix = ''
        self.command_args = command_args

    def got_command(self, command, pos):
        #print("found %s at %s [cword = %d]" % (command, pos, self.cword), file = sys.stderr)
        """@type command: str
        @type pos: int"""
        # If the cursor is on the sub-command word itself, offer all
        # valid sub-commands and stop.
        if pos == self.cword:
            for command in valid_commands:
                self.add("filter", command)
            sys.exit(0)

    def complete(self, parser, cmd):
        """Walk the command line, then complete either an option, an
        option's argument, or a positional argument (via cmd.complete)."""
        # Index every short and long option name of this sub-command's parser.
        opts = {}
        for opt in parser.option_list:
            for name in opt._short_opts:
                opts[name] = opt
            for name in opt._long_opts:
                opts[name] = opt
        options_possible = True
        arg_word = -1
        args = []
        consume_args = 0
        complete_option_arg = None	# (option, args, arg pos)
        #logger.warning("%s at %d", self.command_args, self.cword)
        for i, a in enumerate(self.command_args):
            #logger.warning("%d %s (%d)", i, a, options_possible)
            if consume_args > 0:
                # This word is an argument to an earlier option.
                #print("consume " + a, file=sys.stderr)
                consume_args -= 1
            elif a == '--' and options_possible and i != self.cword:
                options_possible = False
            elif a.startswith('-') and options_possible:
                if i == self.cword:
                    self._complete_option(parser)
                    return
                # Does it take an argument?
                option_with_args = None
                if a.startswith('--'):
                    opt = opts.get(a, None)
                    if opt and opt.nargs:
                        option_with_args = opt
                else:
                    # Bundled short options: only the last may take arguments.
                    for l in a[1:]:
                        opt = opts.get('-' + l, None)
                        if opt and opt.nargs:
                            option_with_args = opt
                            break
                if option_with_args:
                    consume_args = option_with_args.nargs
                    # Are we completing one of this option's arguments?
                    option_arg_index = self.cword - i - 1
                    if option_arg_index >= 0 and option_arg_index < consume_args:
                        complete_option_arg = (option_with_args,
                            self.command_args[i + 1 : i + 1 + consume_args],
                            option_arg_index)
            else:
                if len(args) > 0 and options_possible and not parser.allow_interspersed_args:
                    options_possible = False
                args.append(a)
                if i < self.cword:
                    arg_word += 1
        if complete_option_arg is None:
            # Completing a positional argument: delegate to the sub-command.
            if hasattr(cmd, 'complete'):
                if arg_word == len(args) - 1: args.append('')
                cmd.complete(self, args[1:], arg_word)
        else:
            # Completing an option's argument: dispatch on its metavar.
            metavar = complete_option_arg[0].metavar
            #logger.warning("complete option arg %s %s as %s", args[1:], complete_option_arg, metavar)
            if metavar == 'DIR':
                self.expand_files()
            elif metavar == 'OS':
                for value in ["Cygwin", "Darwin", "FreeBSD", "Linux", "MacOSX", "Windows"]:
                    self.add("filter", value)
            elif metavar == 'CPU':
                for value in ["src", "i386", "i486", "i586", "i686", "ppc", "ppc64", "x86_64"]:
                    self.add("filter", value)
            elif metavar == 'URI RANGE':
                if complete_option_arg[2] == 0:
                    # When completing the URI, contextualise to the app's selections, if possible
                    if len(args) > 1:
                        app = self.config.app_mgr.lookup_app(args[1], missing_ok = True)
                        if app:
                            for uri in app.get_selections().selections:
                                self.add("filter", uri)
                            return
                    # Otherwise, complete on all cached URIs
                    self.expand_interfaces()
                else:
                    self.expand_range(complete_option_arg[1][0])
            elif metavar in ('RANGE', 'VERSION'):
                if len(args) > 1:
                    self.expand_range(args[1], maybe_app = True, range_ok = metavar == 'RANGE')
            elif metavar == 'HASH':
                from zeroinstall.zerostore import manifest
                for alg in sorted(manifest.algorithms):
                    self.add("filter", alg)
            #else: logger.warning("%r", metavar)

    def _complete_option(self, parser):
        """Complete the option word currently under the cursor."""
        if len(self.current) < 2 or self.current.startswith('--'):
            # Long option, or nothing yet
            for opt in parser.option_list:
                for o in opt._long_opts:
                    self.add("filter", o)
        else:
            # Short option: if it's valid, complete it.
            # Otherwise, reject it.
            valid = set()
            for opt in parser.option_list:
                for o in opt._short_opts:
                    valid.add(o[1:])
            if all(char in valid for char in self.current[1:]):
                self.add("add", self.current)

    def expand_range(self, uri, maybe_app = False, range_ok = True):
        """Offer the known implementation versions of *uri* as completions.
        @type uri: str
        @type maybe_app: bool
        @type range_ok: bool"""
        if maybe_app:
            app = self.config.app_mgr.lookup_app(uri, missing_ok = True)
            if app:
                uri = app.get_requirements().interface_uri
        iface_cache = self.config.iface_cache
        iface = iface_cache.get_interface(uri)
        versions = [impl.get_version() for impl in iface_cache.get_implementations(iface)]
        # If the user already typed "LOW..", complete the upper bound of the range.
        if range_ok and '..' in self.current:
            prefix = self.current.split('..', 1)[0] + '..!'
        else:
            prefix = ''
        for v in sorted(versions):
            #logger.warning(prefix + v)
            self.add("filter", prefix + v)

    def expand_apps(self):
        # Offer every locally-registered application name.
        for app in self.config.app_mgr.iterate_apps():
            self.add("filter", app)

    def expand_files(self):
        # Tell the shell-side script to fall back to filename completion.
        print("file")

    def expand_interfaces(self):
        """Complete against the URIs of all cached interfaces."""
        c = self.current
        if 'http://'.startswith(c[:7]) or 'https://'.startswith(c[:8]):
            if c.count('/') < 3:
                # Start with just the domains
                import re
                start = re.compile('(https?://[^/]+/).*')
                starts = set()
                for iface in self.config.iface_cache.list_all_interfaces():
                    if not iface.startswith(c): continue
                    match = start.match(iface)
                    if match:
                        starts.add(match.group(1))
                for s in sorted(starts):
                    self.add("prefix", s)
            else:
                for iface in self.config.iface_cache.list_all_interfaces():
                    if iface.startswith(c):
                        self.add("filter", iface)
        if '://' not in c:
            self.expand_files()

    def add_filtered(self, value):
        """Add this value, but only if it matches the prefix.
        @type value: str"""
        self.add("filter", value)

    def add(self, type, value):
        """Types are:
        add - a raw string to add
        filter - a string to add only if it matches
        prefix - a completion that doesn't insert a space after it.
        @type type: str
        @type value: str"""
        if self.shell == 'bash':
            # Bash treats ':' as a word separator; strip the part before the
            # last ':' so the completion applies to the current sub-word only.
            if ':' in self.current:
                ignored = self.current.rsplit(':', 1)[0] + ':'
                if not value.startswith(ignored): return
                value = value[len(ignored):]
        #print(">%s<" % value, file = sys.stderr)
        if type != 'prefix':
            value += ' '
        print(type, self.response_prefix + value)
def main(command_args, config = None):
    """Act as if 0install was run with the given arguments.
    @type command_args: [str]
    @type config: L{zeroinstall.injector.config.Config} | None
    @arg command_args: array of arguments (e.g. C{sys.argv[1:]})"""
    _ensure_standard_fds()
    if config is None:
        from zeroinstall.injector.config import load_config
        config = load_config()
    completion = None
    # Shell-completion mode: argv is ['_complete', SHELL, '0install', args...].
    if command_args and command_args[0] == '_complete':
        shell = command_args[1]
        command_args = command_args[3:]
        # command_args[2] == "0install"
        completion = _Completion(config, command_args, shell = shell)
    # The first non-option argument is the command name (or "help" if none is found).
    command = None
    for i, arg in enumerate(command_args):
        if not arg.startswith('-'):
            command = arg
            command_args = command_args[:i] + command_args[i + 1:]
            if completion:
                completion.got_command(command, i)
            break
        elif arg == '--':
            break
    else:
        # No command word found at all.
        if completion:
            completion.got_command(None, len(command_args))
    verbose = False
    try:
        # Configure a parser for the given command
        my_name = os.path.basename(sys.argv[0])
        if my_name == '0launch': my_name = '0install'	# Hack for --python-fallback
        if command:
            if command not in valid_commands:
                if completion:
                    return
                raise SafeException(_("Unknown sub-command '%s': try --help") % command)
            # Each sub-command is implemented by a module under zeroinstall.cmd.
            module_name = command.replace('-', '_')
            cmd = __import__('zeroinstall.cmd.' + module_name, globals(), locals(), [module_name], 0)
            parser = OptionParser(usage=_("usage: %s %s [OPTIONS] %s") % (my_name, command, cmd.syntax))
        else:
            cmd = NoCommand()
            parser = OptionParser(usage=_("usage: %s COMMAND\n\nTry --help with one of these:%s") %
                    (my_name, "\n\n0install " + '\n0install '.join(valid_commands)))
        # Global options shared by every sub-command.
        parser.add_option("-c", "--console", help=_("never use GUI"), action='store_false', dest='gui')
        parser.add_option("", "--dry-run", help=_("just print what would be executed"), action='store_true')
        parser.add_option("-g", "--gui", help=_("show graphical policy editor"), action='store_true')
        parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
        parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
        cmd.add_options(parser)
        if completion:
            # In completion mode we only report candidates; never execute.
            completion.complete(parser, cmd)
            return
        (options, args) = parser.parse_args(command_args)
        verbose = options.verbose
        if options.verbose:
            # -v -> INFO, -vv (or more) -> DEBUG.
            if options.verbose == 1:
                logger.setLevel(logging.INFO)
            else:
                logger.setLevel(logging.DEBUG)
            import zeroinstall
            logger.info(_("Running 0install %(version)s %(args)s; Python %(python_version)s"), {'version': zeroinstall.version, 'args': repr(command_args), 'python_version': sys.version})
        if options.with_store:
            from zeroinstall import zerostore
            for x in options.with_store:
                config.stores.stores.append(zerostore.Store(os.path.abspath(x)))
            logger.info(_("Stores search path is now %s"), config.stores.stores)
        config.handler.dry_run = bool(options.dry_run)
        if config.handler.dry_run:
            if options.gui is True:
                raise SafeException(_("Can't use --gui with --dry-run"))
            options.gui = False
        cmd.handle(config, options, args)
    except KeyboardInterrupt:
        logger.info("KeyboardInterrupt")
        sys.exit(1)
    except UsageError:
        parser.print_help()
        sys.exit(1)
    except DryRun as ex:
        print(_("[dry-run]"), ex)
    except SafeException as ex:
        # SafeExceptions are expected user-facing errors; show the message
        # only, unless -v was given (then re-raise for a full traceback).
        if verbose: raise
        try:
            from zeroinstall.support import unicode
            print(unicode(ex), file=sys.stderr)
        except:
            print(repr(ex), file=sys.stderr)
        sys.exit(1)
    return
| AlexanderRyzhko/0install-TUF | zeroinstall/cmd/__init__.py | Python | lgpl-2.1 | 12,927 | [
"VisIt"
] | 7985e05ba4ad54662a3b96f69b0cb552ad4a428f8e7ba9aa7489ac97c2c30a62 |
from rdkit import RDConfig
import os, sys
import unittest
from rdkit import Chem
from rdkit.Chem import rdMolHash
class TestCase(unittest.TestCase):
    """Checks every rdMolHash.HashFunction variant against a known molecule."""

    def setUp(self):
        pass

    def test1(self):
        mol = Chem.MolFromSmiles('C1CCCC(O)C1c1ccnc(OC)c1')
        # (hash function, expected hash) pairs for the molecule above.
        cases = [
            (rdMolHash.HashFunction.AnonymousGraph, '***1****(*2*****2*)*1'),
            (rdMolHash.HashFunction.ElementGraph, 'COC1CC(C2CCCCC2O)CCN1'),
            (rdMolHash.HashFunction.CanonicalSmiles, 'COc1cc(C2CCCCC2O)ccn1'),
            (rdMolHash.HashFunction.MurckoScaffold, 'c1cc(C2CCCCC2)ccn1'),
            (rdMolHash.HashFunction.ExtendedMurcko, '*c1cc(C2CCCCC2*)ccn1'),
            (rdMolHash.HashFunction.MolFormula, 'C12H17NO2'),
            (rdMolHash.HashFunction.AtomBondCounts, '15,16'),
            (rdMolHash.HashFunction.DegreeVector, '0,4,9,2'),
            (rdMolHash.HashFunction.Mesomer, 'CO[C]1[CH][C](C2CCCCC2O)[CH][CH][N]1_0'),
            (rdMolHash.HashFunction.Regioisomer, '*O.*O*.C.C1CCCCC1.c1ccncc1'),
            (rdMolHash.HashFunction.NetCharge, '0'),
            (rdMolHash.HashFunction.SmallWorldIndexBR, 'B16R2'),
            (rdMolHash.HashFunction.SmallWorldIndexBRL, 'B16R2L9'),
            (rdMolHash.HashFunction.ArthorSubstructureOrder, '000f001001000c000300005f000000'),
        ]
        for hash_func, expected in cases:
            self.assertEqual(rdMolHash.MolHash(mol, hash_func), expected)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| greglandrum/rdkit | Code/GraphMol/MolHash/Wrap/testMolHash.py | Python | bsd-3-clause | 1,896 | [
"RDKit"
] | 243216bf4ba2fdc4c02b4420a7ac182f8aaaf2bb84d7a276c10ae3a19dd72ad9 |
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit http://feedparser.org/ for the latest version
Visit http://feedparser.org/docs/ for the latest documentation
Required: Python 2.1 or later
Recommended: Python 2.3 or later
Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "4.1"# + "$Revision: 1.92 $"[11:15] + "-cvs"
__license__ = """Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>"]
# Set to 1 to enable the module's internal debug tracing.
_debug = 0

# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__

# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"

# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]

# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0

# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
def _xmlescape(data):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception):
    """Base class for the parser's internal, non-fatal exception conditions."""

class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe):
    """Internal condition relating to the declared character encoding being overridden."""

class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe):
    """Internal condition: the character encoding could not be determined."""

class NonXMLContentType(ThingsNobodyCaresAboutButMe):
    """Internal condition: the served Content-Type was not an XML type."""

class UndeclaredNamespace(Exception):
    """Error raised for namespace declaration problems in the feed."""
# Override sgmllib's module-level regexes with more permissive patterns:
# tag names may contain '-', '_', '.' and ':', and character references
# may be hexadecimal ("&#x..;").
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(x?[0-9A-Fa-f]+)[^0-9A-Fa-f]')

# Human-readable name for each feed format/version identifier this parser
# can report; '' means the format could not be determined.
SUPPORTED_VERSIONS = {'': 'unknown',
                      'rss090': 'RSS 0.90',
                      'rss091n': 'RSS 0.91 (Netscape)',
                      'rss091u': 'RSS 0.91 (Userland)',
                      'rss092': 'RSS 0.92',
                      'rss093': 'RSS 0.93',
                      'rss094': 'RSS 0.94',
                      'rss20': 'RSS 2.0',
                      'rss10': 'RSS 1.0',
                      'rss': 'RSS (unknown version)',
                      'atom01': 'Atom 0.1',
                      'atom02': 'Atom 0.2',
                      'atom03': 'Atom 0.3',
                      'atom10': 'Atom 1.0',
                      'atom': 'Atom (unknown version)',
                      'cdf': 'CDF',
                      'hotrss': 'Hot RSS'
                      }
# Compatibility shim: on Python 2.2+ use the builtin dict as UserDict's
# stand-in; on 2.1 (no builtin dict) fall back to UserDict and provide a
# minimal dict() replacement that builds a mapping from (key, value) pairs.
try:
    UserDict = dict
except NameError:
    # Python 2.1 does not have dict
    from UserDict import UserDict
    def dict(aList):
        rc = {}
        for k, v in aList:
            rc[k] = v
        return rc
class FeedParserDict(UserDict):
    """Dictionary allowing feed data to be read under several historical
    key names (e.g. d['channel'] resolves to d['feed'], d['url'] to
    d['href']); values are also reachable as attributes (d.title)."""

    # Alternate key -> canonical key; a list value means several candidate
    # canonical keys, tried in order.
    keymap = {'channel': 'feed',
              'items': 'entries',
              'guid': 'id',
              'date': 'updated',
              'date_parsed': 'updated_parsed',
              'description': ['subtitle', 'summary'],
              'url': ['href'],
              'modified': 'updated',
              'modified_parsed': 'updated_parsed',
              'issued': 'published',
              'issued_parsed': 'published_parsed',
              'copyright': 'rights',
              'copyright_detail': 'rights_detail',
              'tagline': 'subtitle',
              'tagline_detail': 'subtitle_detail'}

    def __getitem__(self, key):
        # 'category'/'categories' are synthesized from the stored 'tags' list.
        if key == 'category':
            return UserDict.__getitem__(self, 'tags')[0]['term']
        if key == 'categories':
            return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
        realkey = self.keymap.get(key, key)
        # A list of candidates: return the first one actually present.
        if type(realkey) == types.ListType:
            for k in realkey:
                if UserDict.has_key(self, k):
                    return UserDict.__getitem__(self, k)
        # Prefer the key exactly as given, then fall back to its alias.
        if UserDict.has_key(self, key):
            return UserDict.__getitem__(self, key)
        return UserDict.__getitem__(self, realkey)

    def __setitem__(self, key, value):
        # Store under the canonical name so aliased reads can find the value.
        for k in self.keymap.keys():
            if key == k:
                key = self.keymap[k]
                if type(key) == types.ListType:
                    key = key[0]
        return UserDict.__setitem__(self, key, value)

    def get(self, key, default=None):
        # Reimplemented so lookups go through the aliasing __getitem__.
        if self.has_key(key):
            return self[key]
        else:
            return default

    def setdefault(self, key, value):
        if not self.has_key(key):
            self[key] = value
        return self[key]

    def has_key(self, key):
        # True if the key exists as an attribute or (possibly aliased) item.
        try:
            return hasattr(self, key) or UserDict.has_key(self, key)
        except AttributeError:
            return False

    def __getattr__(self, key):
        # Attribute access falls through to item access for public names.
        try:
            return self.__dict__[key]
        except KeyError:
            pass
        try:
            assert not key.startswith('_')
            return self.__getitem__(key)
        except:
            raise AttributeError, "object has no attribute '%s'" % key

    def __setattr__(self, key, value):
        # Private names and the UserDict storage slot go to the instance
        # dict; everything else is stored as an item.
        if key.startswith('_') or key == 'data':
            self.__dict__[key] = value
        else:
            return self.__setitem__(key, value)

    def __contains__(self, key):
        return self.has_key(key)
def zopeCompatibilityHack():
    """Replace the module-level FeedParserDict class with a plain-dict
    factory (for environments, such as Zope, that cannot use the class)."""
    global FeedParserDict
    del FeedParserDict

    def FeedParserDict(aDict=None):
        result = {}
        if aDict:
            result.update(aDict)
        return result
# EBCDIC -> ASCII translation table, built lazily on first use.
_ebcdic_to_ascii_map = None
def _ebcdic_to_ascii(s):
    """Return the EBCDIC-encoded byte string *s* translated to ASCII."""
    global _ebcdic_to_ascii_map
    if not _ebcdic_to_ascii_map:
        # emap[i] is the ASCII/Latin-1 code for EBCDIC code i.
        emap = (
            0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
            16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
            128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
            144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
            32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
            38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
            45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
            186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
            195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
            202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
            209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
            216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
            123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
            125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
            92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
            48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
            )
        import string
        _ebcdic_to_ascii_map = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
    return s.translate(_ebcdic_to_ascii_map)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
return urlparse.urljoin(base, uri)
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'license', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
if baselang:
self.feeddata['language'] = baselang
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
tag = tag.split(':')[-1]
return self.handle_data('<%s%s>' % (tag, ''.join([' %s="%s"' % t for t in attrs])), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
else:
# entity resolution graciously donated by Aaron Swartz
def name2cp(k):
import htmlentitydefs
if hasattr(htmlentitydefs, 'name2codepoint'): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
k = htmlentitydefs.entitydefs[k]
if k.startswith('&#') and k.endswith(';'):
return int(k[2:-1]) # not in latin-1
return ord(k)
try: name2cp(ref)
except KeyError: text = '&%s;' % ref
else: text = unichr(name2cp(ref)).encode('utf-8')
self.elementstack[-1][2].append(text)
    def handle_data(self, text, escape=1):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # When escape is true and we are inside xhtml content, the text is
        # XML-escaped so it survives being re-embedded in markup.
        if not self.elementstack: return
        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
            text = _xmlescape(text)
        self.elementstack[-1][2].append(text)
    def handle_comment(self, text):
        # called for each comment, e.g. <!-- insert message here -->
        # Comments carry no feed data; deliberately ignored.
        pass
    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Processing instructions carry no feed data; deliberately ignored.
        pass
    def handle_decl(self, text):
        # Called for the DOCTYPE declaration, if present; ignored here.
        pass
    def parse_declaration(self, i):
        # override internal declaration handler to handle CDATA blocks
        # Returns the index just past the declaration so the caller can
        # resume scanning there.
        if _debug: sys.stderr.write('entering parse_declaration\n')
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            # Unterminated CDATA: treat the rest of the buffer as content.
            if k == -1: k = len(self.rawdata)
            # CDATA content is literal text; escape it and bypass the
            # xhtml re-escaping in handle_data (escape=0).
            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
            return k+3
        else:
            # Any other declaration: skip to the closing '>'.
            k = self.rawdata.find('>', i)
            return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
    def resolveURI(self, uri):
        # Resolve uri against the current xml:base (may be empty).
        return _urljoin(self.baseuri or '', uri)
    def decodeEntities(self, element, data):
        # Hook for subclasses (see _LooseFeedParser); the strict parser
        # receives already-decoded data, so this is a no-op here.
        return data
    def push(self, element, expectingText):
        # Open an element: [name, expectingText flag, list of text pieces].
        self.elementstack.append([element, expectingText, []])
    def pop(self, element, stripWhitespace=1):
        """Close the innermost element and post-process its collected text.

        Returns the processed text (or None if the stack is empty or the
        top of the stack does not match `element`).  Post-processing,
        in order: base64 decode, relative-URI resolution, entity
        decoding, URI resolution inside embedded markup, HTML
        sanitization, unicode conversion -- then the value is stored into
        the entry, feed, or source dict as appropriate.
        """
        if not self.elementstack: return
        # Mismatched close tag: ignore rather than corrupt the stack.
        if self.elementstack[-1][0] != element: return
        element, expectingText, pieces = self.elementstack.pop()
        output = ''.join(pieces)
        if stripWhitespace:
            output = output.strip()
        if not expectingText: return output
        # decode base64 content
        if base64 and self.contentparams.get('base64', 0):
            try:
                output = base64.decodestring(output)
            except binascii.Error:
                pass
            except binascii.Incomplete:
                pass
        # resolve relative URIs
        if (element in self.can_be_relative_uri) and output:
            output = self.resolveURI(output)
        # decode entities within embedded markup
        if not self.contentparams.get('base64', 0):
            output = self.decodeEntities(element, output)
        # remove temporary cruft from contentparams
        try:
            del self.contentparams['mode']
        except KeyError:
            pass
        try:
            del self.contentparams['base64']
        except KeyError:
            pass
        # resolve relative URIs within embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_relative_uris:
                output = _resolveRelativeURIs(output, self.baseuri, self.encoding)
        # sanitize embedded markup
        if self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types:
            if element in self.can_contain_dangerous_markup:
                output = _sanitizeHTML(output, self.encoding)
        # Convert byte strings to unicode (Python 2); decoding failures
        # are deliberately tolerated and leave the bytes untouched.
        if self.encoding and type(output) != type(u''):
            try:
                output = unicode(output, self.encoding)
            except:
                pass
        # categories/tags/keywords/whatever are handled in _end_category
        if element == 'category':
            return output
        # store output in appropriate place(s)
        if self.inentry and not self.insource:
            if element == 'content':
                # Entries may carry multiple content blocks; accumulate.
                self.entries[-1].setdefault(element, [])
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                self.entries[-1][element].append(contentparams)
            elif element == 'link':
                self.entries[-1][element] = output
                if output:
                    self.entries[-1]['links'][-1]['href'] = output
            else:
                if element == 'description':
                    element = 'summary'
                self.entries[-1][element] = output
                if self.incontent:
                    contentparams = copy.deepcopy(self.contentparams)
                    contentparams['value'] = output
                    self.entries[-1][element + '_detail'] = contentparams
        elif (self.infeed or self.insource) and (not self.intextinput) and (not self.inimage):
            context = self._getContext()
            # RSS 'description' at feed level maps to Atom 'subtitle'.
            if element == 'description':
                element = 'subtitle'
            context[element] = output
            if element == 'link':
                context['links'][-1]['href'] = output
            elif self.incontent:
                contentparams = copy.deepcopy(self.contentparams)
                contentparams['value'] = output
                context[element + '_detail'] = contentparams
        return output
    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
        # Open a content-bearing element: records type/language/base in
        # self.contentparams and bumps the incontent nesting counter.
        self.incontent += 1
        self.contentparams = FeedParserDict({
            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
            'language': self.lang,
            'base': self.baseuri})
        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
        self.push(tag, expectingText)
    def popContent(self, tag):
        # Close a content-bearing element and reset contentparams.
        value = self.pop(tag)
        self.incontent -= 1
        self.contentparams.clear()
        return value
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
    def _getAttribute(self, attrsD, name):
        # Look up an attribute after normalizing its namespace prefix.
        return attrsD.get(self._mapToStandardPrefix(name))
    def _isBase64(self, attrsD, contentparams):
        # Decide whether element content should be base64-decoded:
        # an explicit mode="base64" wins; text/* and *xml types are never
        # base64; everything else (e.g. binary media types) is assumed to be.
        if attrsD.get('mode', '') == 'base64':
            return 1
        if self.contentparams['type'].startswith('text/'):
            return 0
        if self.contentparams['type'].endswith('+xml'):
            return 0
        if self.contentparams['type'].endswith('/xml'):
            return 0
        return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
    def _save(self, key, value):
        # Store value in the current context, but never overwrite an
        # existing value (setdefault semantics).
        context = self._getContext()
        context.setdefault(key, value)
    def _start_rss(self, attrsD):
        # Sniff the RSS flavor from the version attribute; any 2.x is
        # reported as 'rss20', unknown/missing versions as plain 'rss'.
        versionmap = {'0.91': 'rss091u',
                      '0.92': 'rss092',
                      '0.93': 'rss093',
                      '0.94': 'rss094'}
        if not self.version:
            attr_version = attrsD.get('version', '')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            elif attr_version.startswith('2.'):
                self.version = 'rss20'
            else:
                self.version = 'rss'
    def _start_dlhottitles(self, attrsD):
        # Netscape "hot titles" RSS variant.
        self.version = 'hotrss'
    def _start_channel(self, attrsD):
        # Entering the feed-level element (RSS <channel> / CDF <feedinfo>).
        self.infeed = 1
        self._cdf_common(attrsD)
    _start_feedinfo = _start_channel
    def _cdf_common(self, attrsD):
        # CDF puts modification date and link in attributes; replay them
        # through the normal start/end handlers by injecting the attribute
        # value as the element's text.
        if attrsD.has_key('lastmod'):
            self._start_modified({})
            self.elementstack[-1][-1] = attrsD['lastmod']
            self._end_modified()
        if attrsD.has_key('href'):
            self._start_link({})
            self.elementstack[-1][-1] = attrsD['href']
            self._end_link()
    def _start_feed(self, attrsD):
        # Entering an Atom <feed>; sniff the Atom version from the
        # version attribute (pre-1.0 feeds carried one).
        self.infeed = 1
        versionmap = {'0.1': 'atom01',
                      '0.2': 'atom02',
                      '0.3': 'atom03'}
        if not self.version:
            attr_version = attrsD.get('version')
            version = versionmap.get(attr_version)
            if version:
                self.version = version
            else:
                self.version = 'atom'
    def _end_channel(self):
        # Leaving the feed-level element.
        self.infeed = 0
    _end_feed = _end_channel
    def _start_image(self, attrsD):
        # Child elements of <image> are routed into context['image'].
        self.inimage = 1
        self.push('image', 0)
        context = self._getContext()
        context.setdefault('image', FeedParserDict())
    def _end_image(self):
        self.pop('image')
        self.inimage = 0
    def _start_textinput(self, attrsD):
        # Child elements of <textinput> are routed into context['textinput'].
        self.intextinput = 1
        self.push('textinput', 0)
        context = self._getContext()
        context.setdefault('textinput', FeedParserDict())
    _start_textInput = _start_textinput
    def _end_textinput(self):
        self.pop('textinput')
        self.intextinput = 0
    _end_textInput = _end_textinput
    def _start_author(self, attrsD):
        # Covers Atom <author>, RSS <managingEditor>, Dublin Core and
        # iTunes author elements (see aliases below).
        self.inauthor = 1
        self.push('author', 1)
    _start_managingeditor = _start_author
    _start_dc_author = _start_author
    _start_dc_creator = _start_author
    _start_itunes_author = _start_author
    def _end_author(self):
        self.pop('author')
        self.inauthor = 0
        # Split "Name (email)" style values into author_detail fields.
        self._sync_author_detail()
    _end_managingeditor = _end_author
    _end_dc_author = _end_author
    _end_dc_creator = _end_author
    _end_itunes_author = _end_author
    def _start_itunes_owner(self, attrsD):
        # iTunes <owner> is stored under the 'publisher' key.
        self.inpublisher = 1
        self.push('publisher', 0)
    def _end_itunes_owner(self):
        self.pop('publisher')
        self.inpublisher = 0
        self._sync_author_detail('publisher')
    def _start_contributor(self, attrsD):
        # Each <contributor> appends a fresh dict to context['contributors'].
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('contributor', 0)
    def _end_contributor(self):
        self.pop('contributor')
        self.incontributor = 0
    def _start_dc_contributor(self, attrsD):
        # Dublin Core contributor: the element text IS the name, so we
        # push 'name' directly instead of a wrapper element.
        self.incontributor = 1
        context = self._getContext()
        context.setdefault('contributors', [])
        context['contributors'].append(FeedParserDict())
        self.push('name', 0)
    def _end_dc_contributor(self):
        self._end_name()
        self.incontributor = 0
    def _start_name(self, attrsD):
        self.push('name', 0)
    _start_itunes_name = _start_name
    def _end_name(self):
        # Route the name to whichever construct we are currently inside
        # (publisher beats author beats contributor beats textinput).
        value = self.pop('name')
        if self.inpublisher:
            self._save_author('name', value, 'publisher')
        elif self.inauthor:
            self._save_author('name', value)
        elif self.incontributor:
            self._save_contributor('name', value)
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['name'] = value
    _end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['image']['height'] = value
    def _start_url(self, attrsD):
        # <url>, <homePage> and <uri> all collect an href value.
        self.push('href', 1)
    _start_homepage = _start_url
    _start_uri = _start_url
    def _end_url(self):
        # Route the URL to the construct we are currently inside.
        value = self.pop('href')
        if self.inauthor:
            self._save_author('href', value)
        elif self.incontributor:
            self._save_contributor('href', value)
        elif self.inimage:
            context = self._getContext()
            context['image']['href'] = value
        elif self.intextinput:
            context = self._getContext()
            context['textinput']['link'] = value
    _end_homepage = _end_url
    _end_uri = _end_url
    def _start_email(self, attrsD):
        self.push('email', 0)
    _start_itunes_email = _start_email
    def _end_email(self):
        # Route the email address to publisher, author or contributor.
        value = self.pop('email')
        if self.inpublisher:
            self._save_author('email', value, 'publisher')
        elif self.inauthor:
            self._save_author('email', value)
        elif self.incontributor:
            self._save_contributor('email', value)
    _end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
    def _save_author(self, key, value, prefix='author'):
        # Store a field ('name'/'email'/'href') into <prefix>_detail and
        # keep the plain '<prefix>' string in sync.
        context = self._getContext()
        context.setdefault(prefix + '_detail', FeedParserDict())
        context[prefix + '_detail'][key] = value
        self._sync_author_detail()
    def _save_contributor(self, key, value):
        # Store a field on the most recently opened contributor dict.
        context = self._getContext()
        context.setdefault('contributors', [FeedParserDict()])
        context['contributors'][-1][key] = value
    def _sync_author_detail(self, key='author'):
        """Keep context[key] and context[key + '_detail'] consistent.

        If the detail dict exists, rebuild the plain string from its
        name/email fields.  Otherwise, try to split an existing plain
        string of the form 'Name (email)' (or variants) back into a
        detail dict.
        """
        context = self._getContext()
        detail = context.get('%s_detail' % key)
        if detail:
            name = detail.get('name')
            email = detail.get('email')
            if name and email:
                context[key] = '%s (%s)' % (name, email)
            elif name:
                context[key] = name
            elif email:
                context[key] = email
        else:
            author = context.get(key)
            if not author: return
            # Find an email address anywhere inside the author string.
            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))''', author)
            if not emailmatch: return
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, '')
            author = author.replace('()', '')
            author = author.strip()
            # Strip a single pair of parentheses left around the name.
            if author and (author[0] == '('):
                author = author[1:]
            if author and (author[-1] == ')'):
                author = author[:-1]
            author = author.strip()
            context.setdefault('%s_detail' % key, FeedParserDict())
            context['%s_detail' % key]['name'] = author
            context['%s_detail' % key]['email'] = email
    def _start_subtitle(self, attrsD):
        # Atom <subtitle> / <tagline> / iTunes subtitle; plain text by default.
        self.pushContent('subtitle', attrsD, 'text/plain', 1)
    _start_tagline = _start_subtitle
    _start_itunes_subtitle = _start_subtitle
    def _end_subtitle(self):
        self.popContent('subtitle')
    _end_tagline = _end_subtitle
    _end_itunes_subtitle = _end_subtitle
    def _start_rights(self, attrsD):
        # Atom <rights> / Dublin Core rights / RSS <copyright>.
        self.pushContent('rights', attrsD, 'text/plain', 1)
    _start_dc_rights = _start_rights
    _start_copyright = _start_rights
    def _end_rights(self):
        self.popContent('rights')
    _end_dc_rights = _end_rights
    _end_copyright = _end_rights
    def _start_item(self, attrsD):
        # Begin a new entry (RSS <item> / Atom <entry> / hotrss <product>).
        self.entries.append(FeedParserDict())
        self.push('item', 0)
        self.inentry = 1
        self.guidislink = 0
        # RDF items identify themselves via rdf:about.
        # NOTE: `id` shadows the builtin; kept as-is (doc-only change).
        id = self._getAttribute(attrsD, 'rdf:about')
        if id:
            context = self._getContext()
            context['id'] = id
        self._cdf_common(attrsD)
    _start_entry = _start_item
    _start_product = _start_item
    def _end_item(self):
        self.pop('item')
        self.inentry = 0
    _end_entry = _end_item
    def _start_dc_language(self, attrsD):
        self.push('language', 1)
    _start_language = _start_dc_language
    def _end_dc_language(self):
        # The declared language also becomes the parser's current language.
        self.lang = self.pop('language')
    _end_language = _end_dc_language
    def _start_dc_publisher(self, attrsD):
        self.push('publisher', 1)
    _start_webmaster = _start_dc_publisher
    def _end_dc_publisher(self):
        self.pop('publisher')
        # Split "Name (email)" publisher strings into publisher_detail.
        self._sync_author_detail('publisher')
    _end_webmaster = _end_dc_publisher
    def _start_published(self, attrsD):
        # Atom <published> / dcterms:issued / Atom 0.3 <issued>.
        self.push('published', 1)
    _start_dcterms_issued = _start_published
    _start_issued = _start_published
    def _end_published(self):
        value = self.pop('published')
        # Store both the raw string (via pop) and the parsed struct_time.
        self._save('published_parsed', _parse_date(value))
    _end_dcterms_issued = _end_published
    _end_issued = _end_published
    def _start_updated(self, attrsD):
        # Atom <updated> and its many RSS/DC equivalents.
        self.push('updated', 1)
    _start_modified = _start_updated
    _start_dcterms_modified = _start_updated
    _start_pubdate = _start_updated
    _start_dc_date = _start_updated
    def _end_updated(self):
        value = self.pop('updated')
        parsed_value = _parse_date(value)
        self._save('updated_parsed', parsed_value)
    _end_modified = _end_updated
    _end_dcterms_modified = _end_updated
    _end_pubdate = _end_updated
    _end_dc_date = _end_updated
    def _start_created(self, attrsD):
        self.push('created', 1)
    _start_dcterms_created = _start_created
    def _end_created(self):
        value = self.pop('created')
        self._save('created_parsed', _parse_date(value))
    _end_dcterms_created = _end_created
    def _start_expirationdate(self, attrsD):
        self.push('expired', 1)
    def _end_expirationdate(self):
        self._save('expired_parsed', _parse_date(self.pop('expired')))
    def _start_cc_license(self, attrsD):
        # Creative Commons license given as an rdf:resource attribute:
        # push, inject the attribute value as text, and pop immediately.
        self.push('license', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('license')
    def _start_creativecommons_license(self, attrsD):
        # creativeCommons:license carries the URL as element text.
        self.push('license', 1)
    def _end_creativecommons_license(self):
        self.pop('license')
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(FeedParserDict({'term': term, 'scheme': scheme, 'label': label}))
    def _start_category(self, attrsD):
        # Atom <category> / dc:subject / RSS <category>; term may come
        # from attributes (Atom) or from the element text (see _end_category).
        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
        term = attrsD.get('term')
        scheme = attrsD.get('scheme', attrsD.get('domain'))
        label = attrsD.get('label')
        self._addTag(term, scheme, label)
        self.push('category', 1)
    _start_dc_subject = _start_category
    _start_keywords = _start_category
    def _end_itunes_keywords(self):
        # iTunes keywords are a whitespace-separated list; one tag each.
        for term in self.pop('itunes_keywords').split():
            self._addTag(term, 'http://www.itunes.com/', None)
    def _start_itunes_category(self, attrsD):
        # iTunes categories carry the term in the 'text' attribute.
        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
        self.push('category', 1)
    def _end_category(self):
        value = self.pop('category')
        if not value: return
        context = self._getContext()
        tags = context['tags']
        # If _start_category recorded a tag without a term, the element
        # text fills it in; otherwise the text is a new tag of its own.
        if value and len(tags) and not tags[-1]['term']:
            tags[-1]['term'] = value
        else:
            self._addTag(value, None, None)
    _end_dc_subject = _end_category
    _end_keywords = _end_category
    _end_itunes_category = _end_category
    def _start_cloud(self, attrsD):
        # RSS <cloud> is attributes-only; store them verbatim.
        self._getContext()['cloud'] = FeedParserDict(attrsD)
    def _start_link(self, attrsD):
        # Atom/RSS <link>: default rel/type, normalize the target to
        # 'href', resolve it, and append to context['links'].
        attrsD.setdefault('rel', 'alternate')
        attrsD.setdefault('type', 'text/html')
        attrsD = self._itsAnHrefDamnIt(attrsD)
        if attrsD.has_key('href'):
            attrsD['href'] = self.resolveURI(attrsD['href'])
        expectingText = self.infeed or self.inentry or self.insource
        context = self._getContext()
        context.setdefault('links', [])
        context['links'].append(FeedParserDict(attrsD))
        # Enclosure links also populate context['enclosures'].
        if attrsD['rel'] == 'enclosure':
            self._start_enclosure(attrsD)
        if attrsD.has_key('href'):
            # Atom-style link: the href attribute IS the link; no text expected.
            expectingText = 0
            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
                context['link'] = attrsD['href']
        else:
            # RSS-style link: the URL is the element text.
            self.push('link', expectingText)
    _start_producturl = _start_link
    def _end_link(self):
        value = self.pop('link')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['link'] = value
        if self.inimage:
            context['image']['link'] = value
    _end_producturl = _end_link
    def _start_guid(self, attrsD):
        # isPermaLink defaults to true per the RSS 2.0 spec.
        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
        self.push('id', 1)
    def _end_guid(self):
        value = self.pop('id')
        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
        if self.guidislink:
            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
            # and only if the item doesn't already have a link element
            self._save('link', value)
    def _start_title(self, attrsD):
        # Titles are only expected as text at feed/entry/source level
        # (not inside image/textinput, which handle them separately).
        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    _start_dc_title = _start_title
    _start_media_title = _start_title
    def _end_title(self):
        value = self.popContent('title')
        context = self._getContext()
        if self.intextinput:
            context['textinput']['title'] = value
        elif self.inimage:
            context['image']['title'] = value
    _end_dc_title = _end_title
    _end_media_title = _end_title
    def _start_description(self, attrsD):
        context = self._getContext()
        # A second description after <summary> is treated as full content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
    def _start_abstract(self, attrsD):
        # CDF <abstract>: like description but plain text.
        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
    def _end_description(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            value = self.popContent('description')
            context = self._getContext()
            if self.intextinput:
                context['textinput']['description'] = value
            elif self.inimage:
                context['image']['description'] = value
        self._summaryKey = None
    _end_abstract = _end_description
    def _start_info(self, attrsD):
        self.pushContent('info', attrsD, 'text/plain', 1)
    _start_feedburner_browserfriendly = _start_info
    def _end_info(self):
        self.popContent('info')
    _end_feedburner_browserfriendly = _end_info
    def _start_generator(self, attrsD):
        # Atom <generator>: attributes give the URL, the text gives the name.
        if attrsD:
            attrsD = self._itsAnHrefDamnIt(attrsD)
            if attrsD.has_key('href'):
                attrsD['href'] = self.resolveURI(attrsD['href'])
        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
        self.push('generator', 1)
    def _end_generator(self):
        value = self.pop('generator')
        context = self._getContext()
        if context.has_key('generator_detail'):
            context['generator_detail']['name'] = value
    def _start_admin_generatoragent(self, attrsD):
        # admin:generatorAgent carries the URL as rdf:resource; replay it
        # through push/pop so it is stored like element text.
        self.push('generator', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('generator')
        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
    def _start_admin_errorreportsto(self, attrsD):
        # Same rdf:resource pattern as generatorAgent.
        self.push('errorreportsto', 1)
        value = self._getAttribute(attrsD, 'rdf:resource')
        if value:
            self.elementstack[-1][2].append(value)
        self.pop('errorreportsto')
    def _start_summary(self, attrsD):
        context = self._getContext()
        # A second summary after one already exists is treated as content.
        if context.has_key('summary'):
            self._summaryKey = 'content'
            self._start_content(attrsD)
        else:
            self._summaryKey = 'summary'
            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
    _start_itunes_summary = _start_summary
    def _end_summary(self):
        if self._summaryKey == 'content':
            self._end_content()
        else:
            self.popContent(self._summaryKey or 'summary')
        self._summaryKey = None
    _end_itunes_summary = _end_summary
    def _start_enclosure(self, attrsD):
        # Record the enclosure; its href doubles as the entry id when
        # the entry has no id of its own.
        attrsD = self._itsAnHrefDamnIt(attrsD)
        self._getContext().setdefault('enclosures', []).append(FeedParserDict(attrsD))
        href = attrsD.get('href')
        if href:
            context = self._getContext()
            if not context.get('id'):
                context['id'] = href
    def _start_source(self, attrsD):
        # While insource is set, _getContext() routes into self.sourcedata.
        self.insource = 1
    def _end_source(self):
        self.insource = 0
        # Snapshot the accumulated source data into the current entry.
        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
        self.sourcedata.clear()
    def _start_content(self, attrsD):
        self.pushContent('content', attrsD, 'text/plain', 1)
        # Atom out-of-line content: remember the src URL.
        src = attrsD.get('src')
        if src:
            self.contentparams['src'] = src
        self.push('content', 1)
    def _start_prodlink(self, attrsD):
        self.pushContent('content', attrsD, 'text/html', 1)
    def _start_body(self, attrsD):
        # Inline XHTML body (e.g. Atom 0.3 xhtml:body).
        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
    _start_xhtml_body = _start_body
    def _start_content_encoded(self, attrsD):
        # RSS content:encoded is escaped HTML.
        self.pushContent('content', attrsD, 'text/html', 1)
    _start_fullitem = _start_content_encoded
    def _end_content(self):
        # Plain-text and HTML content also doubles as the description.
        copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
        value = self.popContent('content')
        if copyToDescription:
            self._save('description', value)
    _end_body = _end_content
    _end_xhtml_body = _end_content
    _end_content_encoded = _end_content
    _end_fullitem = _end_content
    _end_prodlink = _end_content
    def _start_itunes_image(self, attrsD):
        # iTunes image/link carry the URL in the href attribute.
        self.push('itunes_image', 0)
        self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
    _start_itunes_link = _start_itunes_image
    def _end_itunes_block(self):
        # 'yes'/'no' text mapped to 1/0 (py2 and-or conditional idiom).
        value = self.pop('itunes_block', 0)
        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
    def _end_itunes_explicit(self):
        value = self.pop('itunes_explicit', 0)
        self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
        # SAX-based parser used when a real XML parser is available.
        # Translates namespace-aware SAX events into the prefix:localname
        # strings that _FeedParserMixin's handlers expect.  On any parse
        # error the 'bozo' flag is set and the exception recorded.
        def __init__(self, baseuri, baselang, encoding):
            if _debug: sys.stderr.write('trying StrictFeedParser\n')
            xml.sax.handler.ContentHandler.__init__(self)
            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
            self.bozo = 0
            self.exc = None
        def startPrefixMapping(self, prefix, uri):
            self.trackNamespace(prefix, uri)
        def startElementNS(self, name, qname, attrs):
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if lowernamespace.find('backend.userland.com/rss') <> -1:
                # match any backend.userland.com namespace
                namespace = 'http://backend.userland.com/rss'
                lowernamespace = namespace
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = None
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            # A prefix that was never declared is a hard error (bozo feed).
            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))

            # qname implementation is horribly broken in Python 2.1 (it
            # doesn't report any), and slightly broken in Python 2.2 (it
            # doesn't report the xml: namespace). So we match up namespaces
            # with a known list first, and then possibly override them with
            # the qnames the SAX parser gives us (if indeed it gives us any
            # at all).  Thanks to MatejC for helping me test this and
            # tirelessly telling me that it didn't work yet.
            attrsD = {}
            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
                lowernamespace = (namespace or '').lower()
                prefix = self._matchnamespaces.get(lowernamespace, '')
                if prefix:
                    attrlocalname = prefix + ':' + attrlocalname
                attrsD[str(attrlocalname).lower()] = attrvalue
            for qname in attrs.getQNames():
                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
            self.unknown_starttag(localname, attrsD.items())
        def characters(self, text):
            self.handle_data(text)
        def endElementNS(self, name, qname):
            # Mirror of startElementNS's prefix resolution for end tags.
            namespace, localname = name
            lowernamespace = str(namespace or '').lower()
            if qname and qname.find(':') > 0:
                givenprefix = qname.split(':')[0]
            else:
                givenprefix = ''
            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if prefix:
                localname = prefix + ':' + localname
            localname = str(localname).lower()
            self.unknown_endtag(localname)
        def error(self, exc):
            # Recoverable SAX error: flag the feed as bozo, keep parsing.
            self.bozo = 1
            self.exc = exc
        def fatalError(self, exc):
            # Unrecoverable: record and re-raise so the caller can fall
            # back to the loose parser.
            self.error(exc)
            raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
    # Pass-through HTML processor built on the Python 2 sgmllib parser:
    # parses HTML and reconstructs it piece by piece into self.pieces.
    # Subclasses override the handle_*/unknown_* hooks to transform the
    # markup (see _RelativeURIResolver and _HTMLSanitizer below).
    # NOTE(review): several string literals below appear entity-decoded
    # (e.g. the &#39;/&quot; replacements); preserved byte-for-byte.
    elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
      'img', 'input', 'isindex', 'link', 'meta', 'param']
    def __init__(self, encoding):
        self.encoding = encoding
        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
        sgmllib.SGMLParser.__init__(self)

    def reset(self):
        # Clear the accumulated output; called by SGMLParser.__init__ too.
        self.pieces = []
        sgmllib.SGMLParser.reset(self)

    def _shorttag_replace(self, match):
        # Expand XML-style short tags: void elements keep ' />', others
        # become an explicit open/close pair.
        tag = match.group(1)
        if tag in self.elements_no_end_tag:
            return '<' + tag + ' />'
        else:
            return '<' + tag + '></' + tag + '>'

    def feed(self, data):
        # Pre-process the raw markup before handing it to sgmllib.
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'<!\1', data)
        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
        data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace(''', "'")
        data = data.replace('"', '"')
        # sgmllib works on byte strings; encode unicode input first.
        if self.encoding and type(data) == type(u''):
            data = data.encode(self.encoding)
        sgmllib.SGMLParser.feed(self, data)

    def normalize_attrs(self, attrs):
        # utility method to be called by descendants
        # Lowercase attribute names, and values of rel/type attributes.
        attrs = [(k.lower(), v) for k, v in attrs]
        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
        return attrs

    def unknown_starttag(self, tag, attrs):
        # called for each start tag
        # attrs is a list of (attr, value) tuples
        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
        uattrs = []
        # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
        for key, value in attrs:
            if type(value) != type(u''):
                value = unicode(value, self.encoding)
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs]).encode(self.encoding)
        if tag in self.elements_no_end_tag:
            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
        else:
            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())

    def unknown_endtag(self, tag):
        # called for each end tag, e.g. for </pre>, tag will be 'pre'
        # Reconstruct the original end tag.
        if tag not in self.elements_no_end_tag:
            self.pieces.append("</%(tag)s>" % locals())

    def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
        # Reconstruct the original character reference.
        self.pieces.append('&#%(ref)s;' % locals())

    def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
        # Reconstruct the original entity reference.
        self.pieces.append('&%(ref)s;' % locals())

    def handle_data(self, text):
        # called for each block of plain text, i.e. outside of any tag and
        # not containing any character or entity references
        # Store the original text verbatim.
        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_text, text=%s\n' % text)
        self.pieces.append(text)

    def handle_comment(self, text):
        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
        # Reconstruct the original comment.
        self.pieces.append('<!--%(text)s-->' % locals())

    def handle_pi(self, text):
        # called for each processing instruction, e.g. <?instruction>
        # Reconstruct original processing instruction.
        self.pieces.append('<?%(text)s>' % locals())

    def handle_decl(self, text):
        # called for the DOCTYPE, if present, e.g.
        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
        #     "http://www.w3.org/TR/html4/loose.dtd">
        # Reconstruct original DOCTYPE
        self.pieces.append('<!%(text)s>' % locals())

    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
    def _scan_name(self, i, declstartpos):
        # Override of sgmllib's name scanner with a more permissive
        # declaration-name pattern (allows '.', ':').
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = self._new_declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.handle_data(rawdata)
#            self.updatepos(declstartpos, i)
            return None, -1

    def output(self):
        '''Return processed HTML as a single string'''
        return ''.join([str(p) for p in self.pieces])
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
    # Fallback parser used when no real XML parser is available or the
    # strict parser failed: drives _FeedParserMixin from sgmllib events.
    # NOTE(review): the replace() literals below appear entity-decoded by
    # extraction (in the original they decode &lt;, &gt;, &amp;, &quot;,
    # &#39; and their double-escaped forms); preserved byte-for-byte.
    def __init__(self, baseuri, baselang, encoding):
        sgmllib.SGMLParser.__init__(self)
        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)

    def decodeEntities(self, element, data):
        # Decode entity references that sgmllib passed through verbatim.
        data = data.replace('<', '<')
        data = data.replace('<', '<')
        data = data.replace('>', '>')
        data = data.replace('>', '>')
        data = data.replace('&', '&')
        data = data.replace('&', '&')
        data = data.replace('"', '"')
        data = data.replace('"', '"')
        data = data.replace(''', ''')
        data = data.replace(''', ''')
        # For escaped (non-xml) content types, fully unescape the markup.
        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('<', '<')
            data = data.replace('>', '>')
            data = data.replace('&', '&')
            data = data.replace('"', '"')
            data = data.replace(''', "'")
        return data
class _RelativeURIResolver(_BaseHTMLProcessor):
    # HTML processor that rewrites every URI-bearing attribute so
    # relative URIs become absolute against a base URI.
    # (tag, attribute) pairs whose values are URIs, per HTML 4.
    relative_uris = [('a', 'href'),
                     ('applet', 'codebase'),
                     ('area', 'href'),
                     ('blockquote', 'cite'),
                     ('body', 'background'),
                     ('del', 'cite'),
                     ('form', 'action'),
                     ('frame', 'longdesc'),
                     ('frame', 'src'),
                     ('iframe', 'longdesc'),
                     ('iframe', 'src'),
                     ('head', 'profile'),
                     ('img', 'longdesc'),
                     ('img', 'src'),
                     ('img', 'usemap'),
                     ('input', 'src'),
                     ('input', 'usemap'),
                     ('ins', 'cite'),
                     ('link', 'href'),
                     ('object', 'classid'),
                     ('object', 'codebase'),
                     ('object', 'data'),
                     ('object', 'usemap'),
                     ('q', 'cite'),
                     ('script', 'src')]

    def __init__(self, baseuri, encoding):
        _BaseHTMLProcessor.__init__(self, encoding)
        self.baseuri = baseuri

    def resolveURI(self, uri):
        return _urljoin(self.baseuri, uri)

    def unknown_starttag(self, tag, attrs):
        # Resolve only the attributes known to hold URIs for this tag.
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding):
    # Run the markup through _RelativeURIResolver, which rewrites every
    # URI-bearing attribute relative to baseURI, and return the result.
    if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
    resolver = _RelativeURIResolver(baseURI, encoding)
    resolver.feed(htmlSource)
    return resolver.output()
class _HTMLSanitizer(_BaseHTMLProcessor):
    # HTML processor that drops every element and attribute not on the
    # whitelists below, and discards the entire content of script/applet.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
      'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
      'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
      'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
      'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
      'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
      'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
      'thead', 'tr', 'tt', 'u', 'ul', 'var']

    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
      'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
      'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
      'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
      'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
      'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
      'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
      'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
      'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
      'usemap', 'valign', 'value', 'vspace', 'width']

    # Elements whose entire content (not just the tags) must be dropped.
    unacceptable_elements_with_end_tag = ['script', 'applet']

    def reset(self):
        _BaseHTMLProcessor.reset(self)
        # Nesting depth of unacceptable elements; text is suppressed
        # whenever this is non-zero.
        self.unacceptablestack = 0

    def unknown_starttag(self, tag, attrs):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack += 1
            return
        # Keep only whitelisted attributes on whitelisted tags.
        attrs = self.normalize_attrs(attrs)
        attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)

    def unknown_endtag(self, tag):
        if not tag in self.acceptable_elements:
            if tag in self.unacceptable_elements_with_end_tag:
                self.unacceptablestack -= 1
            return
        _BaseHTMLProcessor.unknown_endtag(self, tag)

    def handle_pi(self, text):
        # Processing instructions are stripped entirely.
        pass

    def handle_decl(self, text):
        # Declarations (DOCTYPE etc.) are stripped entirely.
        pass

    def handle_data(self, text):
        # Text inside script/applet is suppressed.
        if not self.unacceptablestack:
            _BaseHTMLProcessor.handle_data(self, text)
def _sanitizeHTML(htmlSource, encoding):
    """Strip unsafe markup from htmlSource and return the cleaned HTML.

    If TIDY_MARKUP is enabled and a Tidy implementation (uTidy or mxTidy)
    is installed, also tidies the output and keeps only the contents of
    the <body> element.
    """
    p = _HTMLSanitizer(encoding)
    p.feed(htmlSource)
    data = p.output()
    if TIDY_MARKUP:
        # loop through list of preferred Tidy interfaces looking for one that's installed,
        # then set up a common _tidy function to wrap the interface-specific API.
        _tidy = None
        for tidy_interface in PREFERRED_TIDY_INTERFACES:
            try:
                if tidy_interface == "uTidy":
                    from tidy import parseString as _utidy
                    def _tidy(data, **kwargs):
                        return str(_utidy(data, **kwargs))
                    break
                elif tidy_interface == "mxTidy":
                    from mx.Tidy import Tidy as _mxtidy
                    def _tidy(data, **kwargs):
                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
                        return data
                    break
            except:
                # interface not installed; try the next one
                pass
        if _tidy:
            # the name 'utf8' really means "data is a unicode object"
            # (Python 2): Tidy wants bytes, so encode before and decode after
            utf8 = type(data) == type(u'')
            if utf8:
                data = data.encode('utf-8')
            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
            if utf8:
                data = unicode(data, 'utf-8')
            # keep only what falls inside <body>...</body>
            if data.count('<body'):
                data = data.split('<body', 1)[1]
                if data.count('>'):
                    data = data.split('>', 1)[1]
            if data.count('</body'):
                data = data.split('</body', 1)[0]
    data = data.strip().replace('\r\n', '\n')
    return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
    """Combined urllib2 handler used when fetching feeds.

    Follows redirects while recording the HTTP status on the returned
    object, converts other HTTP errors into pseudo-file results instead of
    raising, and retries basic-auth failures with digest auth on 401.
    """

    def http_error_default(self, req, fp, code, msg, headers):
        # treat any 3xx except 304 (Not Modified) as a redirect; all other
        # codes are returned as a file-like object carrying .status
        if ((code / 100) == 3) and (code != 304):
            return self.http_error_302(req, fp, code, msg, headers)
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        return infourl

    def http_error_302(self, req, fp, code, msg, headers):
        # only follow the redirect if the server actually sent a Location
        # header; either way, preserve the original status code
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    def http_error_301(self, req, fp, code, msg, headers):
        if headers.dict.has_key('location'):
            infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
        else:
            infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        if not hasattr(infourl, 'status'):
            infourl.status = code
        return infourl

    # other redirect-class codes reuse the 302 logic
    http_error_300 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302

    def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth, AND
        # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
        # If all conditions hold, parse authentication information
        # out of the Authorization header we sent the first time
        # (for the username and password) and the WWW-Authenticate
        # header the server sent back (for the realm) and retry
        # the request with the appropriate digest auth headers instead.
        # This evil genius hack has been brought to you by Aaron Swartz.
        host = urlparse.urlparse(req.get_full_url())[1]
        try:
            assert sys.version.split()[0] >= '2.3.3'
            assert base64 != None
            user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
            realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
            self.add_password(realm, host, user, passw)
            retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
            self.reset_retry_count()
            return retry
        except:
            # anything goes wrong -> fall back to the default error path
            return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
    """URL, filename, or string --> stream

    This function lets you define parsers that take any input source
    (URL, pathname to local or network file, or actual data as a string)
    and deal with it in a uniform manner.  Returned object is guaranteed
    to have all the basic stdio read methods (read, readline, readlines).
    Just .close() the object when you're done with it.

    If the etag argument is supplied, it will be used as the value of an
    If-None-Match request header.

    If the modified argument is supplied, it must be a tuple of 9 integers
    as returned by gmtime() in the standard Python time module. This MUST
    be in GMT (Greenwich Mean Time). The formatted date/time will be used
    as the value of an If-Modified-Since request header.

    If the agent argument is supplied, it will be used as the value of a
    User-Agent request header.

    If the referrer argument is supplied, it will be used as the value of a
    Referer[sic] request header.

    If handlers is supplied, it is a list of handlers used to build a
    urllib2 opener.
    """
    # already a file-like object? hand it back unchanged
    if hasattr(url_file_stream_or_string, 'read'):
        return url_file_stream_or_string
    # '-' conventionally means standard input
    if url_file_stream_or_string == '-':
        return sys.stdin
    if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
        if not agent:
            agent = USER_AGENT
        # test for inline user:password for basic auth
        auth = None
        if base64:
            urltype, rest = urllib.splittype(url_file_stream_or_string)
            realhost, rest = urllib.splithost(rest)
            if realhost:
                user_passwd, realhost = urllib.splituser(realhost)
                if user_passwd:
                    # strip the credentials out of the URL and send them
                    # as a Basic Authorization header instead
                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
                    auth = base64.encodestring(user_passwd).strip()
        # try to open with urllib2 (to use optional headers)
        request = urllib2.Request(url_file_stream_or_string)
        request.add_header('User-Agent', agent)
        if etag:
            request.add_header('If-None-Match', etag)
        if modified:
            # format into an RFC 1123-compliant timestamp. We can't use
            # time.strftime() since the %a and %b directives can be affected
            # by the current locale, but RFC 2616 states that dates must be
            # in English.
            short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
        if referrer:
            request.add_header('Referer', referrer)
        # advertise only the decompression schemes we can actually handle
        if gzip and zlib:
            request.add_header('Accept-encoding', 'gzip, deflate')
        elif gzip:
            request.add_header('Accept-encoding', 'gzip')
        elif zlib:
            request.add_header('Accept-encoding', 'deflate')
        else:
            request.add_header('Accept-encoding', '')
        if auth:
            request.add_header('Authorization', 'Basic %s' % auth)
        if ACCEPT_HEADER:
            request.add_header('Accept', ACCEPT_HEADER)
        request.add_header('A-IM', 'feed') # RFC 3229 support
        opener = apply(urllib2.build_opener, tuple([_FeedURLHandler()] + handlers))
        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
        try:
            # NOTE(review): the opener is closed before the caller reads
            # from the returned response -- presumably safe with urllib2's
            # response objects, but confirm streaming still works.
            return opener.open(request)
        finally:
            opener.close() # JohnD
    # try to open with native open function (if url_file_stream_or_string is a filename)
    try:
        return open(url_file_stream_or_string)
    except:
        pass
    # treat url_file_stream_or_string as string
    return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
#
# Each template below expands into a regex with named groups (year, month,
# day, ordinal, century); an optional time/timezone suffix is appended to
# every expanded pattern.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-MM', 'YYYY-?OOO',
                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                '-YY-?MM', '-OOO', '-YY',
                '--MM-?DD', '--MM',
                '---DD',
                'CC', '']
_iso8601_re = [
    tmpl.replace(
    'YYYY', r'(?P<year>\d{4})').replace(
    'YY', r'(?P<year>\d\d)').replace(
    'MM', r'(?P<month>[01]\d)').replace(
    'DD', r'(?P<day>[0123]\d)').replace(
    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
    'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# clean up the loop variables leaked into the module namespace by the list
# comprehensions (Python 2 scoping).
# NOTE(review): Python 3 comprehensions no longer leak their loop variable,
# so these dels would raise NameError there -- confirm supported versions
# before porting.
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    # try each precompiled template regex in turn; template order is
    # significant (greedy match), see _iso8601_tmpl above
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m: break
    if not m: return
    if m.span() == (0, 0): return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # no year in the date: default to the current (GMT) year
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params.keys():
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(params.get('second', 0))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    # daylight savings is complex, but not needed for feedparser's purposes
    # as time zones, if specified, include mention of whether it is active
    # (e.g. PST vs. PDT, CET). Using -1 is implementation-dependent and
    # and most implementations have DST bugs
    daylight_savings_flag = 0
    tm = [year, month, day, hour, minute, second, weekday,
        ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the broken-down time toward
    # GMT by the signed offset; mktime below normalizes the result
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    """Parse a string in the OnBlog (Korean) 8-bit date format."""
    match = _korean_onblog_date_re.match(dateString)
    if match is None:
        return
    year, month, day, hour, minute, second = match.groups()
    # OnBlog timestamps are Korean Standard Time (UTC+9)
    w3dtfdate = '%s-%s-%sT%s:%s:%s%s' % (year, month, day,
                                         hour, minute, second, '+09:00')
    if _debug:
        sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
    """Parse a string in the Nate (Korean) 8-bit date format."""
    match = _korean_nate_date_re.match(dateString)
    if match is None:
        return
    # convert the 12-hour clock + AM/PM marker to a zero-padded 24-hour value
    hour24 = int(match.group(5))
    if match.group(4) == _korean_pm:
        hour24 += 12
    hour = str(hour24).zfill(2)
    # Nate timestamps are Korean Standard Time (UTC+9)
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
                 'hour': hour, 'minute': match.group(6), 'second': match.group(7),
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    """Parse a string in the MS SQL datetime format."""
    match = _mssql_date_re.match(dateString)
    if match is None:
        return
    # NOTE(review): the +09:00 offset looks inherited from the Korean
    # handlers above -- confirm it is intended for MS SQL dates too.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': match.group(1), 'month': match.group(2), 'day': match.group(3),
                 'hour': match.group(4), 'minute': match.group(5), 'second': match.group(6),
                 'zonediff': '+09:00'}
    if _debug:
        sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
    """Parse a string in a Greek 8-bit date format."""
    match = _greek_date_format_re.match(dateString)
    if match is None:
        return
    try:
        wday = _greek_wdays[match.group(1)]
        month = _greek_months[match.group(3)]
    except KeyError:
        # unknown weekday/month abbreviation
        return
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': match.group(2), 'month': month, 'year': match.group(4),
                  'hour': match.group(5), 'minute': match.group(6), 'second': match.group(7),
                  'zonediff': match.group(8)}
    if _debug:
        sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
    """Parse a string in a Hungarian 8-bit date format."""
    match = _hungarian_date_format_re.match(dateString)
    if match is None:
        return
    try:
        month = _hungarian_months[match.group(2)]
        day = match.group(3)
        if len(day) == 1:
            day = '0' + day
        hour = match.group(4)
        if len(hour) == 1:
            hour = '0' + hour
    except KeyError:
        # unknown month name
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': match.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': match.group(5),
                 'zonediff': match.group(6)}
    if _debug:
        sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    def __extract_date(m):
        # returns (year, month, day); (0, 0, 0) signals an unusable date
        year = int(m.group('year'))
        if year < 100:
            # two-digit year: assume the current century
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # ordinal (day-of-year) date: estimate month/day, then refine
            # by iterating until gmtime agrees on the day-of-year.
            # NOTE(review): 'julian / 30' relies on Python 2 integer
            # division; it yields a float under Python 3 -- confirm the
            # supported versions before porting.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        # returns (hours, minutes, seconds); missing time means midnight
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        # sign is inverted: the returned value is added to convert the
        # local timestamp to UTC
        if tzd[0] == '+':
            return -offset
        return offset

    # date part: year, optional (julian day-of-year | month [day]), with a
    # consistent separator (either '-' everywhere or none)
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<julian>\d\d\d)'
                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # the whole string must match, not just a prefix
    if (m is None) or (m.group() != dateString): return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0: return
    # mktime interprets the tuple as local time, so compensate with
    # time.timezone, then apply the explicit zone offset
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    # drop a leading weekday token ('Thu,' / 'Thu.' / 'thu')
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # 'day month year time+zone' with the zone glued onto the time:
        # split it apart so parsedate_tz can handle it
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    if len(data) < 5:
        # date only: assume midnight GMT
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
def _parse_date(dateString):
    """Parse a variety of date formats into a 9-tuple in GMT."""
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
            if not date9tuple:
                continue
            if len(date9tuple) != 9:
                if _debug:
                    sys.stderr.write('date handler function must return 9-tuple\n')
                raise ValueError
            map(int, date9tuple)  # sanity check: every field must be numeric
            return date9tuple
        except Exception as e:
            if _debug:
                sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
    return None
def _getCharacterEncoding(http_headers, xml_data):
    '''Get the character encoding of the XML document

    http_headers is a dictionary
    xml_data is a raw string (not Unicode)

    This is so much trickier than it sounds, it's not even funny.
    According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
    is application/xml, application/*+xml,
    application/xml-external-parsed-entity, or application/xml-dtd,
    the encoding given in the charset parameter of the HTTP Content-Type
    takes precedence over the encoding given in the XML prefix within the
    document, and defaults to 'utf-8' if neither are specified.  But, if
    the HTTP Content-Type is text/xml, text/*+xml, or
    text/xml-external-parsed-entity, the encoding given in the XML prefix
    within the document is ALWAYS IGNORED and only the encoding given in
    the charset parameter of the HTTP Content-Type header should be
    respected, and it defaults to 'us-ascii' if not specified.

    Furthermore, discussion on the atom-syntax mailing list with the
    author of RFC 3023 leads me to the conclusion that any document
    served with a Content-Type of text/* and no charset parameter
    must be treated as us-ascii.  (We now do this.)  And also that it
    must always be flagged as non-well-formed.  (We now do this too.)

    If Content-Type is unspecified (input was local file or non-HTTP source)
    or unrecognized (server just got it totally wrong), then go by the
    encoding given in the XML prefix of the document and default to
    'iso-8859-1' as per the HTTP specification (RFC 2616).

    Then, assuming we didn't find a character encoding in the HTTP headers
    (and the HTTP Content-type allowed us to look in the body), we need
    to sniff the first few bytes of the XML data and try to determine
    whether the encoding is ASCII-compatible.  Section F of the XML
    specification shows the way here:
    http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info

    If the sniffed encoding is not ASCII-compatible, we need to make it
    ASCII compatible so that we can sniff further into the XML declaration
    to find the encoding attribute, which will tell us the true encoding.

    Of course, none of this guarantees that we will be able to parse the
    feed in the declared character encoding (assuming it was declared
    correctly, which many are not).  CJKCodecs and iconv_codec help a lot;
    you should definitely install them if you can.
    http://cjkpython.i18n.org/
    '''

    def _parseHTTPContentType(content_type):
        '''takes HTTP Content-Type header and returns (content type, charset)

        If no charset is specified, returns (content type, '')
        If no content type is specified, returns ('', '')
        Both return parameters are guaranteed to be lowercase strings
        '''
        content_type = content_type or ''
        content_type, params = cgi.parse_header(content_type)
        return content_type, params.get('charset', '').replace("'", '')

    sniffed_xml_encoding = ''
    xml_encoding = ''
    true_encoding = ''
    http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
    # Must sniff for non-ASCII-compatible character encodings before
    # searching for XML declaration.  This heuristic is defined in
    # section F of the XML specification:
    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
    try:
        if xml_data[:4] == '\x4c\x6f\xa7\x94':
            # EBCDIC
            xml_data = _ebcdic_to_ascii(xml_data)
        elif xml_data[:4] == '\x00\x3c\x00\x3f':
            # UTF-16BE
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16BE with BOM
            sniffed_xml_encoding = 'utf-16be'
            xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x3f\x00':
            # UTF-16LE
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
        elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
            # UTF-16LE with BOM
            sniffed_xml_encoding = 'utf-16le'
            xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\x00\x3c':
            # UTF-32BE
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\x3c\x00\x00\x00':
            # UTF-32LE
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
        elif xml_data[:4] == '\x00\x00\xfe\xff':
            # UTF-32BE with BOM
            sniffed_xml_encoding = 'utf-32be'
            xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
        elif xml_data[:4] == '\xff\xfe\x00\x00':
            # UTF-32LE with BOM
            sniffed_xml_encoding = 'utf-32le'
            xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
        elif xml_data[:3] == '\xef\xbb\xbf':
            # UTF-8 with BOM
            sniffed_xml_encoding = 'utf-8'
            xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
        else:
            # ASCII-compatible
            pass
        # now that xml_data is ASCII-compatible, look for an encoding
        # attribute in the XML declaration
        xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
    except:
        xml_encoding_match = None
    if xml_encoding_match:
        xml_encoding = xml_encoding_match.groups()[0].lower()
        # if the declaration names a multi-byte family (UCS-2/UCS-4/UTF-16/
        # UTF-32), trust the byte-order variant we actually sniffed
        if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
            xml_encoding = sniffed_xml_encoding
    # apply the RFC 3023 precedence rules described in the docstring
    acceptable_content_type = 0
    application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
    text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
    if (http_content_type in application_content_types) or \
       (http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
        acceptable_content_type = 1
        true_encoding = http_encoding or xml_encoding or 'utf-8'
    elif (http_content_type in text_content_types) or \
         (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
        acceptable_content_type = 1
        true_encoding = http_encoding or 'us-ascii'
    elif http_content_type.startswith('text/'):
        true_encoding = http_encoding or 'us-ascii'
    elif http_headers and (not http_headers.has_key('content-type')):
        # no Content-Type at all (e.g. local file): go by the XML prefix
        true_encoding = xml_encoding or 'iso-8859-1'
    else:
        true_encoding = xml_encoding or 'utf-8'
    return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
def _toUTF8(data, encoding):
    '''Changes an XML data stream on the fly to specify a new encoding

    data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
    encoding is a string recognized by encodings.aliases

    Returns UTF-8 bytes with the XML declaration rewritten (or inserted)
    to say encoding='utf-8'.
    '''
    if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
    # strip Byte Order Mark (if present); when the BOM contradicts the
    # declared encoding, trust the BOM
    if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
        # UTF-16BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16be':
                sys.stderr.write('trying utf-16be instead\n')
        encoding = 'utf-16be'
        data = data[2:]
    elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
        # UTF-16LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-16le':
                sys.stderr.write('trying utf-16le instead\n')
        encoding = 'utf-16le'
        data = data[2:]
    elif data[:3] == '\xef\xbb\xbf':
        # UTF-8 BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-8':
                sys.stderr.write('trying utf-8 instead\n')
        encoding = 'utf-8'
        data = data[3:]
    elif data[:4] == '\x00\x00\xfe\xff':
        # UTF-32BE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32be':
                sys.stderr.write('trying utf-32be instead\n')
        encoding = 'utf-32be'
        data = data[4:]
    elif data[:4] == '\xff\xfe\x00\x00':
        # UTF-32LE BOM
        if _debug:
            sys.stderr.write('stripping BOM\n')
            if encoding != 'utf-32le':
                sys.stderr.write('trying utf-32le instead\n')
        encoding = 'utf-32le'
        data = data[4:]
    # decode with the (possibly corrected) source encoding, then re-encode
    # as UTF-8 with a matching XML declaration
    newdata = unicode(data, encoding)
    if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
    declmatch = re.compile('^<\?xml[^>]*?>')
    newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
    if declmatch.search(newdata):
        newdata = declmatch.sub(newdecl, newdata)
    else:
        newdata = newdecl + u'\n' + newdata
    return newdata.encode('utf-8')
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
data = doctype_pattern.sub('', data)
return version, data
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string

    Always returns a FeedParserDict with at least 'feed' and 'entries' keys.
    Download, decompression, encoding, and parsing problems are reported
    through result['bozo'] / result['bozo_exception'] rather than raised.

    NOTE(review): the mutable default handlers=[] is shared across calls; it
    is only rebound (never mutated in place) below, but it is also passed to
    _open_resource -- confirm that function does not mutate it either.
    '''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    # A single old-style handler instance is wrapped in a list (Python 2 idiom).
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None
    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage.  Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        # Default to 200; overwritten below if the response carries a real status.
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()
    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)
    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)
    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result
    # if there was a problem downloading, we're done
    if not data:
        return result
    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    # NOTE: conversion failures are deliberately swallowed -- we just fall
    # through to the next candidate encoding.
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding: continue
        if proposed_encoding in tried_encodings: continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    # here proposed_encoding is the last candidate tried, i.e. the one that worked
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding
    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    # Fall back to the loose (sgmllib/regex-based) parser when SAX is
    # unavailable or strict parsing failed above.
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
if __name__ == '__main__':
    # Command-line driver: feedparser.py <url> [<url> ...]
    if not sys.argv[1:]:
        # No arguments: show the module docstring (usage) and exit cleanly.
        print __doc__
        sys.exit(0)
    else:
        urls = sys.argv[1:]
    # Turn FeedParserDict results into plain dicts so that pprint formats
    # them nicely (see the 3.3 revision-history note below).
    zopeCompatibilityHack()
    from pprint import pprint
    for url in urls:
        print url
        print
        result = parse(url)
        pprint(result)
        print
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF16-LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if not xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
| virtuald/exaile | plugins/podcasts/_feedparser.py | Python | gpl-2.0 | 122,472 | [
"NetCDF",
"VisIt"
] | 5177664d3d7fc98623f344e4e520208c8326202b8e27c0b18c540cbbccda6305 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
The initial version of this module was based on a similar implementation
present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference with respect to the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
import sys
import os
import abc
import string
import copy
import getpass
import json
import math
from . import qutils as qu
from collections import namedtuple
from subprocess import Popen, PIPE
from pymatgen.util.io_utils import AtomicFile
from monty.string import is_string, list_strings
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.inspect import all_subclasses
from monty.io import FileLock
from monty.json import MSONable
from pymatgen.core.units import Memory
from .utils import Condition
from .launcher import ScriptEditor
from .qjobs import QueueJob
import logging
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Names exported via `from ... import *`: only the factory function is public.
__all__ = [
    "make_qadapter",
]
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
class SubmitResults(namedtuple("SubmitResult", "qid, out, err, process")):
    """
    Named tuple created by the concrete implementation of _submit_to_queue to pass
    the results of the process of submitting the jobfile to the queue.

    qid: queue id of the submission
    out: stdout of the submission
    err: stderr of the submission
    process: process object of the submission
    """
class MpiRunner:
    """
    This object provides an abstraction for the mpirunner provided
    by the different MPI libraries. Its main task is handling the
    different syntax and options supported by the different mpirunners.
    """
    def __init__(self, name, type=None, options=""):
        """
        Args:
            name (str): Name of the mpirunner e.g. mpirun, mpiexec, srun ...
            type: Type of the mpirunner (not used at present)
            options (str): String with options passed to the mpi runner e.g. "--bind-to None"
        """
        self.name = name if name else ""
        # `type` is deliberately ignored for now (see docstring):
        # string_to_run only implements the `type is None` code path.
        self.type = None
        self.options = str(options)

    def string_to_run(self, qad, executable, stdin=None, stdout=None, stderr=None, exec_args=None):
        """
        Build and return a string with the command required to launch `executable` with the qadapter `qad`.

        Args:
            qad: Qadapter instance (only `mpi_procs`, and `bgsize_rankspernode` for runjob, are used).
            executable (str): Executable name or path
            stdin (str): Name of the file to be used as standard input. None means no redirection.
            stdout (str): Name of the file to be used as standard output. None means no redirection.
            stderr (str): Name of the file to be used as standard error. None means no redirection.
            exec_args: Optional list of strings with options passed to `executable`.

        Return:
            String with command to execute.

        Raises:
            ValueError: if no known MPI runner is configured but qad.mpi_procs > 1.
        """
        stdin = "< " + stdin if stdin is not None else ""
        stdout = "> " + stdout if stdout is not None else ""
        stderr = "2> " + stderr if stderr is not None else ""

        if exec_args:
            executable = executable + " " + " ".join(list_strings(exec_args))

        basename = os.path.basename(self.name)
        if basename in ["mpirun", "mpiexec", "srun"]:
            if self.type is None:
                # $MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR
                num_opt = "-n " + str(qad.mpi_procs)
                cmd = " ".join([self.name, self.options, num_opt, executable, stdin, stdout, stderr])
            else:
                raise NotImplementedError("type %s is not supported!" % self.type)

        elif basename == "runjob":
            #runjob --ranks-per-node 2 --exp-env OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
            #runjob -n 2 --exp-env=OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
            # exe must be absolute path or relative to cwd.
            bg_size, rpn = qad.bgsize_rankspernode()
            num_opt = "--ranks-per-node " + str(rpn)
            cmd = " ".join([self.name, self.options, num_opt, "--exp-env OMP_NUM_THREADS",
                            "--exe `which " + executable + "` ", stdin, stdout, stderr])
        else:
            if qad.mpi_procs != 1:
                # Bugfix: the message used to read "mpi_procs > when ..." (missing the "1").
                raise ValueError("Cannot use mpi_procs > 1 when mpi_runner basename=%s" % basename)
            cmd = " ".join([executable, stdin, stdout, stderr])

        return cmd
class OmpEnv(AttrDict):
    """
    Dictionary with the OpenMP environment variables
    see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
    """
    # Recognized OpenMP variables (duplicated entries removed).
    _KEYS = [
        "OMP_SCHEDULE",
        "OMP_NUM_THREADS",
        "OMP_DYNAMIC",
        "OMP_PROC_BIND",
        "OMP_NESTED",
        "OMP_STACKSIZE",
        "OMP_WAIT_POLICY",
        "OMP_MAX_ACTIVE_LEVELS",
        "OMP_THREAD_LIMIT",
    ]

    @classmethod
    def as_ompenv(cls, obj):
        """Convert an object into a OmpEnv"""
        if isinstance(obj, cls): return obj
        if obj is None: return cls()
        return cls(**obj)

    def __init__(self, *args, **kwargs):
        """
        Constructor method inherited from dictionary.
        All values are normalized to strings since they end up in the shell environment:

        >>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"

        To create an instance from an INI file, use:
           OmpEnv.from_file(filename)

        Raises:
            ValueError: if a key is not a recognized OpenMP variable.
        """
        super().__init__(*args, **kwargs)
        err_msg = ""
        for key, value in self.items():
            # Normalize to string: the value is later exported in the shell env.
            self[key] = str(value)
            if key not in self._KEYS:
                err_msg += "unknown option %s\n" % key
        if err_msg:
            raise ValueError(err_msg)

    def export_str(self):
        """Return a string with the bash statements needed to setup the OMP env."""
        return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware:
    """
    This object collects information on the hardware available in a given queue.
    Basic definitions:
    - A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
      and extension cards, e.g. disks, nics, and accelerators
    - A cpu socket is the connector to these systems and the cpu cores
    - A cpu core is an independent computing unit with its own computing pipeline, logical units, and memory controller.
      Each cpu core will be able to service a number of cpu threads, each having an independent instruction stream
      but sharing the cores memory controller and other logical units.
    """
    def __init__(self, **kwargs):
        """
        Expected keyword arguments (all MANDATORY):
            num_nodes: number of nodes in the queue.
            sockets_per_node: number of CPU sockets per node.
            cores_per_socket: number of cores per socket.
            mem_per_node: memory per node; any string accepted by Memory.from_string, e.g. "32 Gb".

        Raises:
            ValueError: if a value is non-positive or unknown keywords are present.
        """
        self.num_nodes = int(kwargs.pop("num_nodes"))
        self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
        self.cores_per_socket = int(kwargs.pop("cores_per_socket"))
        # Convert memory to megabytes.
        m = str(kwargs.pop("mem_per_node"))
        self.mem_per_node = int(Memory.from_string(m).to("Mb"))
        # NOTE(review): at this point kwargs holds only the *leftover* keys, so the
        # message below does not show the offending values -- confirm intended.
        if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
            raise ValueError("invalid parameters: %s" % kwargs)
        if kwargs:
            raise ValueError("Found invalid keywords in the partition section:\n %s" % list(kwargs.keys()))
    def __str__(self):
        """String representation."""
        lines = []
        app = lines.append
        app(" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
            (self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
        return "\n".join(lines)
    @property
    def num_cores(self):
        """Total number of cores available"""
        return self.cores_per_socket * self.sockets_per_node * self.num_nodes
    @property
    def cores_per_node(self):
        """Number of cores per node."""
        return self.cores_per_socket * self.sockets_per_node
    @property
    def mem_per_core(self):
        """Memory available per core (mem_per_node / cores_per_node), in megabytes."""
        return self.mem_per_node / self.cores_per_node
    def can_use_omp_threads(self, omp_threads):
        """True if omp_threads fit in a node."""
        return self.cores_per_node >= omp_threads
    def divmod_node(self, mpi_procs, omp_threads):
        """Use divmod to compute (num_nodes, rest_cores)"""
        return divmod(mpi_procs * omp_threads, self.cores_per_node)
    def as_dict(self):
        """Return a dict representation suitable for JSON serialization."""
        return {'num_nodes': self.num_nodes,
                'sockets_per_node': self.sockets_per_node,
                'cores_per_socket': self.cores_per_socket,
                'mem_per_node': str(Memory(val=self.mem_per_node, unit='Mb'))}
    @classmethod
    def from_dict(cls, dd):
        """Reconstruct a Hardware instance from a dict produced by as_dict."""
        return cls(num_nodes=dd['num_nodes'],
                   sockets_per_node=dd['sockets_per_node'],
                   cores_per_socket=dd['cores_per_socket'],
                   mem_per_node=dd['mem_per_node'])
class _ExcludeNodesFile:
    """
    This file contains the list of nodes to be excluded.
    Nodes are indexed by queue name.
    """
    DIRPATH = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
    FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")

    def __init__(self):
        # Create the JSON database (an empty mapping) on first use.
        if not os.path.exists(self.FILEPATH):
            if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
            with FileLock(self.FILEPATH):
                with open(self.FILEPATH, "w") as fh:
                    json.dump({}, fh)

    def read_nodes(self, qname):
        """Return the list of nodes excluded for queue `qname` (empty list if none)."""
        # Bugfix: the file must be opened for *reading*; the previous mode "w"
        # truncated the database and then json.load failed on the write-only handle.
        with open(self.FILEPATH, "r") as fh:
            return json.load(fh).get(qname, [])

    def add_nodes(self, qname, nodes):
        """Add `nodes` (a single node or a list of nodes) to the exclusion list of queue `qname`."""
        nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
        with FileLock(self.FILEPATH):
            # Bugfix: read the current database first. The previous code used the
            # literal key "qname" instead of the qname variable, and called
            # json.load on the empty temporary file created by AtomicFile.
            with open(self.FILEPATH, "r") as fh:
                d = json.load(fh)
            d[qname] = sorted(set(d.get(qname, [])) | set(nodes))
            # Atomic rewrite so that a crash cannot leave a half-written file.
            with AtomicFile(self.FILEPATH, mode="w") as fh:
                json.dump(d, fh)
# Module-level singleton; instantiating it creates the JSON database file
# under ~/.abinit/abipy if it does not exist yet.
_EXCL_NODES_FILE = _ExcludeNodesFile()
def show_qparams(qtype, stream=sys.stdout):
    """Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
    matching = [c for c in all_subclasses(QueueAdapter) if c.QTYPE == qtype]
    if matching:
        # Write the template of the first matching subclass.
        return stream.write(matching[0].QTEMPLATE)
    raise ValueError("Cannot find class associated to qtype %s" % qtype)
def all_qtypes():
    """Return sorted list with all qtypes supported."""
    qtypes = (cls.QTYPE for cls in all_subclasses(QueueAdapter))
    return sorted(qtypes)
def make_qadapter(**kwargs):
    """
    Return the concrete :class:`QueueAdapter` class from a string.
    Note that one can register a customized version with:

    .. example::

        from qadapters import SlurmAdapter

        class MyAdapter(SlurmAdapter):
            QTYPE = "myslurm"
            # Add your customized code here

        # Register your class.
        SlurmAdapter.register(MyAdapter)

        make_qadapter(qtype="myslurm", **kwargs)

    .. warning::

        MyAdapter should be pickleable, hence one should declare it
        at the module level so that pickle can import it at run-time.
    """
    # Map each registered qtype string onto its concrete QueueAdapter subclass.
    qtype2cls = {sub.QTYPE: sub for sub in all_subclasses(QueueAdapter)}
    # Work on a deep copy so that the caller's dictionary is not mutated by the pop below.
    kwargs = copy.deepcopy(kwargs)
    qtype = kwargs["queue"].pop("qtype")
    return qtype2cls[qtype](**kwargs)
class QScriptTemplate(string.Template):
    # Use '$$' as the placeholder delimiter so that literal '$' characters
    # (ubiquitous in shell scripts) survive substitution untouched.
    delimiter = '$$'
class QueueAdapterError(Exception):
    """Base Error class for exceptions raised by QueueAdapter."""
class MaxNumLaunchesError(QueueAdapterError):
    """Raised by `submit_to_queue` if we try to submit more than `max_num_launches` times."""
class QueueAdapter(MSONable, metaclass=abc.ABCMeta):
    """
    The `QueueAdapter` is responsible for all interactions with a specific queue management system.
    This includes handling all details of queue script format as well as queue submission and management.
    This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
    Concrete classes should extend this class with implementations that work on specific queue systems.
    .. note::
        A `QueueAdapter` has a handler (:class:`QueueJob`) defined in qjobs.py that allows one
        to contact the resource manager to get info about the status of the job.
        Each concrete implementation of `QueueAdapter` should have a corresponding `QueueJob`.
    """
    # Class-level aliases so that client code can raise/catch adapter errors
    # via `self.Error` without importing the exception classes from this module.
    Error = QueueAdapterError
    MaxNumLaunchesError = MaxNumLaunchesError
@classmethod
def all_qtypes(cls):
    """Return sorted list with all qtypes supported."""
    return sorted(sub.QTYPE for sub in all_subclasses(cls))
@classmethod
def autodoc(cls):
    # Human-readable template documenting every section/key understood by the
    # QueueAdapter constructor; intended to be printed verbatim for the user.
    return """
# Dictionary with info on the hardware available on this queue.
hardware:
    num_nodes:        # Number of nodes available on this queue (integer, MANDATORY).
    sockets_per_node: # Number of sockets per node (integer, MANDATORY).
    cores_per_socket: # Number of cores per socket (integer, MANDATORY).
    # The total number of cores available on this queue is
    # `num_nodes * sockets_per_node * cores_per_socket`.

# Dictionary with the options used to prepare the enviroment before submitting the job
job:
    setup:       # List of commands (strings) executed before running (DEFAULT: empty)
    omp_env:     # Dictionary with OpenMP environment variables (DEFAULT: empty i.e. no OpenMP)
    modules:     # List of modules to be imported before running the code (DEFAULT: empty).
                 # NB: Error messages produced by module load are redirected to mods.err
    shell_env:   # Dictionary with shell environment variables.
    mpi_runner:  # MPI runner. Possible values in ["mpirun", "mpiexec", "srun", None]
                 # DEFAULT: None i.e. no mpirunner is used.
    mpi_runner_options # String with optional options passed to the `mpi_runner` e.g. "--bind-to None"
    shell_runner: # Used for running small sequential jobs on the front-end. Set it to None
                  # if mpirun or mpiexec are not available on the fron-end. If not
                  # given, small sequential jobs are executed with `mpi_runner`.
    shell_runner_options # Similar to mpi_runner_options but for the runner used on the front-end.
    pre_run:     # List of commands (strings) executed before the run (DEFAULT: empty)
    post_run:    # List of commands (strings) executed after the run (DEFAULT: empty)

# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
    qtype:       # String defining the qapapter type e.g. slurm, shell ...
    qname:       # Name of the submission queue (string, MANDATORY)
    qparams:     # Dictionary with values used to generate the header of the job script
                 # We use the *normalized* version of the options i.e dashes in the official name
                 # are replaced by underscores e.g. ``--mail-type`` becomes ``mail_type``
                 # See pymatgen.io.abinit.qadapters.py for the list of supported values.
                 # Use ``qverbatim`` to pass additional options that are not included in the template.

# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
    min_cores:         # Minimum number of cores (integer, DEFAULT: 1)
    max_cores:         # Maximum number of cores (integer, MANDATORY). Hard limit to hint_cores:
                       # it's the limit beyond which the scheduler will not accept the job (MANDATORY).
    hint_cores:        # The limit used in the initial setup of jobs.
                       # Fix_Critical method may increase this number until max_cores is reached
    min_mem_per_proc:  # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
                       # (DEFAULT: hardware.mem_per_core)
    max_mem_per_proc:  # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
                       # (DEFAULT: hardware.mem_per_node)
    timelimit:         # Initial time-limit. Accepts time according to slurm-syntax i.e:
                       # "days-hours" or "days-hours:minutes" or "days-hours:minutes:seconds" or
                       # "minutes" or "minutes:seconds" or "hours:minutes:seconds",
    timelimit_hard:    # The hard time-limit for this queue. Same format as timelimit.
                       # Error handlers could try to submit jobs with increased timelimit
                       # up to timelimit_hard. If not specified, timelimit_hard == timelimit
    condition:         # MongoDB-like condition (DEFAULT: empty, i.e. not used)
    allocation:        # String defining the policy used to select the optimal number of CPUs.
                       # possible values are in ["nodes", "force_nodes", "shared"]
                       # "nodes" means that we should try to allocate entire nodes if possible.
                       # This is a soft limit, in the sense that the qadapter may use a configuration
                       # that does not fulfill this requirement. In case of failure, it will try to use the
                       # smallest number of nodes compatible with the optimal configuration.
                       # Use `force_nodes` to enfore entire nodes allocation.
                       # `shared` mode does not enforce any constraint (DEFAULT: shared).
    max_num_launches:  # Limit to the number of times a specific task can be restarted (integer, DEFAULT: 5)
"""
def __init__(self, **kwargs):
    """
    Args:
        qname: Name of the queue.
        qparams: Dictionary with the parameters used in the template.
        setup: String or list of commands to execute during the initial setup.
        modules: String or list of modules to load before running the application.
        shell_env: Dictionary with the environment variables to export before running the application.
        omp_env: Dictionary with the OpenMP variables.
        pre_run: String or list of commands to execute before launching the calculation.
        post_run: String or list of commands to execute once the calculation is completed.
        mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
        mpi_runner_options: Optional string with options passed to the mpi_runner.
        max_num_launches: Maximum number of submissions that can be done for a specific task. Defaults to 5
        qverbatim: Extra lines to add verbatim to the submission-script header.
        min_cores, max_cores, hint_cores: Minimum, maximum, and hint limits of number of cores that can be used
        min_mem_per_proc: Minimum memory per process in megabytes.
        max_mem_per_proc: Maximum memory per process in megabytes.
        timelimit: initial time limit in seconds
        timelimit_hard: hard limelimit for this queue
        priority: Priority level, integer number > 0
        condition: Condition object (dictionary)
    .. note::
        priority is a non-negative integer used to order the qadapters. The :class:`TaskManager` will
        try to run jobs on the qadapter with the highest priority if possible
    """
    # TODO
    #task_classes
    # Make defensive copies so that we can change the values at runtime.
    kwargs = copy.deepcopy(kwargs)
    self.priority = int(kwargs.pop("priority"))
    self.hw = Hardware(**kwargs.pop("hardware"))
    # Each _parse_* helper pops the keys it understands so that any leftover
    # key is detected as unknown below.
    self._parse_queue(kwargs.pop("queue"))
    self._parse_limits(kwargs.pop("limits"))
    self._parse_job(kwargs.pop("job"))
    self.set_master_mem_overhead(kwargs.pop("master_mem_overhead", 0))
    # List of dictionaries with the parameters used to submit jobs
    # The launcher will use this information to increase the resources
    self.launches = []
    if kwargs:
        raise ValueError("Found unknown keywords:\n%s" % list(kwargs.keys()))
    self.validate_qparams()
    # Initialize some values from the info reported in the partition.
    self.set_mpi_procs(self.min_cores)
    self.set_mem_per_proc(self.min_mem_per_proc)
    # Final consistency check (the setters above may have touched qparams).
    self.validate_qparams()
def as_dict(self):
    """
    Provides a simple though not complete dict serialization of the object (OMP missing, not all limits are
    kept in the dictionary, ... other things to be checked)
    Returns:
        MSONable dict with @module/@class markers for round-tripping via from_dict.
    Raise:
        `ValueError` if errors.
    """
    if self.has_omp:
        # OpenMP settings are not serialized yet; refuse rather than silently drop them.
        raise NotImplementedError('as_dict method of QueueAdapter not yet implemented when OpenMP is activated')
    return {'@module': self.__class__.__module__,
            '@class': self.__class__.__name__,
            'priority': self.priority,
            'hardware': self.hw.as_dict(),
            'queue': {'qtype': self.QTYPE,
                      'qname': self._qname,
                      'qnodes': self.qnodes,
                      'qparams': self._qparams},
            'limits': {'timelimit_hard': self._timelimit_hard,
                       'timelimit': self._timelimit,
                       'min_cores': self.min_cores,
                       'max_cores': self.max_cores,
                       'min_mem_per_proc': self.min_mem_per_proc,
                       'max_mem_per_proc': self.max_mem_per_proc,
                       'memory_policy': self.memory_policy
                       },
            'job': {},
            'mpi_procs': self._mpi_procs,
            'mem_per_proc': self._mem_per_proc,
            'master_mem_overhead': self._master_mem_overhead
            }
@classmethod
def from_dict(cls, dd):
    """
    Reconstruct a QueueAdapter from the dict produced by as_dict.
    Raises ValueError if `dd` contains keys that are not consumed.
    """
    priority = dd.pop('priority')
    hardware = dd.pop('hardware')
    queue = dd.pop('queue')
    limits = dd.pop('limits')
    job = dd.pop('job')
    # Delegate the construction to the factory so the concrete class is
    # selected from queue["qtype"].
    qa = make_qadapter(priority=priority, hardware=hardware, queue=queue, limits=limits, job=job)
    qa.set_mpi_procs(dd.pop('mpi_procs'))
    qa.set_mem_per_proc(dd.pop('mem_per_proc'))
    qa.set_master_mem_overhead(dd.pop('master_mem_overhead', 0))
    timelimit = dd.pop('timelimit', None)
    if timelimit is not None:
        qa.set_timelimit(timelimit=timelimit)
    # MSONable markers are metadata, not constructor arguments.
    dd.pop('@module', None)
    dd.pop('@class', None)
    if dd:
        raise ValueError("Found unknown keywords:\n%s" % list(dd.keys()))
    return qa
def validate_qparams(self):
    """
    Check if the keys specified by the user in qparams are supported.
    Raise:
        `ValueError` if errors.
    """
    # No validation for ShellAdapter.
    if isinstance(self, ShellAdapter): return
    # Parse the template so that we know the list of supported options.
    unsupported = [p for p in self.qparams if p not in self.supported_qparams]
    if unsupported:
        # BUG FIX: the original appended the full "Supported parameters" list
        # once per offending key, repeating it in the error message.
        err_msg = "".join("Unsupported QUEUE parameter name %s\n" % p for p in unsupported)
        err_msg += "Supported parameters:\n"
        err_msg += "".join("    %s \n" % p for p in self.supported_qparams)
        raise ValueError(err_msg)
def _parse_limits(self, d):
    """
    Parse the `limits` section of the configuration dict.
    Every recognized key is popped from `d`; leftovers raise ValueError.
    """
    # Time limits.
    self.set_timelimit(qu.timelimit_parser(d.pop("timelimit")))
    tl_hard = d.pop("timelimit_hard", None)
    # timelimit_hard defaults to timelimit when not given.
    tl_hard = qu.timelimit_parser(tl_hard) if tl_hard is not None else self.timelimit
    self.set_timelimit_hard(tl_hard)
    # Cores
    self.min_cores = int(d.pop("min_cores", 1))
    self.max_cores = int(d.pop("max_cores"))
    self.hint_cores = int(d.pop("hint_cores", self.max_cores))
    self.memory_policy = d.pop("memory_policy", "mem")
    if self.min_cores > self.max_cores:
        raise ValueError("min_cores %s cannot be greater than max_cores %s" % (self.min_cores, self.max_cores))
    # Memory
    # FIXME: Neeed because autoparal 1 with paral_kgb 1 is not able to estimate memory
    self.min_mem_per_proc = qu.any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
    self.max_mem_per_proc = qu.any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))
    # Misc
    self.max_num_launches = int(d.pop("max_num_launches", 5))
    self.condition = Condition(d.pop("condition", {}))
    self.allocation = d.pop("allocation", "shared")
    if self.allocation not in ("nodes", "force_nodes", "shared"):
        raise ValueError("Wrong value for `allocation` option")
    if d:
        raise ValueError("Found unknown keyword(s) in limits section:\n %s" % list(d.keys()))
def _parse_job(self, d):
    """
    Parse the `job` section of the configuration dict (environment setup,
    modules, runners, pre/post commands). Leftover keys raise ValueError.
    """
    # String values for list-like options are promoted to one-element lists.
    setup = d.pop("setup", None)
    if is_string(setup): setup = [setup]
    self.setup = setup[:] if setup is not None else []
    omp_env = d.pop("omp_env", None)
    self.omp_env = omp_env.copy() if omp_env is not None else {}
    modules = d.pop("modules", None)
    if is_string(modules): modules = [modules]
    self.modules = modules[:] if modules is not None else []
    shell_env = d.pop("shell_env", None)
    self.shell_env = shell_env.copy() if shell_env is not None else {}
    mpi_options = d.pop("mpi_runner_options", "")
    self.mpi_runner = d.pop("mpi_runner", None)
    # Wrap plain strings/None in an MpiRunner object.
    if not isinstance(self.mpi_runner, MpiRunner):
        self.mpi_runner = MpiRunner(self.mpi_runner, options=mpi_options)
    self.shell_runner = d.pop("shell_runner", None)
    shell_runner_options = d.pop("shell_runner_options", "")
    if self.shell_runner is not None:
        self.shell_runner = MpiRunner(self.shell_runner, options=shell_runner_options)
    pre_run = d.pop("pre_run", None)
    if is_string(pre_run): pre_run = [pre_run]
    self.pre_run = pre_run[:] if pre_run is not None else []
    post_run = d.pop("post_run", None)
    if is_string(post_run): post_run = [post_run]
    self.post_run = post_run[:] if post_run is not None else []
    if d:
        raise ValueError("Found unknown keyword(s) in job section:\n %s" % list(d.keys()))
def _parse_queue(self, d):
    """Parse the `queue` section of the configuration dict; leftover keys raise ValueError."""
    # Deep-copy user qparams so runtime changes never leak back to the caller.
    qparams = d.pop("qparams", None)
    self._qparams = {} if qparams is None else copy.deepcopy(qparams)
    self.set_qname(d.pop("qname", ""))
    self.qnodes = d.pop("qnodes", "standard")
    if self.qnodes not in ("standard", "shared", "exclusive"):
        raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                         "while qnodes parameter was {}".format(self.qnodes))
    if d:
        raise ValueError("Found unknown keyword(s) in queue section:\n %s" % list(d.keys()))
def __str__(self):
    """Human-readable summary: class name, queue name, hardware, and OMP env if any."""
    lines = ["%s:%s" % (self.__class__.__name__, self.qname),
             "Hardware:\n" + str(self.hw)]
    #lines.extend(["qparams:\n", str(self.qparams)])
    if self.has_omp:
        lines.append(str(self.omp_env))
    return "\n".join(lines)
@property
def qparams(self):
    """Dictionary with the parameters used to construct the header."""
    return self._qparams

@lazy_property
def supported_qparams(self):
    """
    List with the supported parameters that can be passed to the
    queue manager (obtained by parsing QTEMPLATE).
    """
    import re
    # Placeholders in the template have the form $${name}.
    return re.findall(r"\$\$\{(\w+)\}", self.QTEMPLATE)

@property
def has_mpi(self):
    """True if we are using MPI"""
    return bool(self.mpi_runner)

@property
def has_omp(self):
    """True if we are using OpenMP threads"""
    return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))

@property
def num_cores(self):
    """Total number of cores employed"""
    return self.mpi_procs * self.omp_threads

@property
def omp_threads(self):
    """Number of OpenMP threads."""
    if self.has_omp:
        return self.omp_env["OMP_NUM_THREADS"]
    else:
        return 1

@property
def pure_mpi(self):
    """True if only MPI is used."""
    return self.has_mpi and not self.has_omp

@property
def pure_omp(self):
    """True if only OpenMP is used."""
    return self.has_omp and not self.has_mpi

@property
def hybrid_mpi_omp(self):
    """True if we are running in MPI+Openmp mode."""
    return self.has_omp and self.has_mpi

@property
def run_info(self):
    """String with info on the run."""
    return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
def deepcopy(self):
    """Deep copy of the object."""
    return copy.deepcopy(self)

def record_launch(self, queue_id):  # retcode):
    """
    Save the parameters of a submission (queue id, procs, memory, timelimit)
    in self.launches. Returns the total number of launches recorded so far.
    """
    self.launches.append(
        AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
                 mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
    return len(self.launches)

def remove_launch(self, index):
    """Remove launch with the given index."""
    self.launches.pop(index)

@property
def num_launches(self):
    """Number of submission tried with this adapter so far."""
    return len(self.launches)

@property
def last_launch(self):
    """Return the last launch, or None if nothing has been submitted yet."""
    if len(self.launches) > 0:
        return self.launches[-1]
    else:
        return None
def validate(self):
    """
    Validate the parameters of the run. Raises self.Error if invalid parameters.
    All violated constraints are collected and reported together.
    """
    errors = []
    app = errors.append
    if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
        app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
    if self.omp_threads > self.hw.cores_per_node:
        app("omp_threads > hw.cores_per_node")
    if self.mem_per_proc > self.hw.mem_per_node:
        app("mem_mb >= self.hw.mem_per_node")
    if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
        app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
    if self.priority <= 0:
        app("priority must be > 0")
    if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
        app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")
    if errors:
        raise self.Error(str(self) + "\n".join(errors))
def set_omp_threads(self, omp_threads):
    """Set the number of OpenMP threads."""
    self.omp_env["OMP_NUM_THREADS"] = omp_threads

@property
def mpi_procs(self):
    """Number of CPUs used for MPI."""
    return self._mpi_procs

def set_mpi_procs(self, mpi_procs):
    """Set the number of MPI processes to mpi_procs"""
    self._mpi_procs = mpi_procs

@property
def qname(self):
    """The name of the queue."""
    return self._qname

def set_qname(self, qname):
    """Set the name of the queue."""
    self._qname = qname
# todo this assumes only one wall time. i.e. the one in the mananager file is the one always used.
# we should use the standard walltime to start with but also allow to increase the walltime

@property
def timelimit(self):
    """Returns the walltime in seconds."""
    return self._timelimit

@property
def timelimit_hard(self):
    """Returns the hard-limit walltime in seconds."""
    return self._timelimit_hard

def set_timelimit(self, timelimit):
    """Set the start walltime in seconds, fix method may increase this one until timelimit_hard is reached."""
    self._timelimit = timelimit

def set_timelimit_hard(self, timelimit_hard):
    """Set the maximal possible walltime in seconds."""
    self._timelimit_hard = timelimit_hard

@property
def mem_per_proc(self):
    """The memory per process in megabytes."""
    return self._mem_per_proc

@property
def master_mem_overhead(self):
    """The memory overhead for the master process in megabytes."""
    return self._master_mem_overhead

def set_mem_per_proc(self, mem_mb):
    """
    Set the memory per process in megabytes. If mem_mb <=0, min_mem_per_proc is used.
    """
    # Hack needed because abinit is still not able to estimate memory.
    # COMMENTED by David.
    # This is not needed anymore here because the "hack" is performed directly in select_qadapter/_use_qadpos_pconf
    # methods of TaskManager. Moreover, this hack should be performed somewhere else (this part should be
    # independent of abinit ... and if we want to have less memory than the average memory available per node, we
    # have to allow it!)
    #if mem_mb <= self.min_mem_per_proc: mem_mb = self.min_mem_per_proc
    self._mem_per_proc = int(mem_mb)

def set_master_mem_overhead(self, mem_mb):
    """
    Set the memory overhead for the master process in megabytes.
    Raises ValueError for negative values.
    """
    if mem_mb < 0:
        raise ValueError("Memory overhead for the master process should be >= 0")
    self._master_mem_overhead = int(mem_mb)

@property
def total_mem(self):
    """Total memory required by the job in megabytes."""
    return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
@abc.abstractmethod
def cancel(self, job_id):
    """
    Cancel the job.
    Args:
        job_id: Job identifier.
    Returns:
        Exit status.
    """

def can_run_pconf(self, pconf):
    """True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
    # Core-count, OpenMP and memory constraints must all be satisfied.
    if not self.hint_cores >= pconf.num_cores >= self.min_cores: return False
    if not self.hw.can_use_omp_threads(self.omp_threads): return False
    if pconf.mem_per_proc > self.hw.mem_per_node: return False
    # force_nodes requires full-node occupation.
    if self.allocation == "force_nodes" and pconf.num_cores % self.hw.cores_per_node != 0:
        return False
    # Finally apply the user-defined MongoDB-like condition.
    return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
    """
    Return a Distrib namedtuple (num_nodes, mpi_per_node, exact) describing how to
    spread `mpi_procs` MPI processes with `omp_threads` threads and `mem_per_proc`
    Mb per process over the nodes of this queue.
    `exact` is True when the distribution fills nodes commensurately.
    Raises self.Error when no distribution fits in the per-node memory.

    Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
    (i.e., the number of running processes is equal to or less than the number of available processors),
    MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
    up the processor to other processes. With some network transports, this means that Open MPI will spin
    in tight loops attempting to make message passing progress, effectively causing other processes to not get
    any CPU cycles (and therefore never make any progress)
    """
    class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
        pass

    hw = self.hw
    # TODO: Add check on user-memory
    if mem_per_proc <= 0:
        logger.warning("mem_per_proc <= 0")
        mem_per_proc = hw.mem_per_core
    if mem_per_proc > hw.mem_per_node:
        raise self.Error(
            "mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
            (mpi_procs, omp_threads, mem_per_proc))
    # Try to use all the cores in the node.
    num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
    if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
        # One node is enough
        return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
    if num_nodes == 0: num_nodes = 2
    mpi_per_node = mpi_procs // num_nodes
    if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
        # Commensurate with nodes.
        return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
    #if mode == "block", "cyclic"
    # Try first to pack MPI processors in a node as much as possible
    mpi_per_node = int(hw.mem_per_node / mem_per_proc)
    assert mpi_per_node != 0
    num_nodes = (mpi_procs * omp_threads) // mpi_per_node
    # BUG FIX: leftover debugging `print` replaced with a logger call.
    logger.debug("exact --> false (num_nodes: %s, mpi_per_node: %s)", num_nodes, mpi_per_node)
    if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
        return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
    if (mpi_procs * omp_threads) % mpi_per_node != 0:
        # Have to reduce the number of MPI procs per node
        for mpi_per_node in reversed(range(1, mpi_per_node)):
            if mpi_per_node > hw.cores_per_node: continue
            num_nodes = (mpi_procs * omp_threads) // mpi_per_node
            if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
                return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
    else:
        raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
                         (mpi_procs, omp_threads, mem_per_proc))
def optimize_params(self, qnodes=None):
    """
    Called by get_subs_dict; returns a dict with extra parameters to be merged
    into qparams. The base implementation performs no optimization.
    Subclasses may provide a specialized version.
    """
    return {}
def get_subs_dict(self, qnodes=None):
    """
    Return substitution dict for replacements into the template
    Subclasses may want to customize this method.
    """
    #d = self.qparams.copy()
    d = self.qparams
    # NOTE(review): `d` aliases self.qparams, so the update below mutates the
    # adapter's qparams in place (the .copy() above was deliberately disabled).
    # Confirm the persistence of optimized params is intended before changing it.
    d.update(self.optimize_params(qnodes=qnodes))
    # clean null values
    subs_dict = {k: v for k, v in d.items() if v is not None}
    #print("subs_dict:", subs_dict)
    return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
    """Return a string with the options that are passed to the resource manager."""
    # Substitution values for the template placeholders.
    subs_dict = self.get_subs_dict()
    # Names for the queue manager's stdout/stderr use the .qout/.qerr
    # extensions so the files are easy to locate; slashes in the job name
    # are replaced because they would be interpreted as path separators.
    subs_dict['job_name'] = job_name.replace('/', '_')
    subs_dict['_qout_path'] = qout_path
    subs_dict['_qerr_path'] = qerr_path
    filled = QScriptTemplate(self.QTEMPLATE).safe_substitute(subs_dict)
    # Drop every line that still carries an unreplaced $$ placeholder.
    kept_lines = [ln for ln in filled.split('\n') if '$$' not in ln]
    return '\n'.join(kept_lines)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
                   stdin=None, stdout=None, stderr=None, exec_args=None):
    """
    Returns a (multi-line) String representing the queue script, e.g. PBS script.
    Uses the template_file along with internal parameters to create the script.
    Args:
        job_name: Name of the job.
        launch_dir: (str) The directory the job will be launched in.
        executable: String with the name of the executable to be executed or list of commands
        qout_path: Path of the Queue manager output file.
        qerr_path: Path of the Queue manager error file.
        stdin, stdout, stderr: Optional redirections passed to the MPI runner.
        exec_args: List of arguments passed to executable (used only if executable is a string, default: empty)
    """
    # PbsPro does not accept job_names longer than 15 chars.
    if len(job_name) > 14 and isinstance(self, PbsProAdapter):
        job_name = job_name[:14]
    # Construct the header for the Queue Manager.
    qheader = self._make_qheader(job_name, qout_path, qerr_path)
    # Add the bash section.
    se = ScriptEditor()
    # Cd to launch_dir immediately.
    se.add_line("cd " + os.path.abspath(launch_dir))
    if self.setup:
        se.add_comment("Setup section")
        se.add_lines(self.setup)
        se.add_emptyline()
    if self.modules:
        # stderr is redirected to mods.err file.
        # module load 2>> mods.err
        se.add_comment("Load Modules")
        se.add_line("module purge")
        se.load_modules(self.modules)
        se.add_emptyline()
    se.add_comment("OpenMp Environment")
    if self.has_omp:
        se.declare_vars(self.omp_env)
        se.add_emptyline()
    else:
        # Explicitly disable OpenMP threading when no OMP env was configured.
        se.declare_vars({"OMP_NUM_THREADS": 1})
    if self.shell_env:
        se.add_comment("Shell Environment")
        se.declare_vars(self.shell_env)
        se.add_emptyline()
    if self.pre_run:
        se.add_comment("Commands before execution")
        se.add_lines(self.pre_run)
        se.add_emptyline()
    # Construct the string to run the executable with MPI and mpi_procs.
    if is_string(executable):
        line = self.mpi_runner.string_to_run(self, executable,
                                             stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
        se.add_line(line)
    else:
        # A list/tuple of commands is written verbatim.
        assert isinstance(executable, (list, tuple))
        se.add_lines(executable)
    if self.post_run:
        se.add_emptyline()
        se.add_comment("Commands after execution")
        se.add_lines(self.post_run)
    return qheader + se.get_script_str() + "\n"
def submit_to_queue(self, script_file):
    """
    Public API: wraps the concrete implementation _submit_to_queue
    Returns (QueueJob, process) on success.
    Raises:
        `self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
        `self.Error` if generic error
    """
    if not os.path.exists(script_file):
        raise self.Error('Cannot find script file located at: {}'.format(script_file))
    if self.num_launches == self.max_num_launches:
        raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
    # Call the concrete implementation.
    s = self._submit_to_queue(script_file)
    # NOTE(review): the launch is recorded before checking s.qid, so a failed
    # submission (qid is None) still counts toward max_num_launches — confirm
    # this accounting is intentional.
    self.record_launch(s.qid)
    if s.qid is None:
        raise self.Error("Error in job submission with %s. file %s \n" %
                         (self.__class__.__name__, script_file) +
                         "The error response reads:\n %s \n " % s.err +
                         "The out response reads:\n %s \n" % s.out)
    # Here we create a concrete instance of QueueJob
    return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
@abc.abstractmethod
def _submit_to_queue(self, script_file):
    """
    Submits the job to the queue, probably using subprocess or shutil
    This method must be provided by the concrete classes and will be called by submit_to_queue
    Args:
        script_file: (str) name of the script file to use (String)
    Returns:
        queue_id, process
    """
def get_njobs_in_queue(self, username=None):
    """
    Return the number of jobs in the queue, probably using subprocess or shutil to
    call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
    Args:
        username: (str) the username of the jobs to count (default is to autodetect)
    """
    if username is None: username = getpass.getuser()
    njobs, process = self._get_njobs_in_queue(username=username)
    if process is not None and process.returncode != 0:
        # there's a problem talking to squeue server?
        # BUG FIX: the two concatenated fragments previously ran together
        # ("...queueThe error response...") — insert the missing separator.
        err_msg = ('Error trying to get the number of jobs in the queue.\n' +
                   'The error response reads:\n {}'.format(process.stderr.read()))
        logger.critical(err_msg)
    if not isinstance(self, ShellAdapter):
        logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
    return njobs
@abc.abstractmethod
def _get_njobs_in_queue(self, username):
    """
    Concrete Subclasses must implement this method. Return (njobs, process)
    where `process` may be None when no external command was executed.
    """
# Methods to fix problems

def add_exclude_nodes(self, nodes):
    """Persist `nodes` in the module-level exclusion file for this queue."""
    return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)

def get_exclude_nodes(self):
    """Return the list of nodes previously excluded for this queue."""
    return _EXCL_NODES_FILE.read_nodes(self.qname)

@abc.abstractmethod
def exclude_nodes(self, nodes):
    """Method to exclude nodes in the calculation. Return True if nodes have been excluded"""
def more_mem_per_proc(self, factor=1):
    """
    Method to increase the amount of memory asked for, by factor.
    Return: new memory if success.
    Raises: self.Error if the memory cannot be increased beyond the node limit
    (the code raises; it does not return 0 as older docs stated).
    """
    base_increase = 2000  # Mb added per unit of `factor`.
    old_mem = self.mem_per_proc
    new_mem = old_mem + factor*base_increase
    if new_mem < self.hw.mem_per_node:
        self.set_mem_per_proc(new_mem)
        return new_mem
    raise self.Error('could not increase mem_per_proc further')
def more_master_mem_overhead(self, mem_increase_mb=1000):
    """
    Method to increase the amount of memory overheaded asked for the master node.
    Return: new master memory overhead if success.
    Raises: self.Error if it cannot be increased without exceeding the node memory.
    """
    old_master_mem_overhead = self.master_mem_overhead
    new_master_mem_overhead = old_master_mem_overhead + mem_increase_mb
    # Overhead plus regular per-proc memory must still fit on one node.
    if new_master_mem_overhead + self.mem_per_proc < self.hw.mem_per_node:
        self.set_master_mem_overhead(new_master_mem_overhead)
        return new_master_mem_overhead
    raise self.Error('could not increase master_mem_overhead further')
def more_cores(self, factor=1):
    """
    Method to increase the number of MPI procs.
    Return: new number of processors if success.
    Raises: self.Error when the max_cores limit would be exceeded.
    """
    # TODO : find a formula that works for all max_cores
    # Scale the step with the queue size: +4 cores per 40 available cores.
    if self.max_cores > 40:
        base_increase = 4 * int(self.max_cores / 40)
    else:
        base_increase = 4
    new_cores = self.hint_cores + factor * base_increase
    if new_cores < self.max_cores:
        self.hint_cores = new_cores
        return new_cores
    raise self.Error('%s hint_cores reached limit on max_core %s' % (new_cores, self.max_cores))
def more_time(self, factor=1):
    """
    Increase the wall time by `factor` * (10% of timelimit_hard).
    Return: the new time limit in seconds if success.
    Raises: self.Error (after demoting this adapter) when the hard limit is reached.
    """
    base_increase = int(self.timelimit_hard / 10)
    new_time = self.timelimit + base_increase * factor
    # BUG FIX: leftover debugging prints replaced with logger calls.
    logger.info('qadapter: trying to increase time')
    if new_time < self.timelimit_hard:
        self.set_timelimit(new_time)
        logger.info('new time set: %s', new_time)
        return new_time
    # Demote this adapter so the TaskManager stops selecting it.
    self.priority = -1
    raise self.Error("increasing time is not possible, the hard limit has been reached")
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
    """Simple Adapter used to submit runs through the shell."""
    QTYPE = "shell"
    # Minimal template: no scheduler directives, only the optional verbatim lines.
    QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""

    def cancel(self, job_id):
        # job_id is the PID returned at submission time, so kill it directly.
        return os.system("kill -9 %d" % job_id)

    def _submit_to_queue(self, script_file):
        # submit the job, return process and pid.
        process = Popen(("/bin/bash", script_file), stderr=PIPE)
        return SubmitResults(qid=process.pid, out='no out in shell submission', err='no err in shell submission', process=process)

    def _get_njobs_in_queue(self, username):
        # The shell has no queue, so the job count is unknown.
        return None, None

    def exclude_nodes(self, nodes):
        # Node exclusion is meaningless when running through the shell.
        return False
class SlurmAdapter(QueueAdapter):
    """Adapter for SLURM."""
    QTYPE = "slurm"
    # Header template: every $${name} placeholder maps to a normalized sbatch
    # option; lines with unreplaced placeholders are stripped by _make_qheader.
    QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#####SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
#SBATCH --qos=$${qos}
$${qverbatim}
"""
def set_qname(self, qname):
    """Set the queue name and mirror it in Slurm's `partition` parameter."""
    super(SlurmAdapter, self).set_qname(qname)
    if qname:
        self.qparams["partition"] = qname

def set_mpi_procs(self, mpi_procs):
    """Set the number of CPUs used for MPI."""
    super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
    # Keep the sbatch header in sync with the internal state.
    self.qparams["ntasks"] = mpi_procs

def set_omp_threads(self, omp_threads):
    """Set the number of OpenMP threads and mirror it in `cpus-per-task`."""
    super(SlurmAdapter, self).set_omp_threads(omp_threads)
    self.qparams["cpus_per_task"] = omp_threads

def set_mem_per_proc(self, mem_mb):
    """Set the memory per process in megabytes"""
    super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
    self.qparams["mem_per_cpu"] = self.mem_per_proc
    # Remove mem if it's defined.
    #self.qparams.pop("mem", None)

def set_timelimit(self, timelimit):
    """Set the walltime; the header needs Slurm's D-HH:MM:SS string format."""
    super(SlurmAdapter, self).set_timelimit(timelimit)
    self.qparams["time"] = qu.time2slurm(timelimit)

def cancel(self, job_id):
    """Cancel the job via scancel. Returns the exit status of the command."""
    return os.system("scancel %d" % job_id)
def optimize_params(self, qnodes=None):
    """Return extra qparams; with `nodes` allocation, request the smallest node count that fits."""
    params = {}
    if self.allocation == "nodes":
        # run on the smallest number of nodes compatible with the configuration
        params["nodes"] = max(int(math.ceil(self.mpi_procs / self.hw.cores_per_node)),
                              int(math.ceil(self.total_mem / self.hw.mem_per_node)))
    return params
    #dist = self.distribute(self.mpi_procs, self.omp_threads, self.mem_per_proc)
    ##print(dist)

    #if False and dist.exact:
    #    # Can optimize parameters
    #    self.qparams["nodes"] = dist.num_nodes
    #    self.qparams.pop("ntasks", None)
    #    self.qparams["ntasks_per_node"] = dist.mpi_per_node
    #    self.qparams["cpus_per_task"] = self.omp_threads
    #    self.qparams["mem"] = dist.mpi_per_node * self.mem_per_proc
    #    self.qparams.pop("mem_per_cpu", None)
    #else:
    #    # Delegate to slurm.
    #    self.qparams["ntasks"] = self.mpi_procs
    #    self.qparams.pop("nodes", None)
    #    self.qparams.pop("ntasks_per_node", None)
    #    self.qparams["cpus_per_task"] = self.omp_threads
    #    self.qparams["mem_per_cpu"] = self.mem_per_proc
    #    self.qparams.pop("mem", None)
    #return {}
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
if sys.version_info[0] < 3:
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = process.communicate()
# grab the returncode. SLURM returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
queue_id = int(out.split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.critical('Could not parse job id following slurm...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
try:
if 'exclude_nodes' not in self.qparams:
self.qparams.update({'exclude_nodes': 'node' + nodes[0]})
print('excluded node %s' % nodes[0])
for node in nodes[1:]:
self.qparams['exclude_nodes'] += ',node' + node
print('excluded node %s' % node)
return True
except (KeyError, IndexError):
raise self.Error('qadapter failed to exclude nodes')
def _get_njobs_in_queue(self, username):
if sys.version_info[0] < 3:
process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE)
else:
# need string not bytes so must use universal_newlines
process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE,
universal_newlines=True)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result. lines should have this form:
# username
# count lines that include the username in it
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class PbsProAdapter(QueueAdapter):
    """Adapter for PbsPro"""
    QTYPE = "pbspro"

    # Alternative select formulations kept for reference:
    #PBS -l select=$${select}:ncpus=$${ncpus}:mem=$${mem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
    #PBS -l select=$${select}:ncpus=1:mem=$${mem}mb:mpiprocs=1:ompthreads=$${ompthreads}
    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""

    def set_qname(self, qname):
        """Record the queue name and map it onto PBS's -q option."""
        super(PbsProAdapter, self).set_qname(qname)
        if qname:
            self.qparams["queue"] = qname

    def set_timelimit(self, timelimit):
        """Set the walltime; converted to PBS syntax via qu.time2pbspro."""
        super(PbsProAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = qu.time2pbspro(timelimit)

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
        # Memory is encoded inside the "select" string instead of a plain qparam.
        #self.qparams["mem"] = self.mem_per_proc

    def cancel(self, job_id):
        """Cancel the job with the given id; returns the qdel exit status."""
        return os.system("qdel %d" % job_id)

    def optimize_params(self, qnodes=None):
        """Return the computed "select" resource specification as a qparam."""
        return {"select": self.get_select(qnodes=qnodes)}

    def get_select(self, ret_dict=False, qnodes=None, memory_policy=None):
        """
        Select is not the most intuitive command. For more info see:

            * http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
            * https://portal.ivec.org/docs/Supercomputers/PBS_Pro
        """
        hw, mem_per_proc = self.hw, int(self.mem_per_proc)
        #dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
        # NOTE(review): the triple-quoted block below is an inert string
        # expression used as a large comment; legacy logic kept for reference.
        """
        if self.pure_mpi:
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            if num_nodes == 0:
                logger.info("IN_CORE PURE MPI: %s" % self.run_info)
                chunks = 1
                ncpus = rest_cores
                mpiprocs = rest_cores
                mem = mem_per_proc * ncpus
                ompthreads = 1
            elif rest_cores == 0:
                # Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
                logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
                chunks = num_nodes
                ncpus = hw.cores_per_node
                mpiprocs = hw.cores_per_node
                mem = ncpus * mem_per_proc
                ompthreads = 1
            else:
                logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
                chunks = self.mpi_procs
                ncpus = 1
                mpiprocs = 1
                mem = mem_per_proc
                ompthreads = 1
        elif self.pure_omp:
            # Pure OMP run.
            logger.info("PURE OPENMP run: %s" % self.run_info)
            assert hw.can_use_omp_threads(self.omp_threads)
            chunks = 1
            ncpus = self.omp_threads
            mpiprocs = 1
            mem = mem_per_proc
            ompthreads = self.omp_threads
        elif self.hybrid_mpi_omp:
            assert hw.can_use_omp_threads(self.omp_threads)
            num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
            #print(num_nodes, rest_cores)
            # TODO: test this
            if rest_cores == 0 or num_nodes == 0:
                logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
                chunks = max(num_nodes, 1)
                mpiprocs = self.mpi_procs // chunks
                chunks = chunks
                ncpus = mpiprocs * self.omp_threads
                mpiprocs = mpiprocs
                mem = mpiprocs * mem_per_proc
                ompthreads = self.omp_threads
            else:
                logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
                chunks=self.mpi_procs
                ncpus=self.omp_threads
                mpiprocs=1
                mem= mem_per_proc
                ompthreads=self.omp_threads
        else:
            raise RuntimeError("You should not be here")
        """
        if memory_policy is None:
            memory_policy = self.memory_policy
        if qnodes is None:
            qnodes = self.qnodes
        else:
            if qnodes not in ["standard", "shared", "exclusive"]:
                raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                                 "while qnodes parameter was {}".format(self.qnodes))
        if qnodes == "standard":
            return self._get_select_standard(ret_dict=ret_dict, memory_policy=memory_policy)
        else:
            return self._get_select_with_master_mem_overhead(ret_dict=ret_dict, qnodes=qnodes,
                                                             memory_policy=memory_policy)

    def _get_select_with_master_mem_overhead(self, ret_dict=False, qnodes=None, memory_policy='mem'):
        """Dispatch to the shared/exclusive select builders that reserve extra
        memory (self.master_mem_overhead) for the master MPI rank."""
        if self.has_omp:
            raise NotImplementedError("select with master mem overhead not yet implemented with has_omp")
        if qnodes is None:
            qnodes = self.qnodes
        else:
            if qnodes not in ["standard", "shared", "exclusive"]:
                raise ValueError("Nodes must be either in standard, shared or exclusive mode "
                                 "while qnodes parameter was {}".format(self.qnodes))
        if qnodes == "exclusive":
            return self._get_select_with_master_mem_overhead_exclusive(ret_dict=ret_dict, memory_policy=memory_policy)
        elif qnodes == "shared":
            return self._get_select_with_master_mem_overhead_shared(ret_dict=ret_dict, memory_policy=memory_policy)
        else:
            raise ValueError("Wrong value of qnodes parameter : {}".format(self.qnodes))

    def _get_select_with_master_mem_overhead_shared(self, ret_dict=False, memory_policy='mem'):
        """Build a select string for shared nodes: one master chunk with the
        memory overhead, plus (mpi_procs - 1) identical slave chunks."""
        # NOTE(review): if memory_policy is neither 'vmem' nor 'mem', `s` is
        # never bound and a NameError would follow -- confirm callers always
        # pass a validated policy.
        chunk_master, ncpus_master, vmem_master, mpiprocs_master = 1, 1, self.mem_per_proc+self.master_mem_overhead, 1
        if self.mpi_procs > 1:
            chunks_slaves, ncpus_slaves, vmem_slaves, mpiprocs_slaves = self.mpi_procs - 1, 1, self.mem_per_proc, 1
            select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                     mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
                                     chunks_slaves=chunks_slaves, ncpus_slaves=ncpus_slaves,
                                     mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
            if memory_policy == 'vmem':
                s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                    "{chunks_slaves}:ncpus={ncpus_slaves}:vmem={vmem_slaves}mb:" \
                    "mpiprocs={mpiprocs_slaves}".format(**select_params)
            elif memory_policy == 'mem':
                s = "{chunk_master}:ncpus={ncpus_master}:mem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                    "{chunks_slaves}:ncpus={ncpus_slaves}:mem={vmem_slaves}mb:" \
                    "mpiprocs={mpiprocs_slaves}".format(**select_params)
            # Sanity check: the chunks must account for every MPI rank.
            tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_slaves
            if tot_ncpus != self.mpi_procs:
                raise ValueError('Total number of cpus is different from mpi_procs ...')
        else:
            select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                     mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master))
            if memory_policy == 'vmem':
                s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:" \
                    "mpiprocs={mpiprocs_master}".format(**select_params)
            elif memory_policy == 'mem':
                s = "{chunk_master}:ncpus={ncpus_master}:mem={vmem_master}mb:" \
                    "mpiprocs={mpiprocs_master}".format(**select_params)
        if ret_dict:
            return s, select_params
        return s

    def _get_select_with_master_mem_overhead_exclusive(self, ret_dict=False, memory_policy='mem'):
        """Build a select string for exclusively-allocated nodes, packing the
        master (with its memory overhead) and the slaves onto whole nodes."""
        # Largest number of ranks that fit on the master's node alongside the
        # master's extra memory requirement.
        max_ncpus_master = min(self.hw.cores_per_node,
                               int((self.hw.mem_per_node-self.mem_per_proc-self.master_mem_overhead)
                                   / self.mem_per_proc) + 1)
        if max_ncpus_master >= self.mpi_procs:
            # Everything fits on a single node.
            chunk, ncpus, mem, mpiprocs = 1, self.mpi_procs, self.hw.mem_per_node, self.mpi_procs
            if memory_policy == 'vmem':
                select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem))
                s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
            elif memory_policy == 'mem':
                select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem))
                s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}".format(**select_params)
            tot_ncpus = chunk*ncpus
        else:
            # Distribute the remaining ranks over full slave nodes; a partially
            # filled "last slave" chunk may be emitted explicitly.
            ncpus_left = self.mpi_procs-max_ncpus_master
            max_ncpus_per_slave_node = min(self.hw.cores_per_node, int(self.hw.mem_per_node/self.mem_per_proc))
            nslaves_float = float(ncpus_left)/float(max_ncpus_per_slave_node)
            ncpus_per_slave = max_ncpus_per_slave_node
            mpiprocs_slaves = max_ncpus_per_slave_node
            chunk_master = 1
            mem_slaves = self.hw.mem_per_node
            explicit_last_slave = False
            chunk_last_slave, ncpus_last_slave, mem_last_slave, mpiprocs_last_slave = None, None, None, None
            if nslaves_float > int(nslaves_float):
                chunks_slaves = int(nslaves_float) + 1
                pot_ncpus_all_slaves = chunks_slaves*ncpus_per_slave
                if pot_ncpus_all_slaves >= self.mpi_procs-1:
                    # Slaves alone could host all non-master ranks: keep one rank
                    # on the master and emit an explicit, smaller last-slave chunk.
                    explicit_last_slave = True
                    chunks_slaves = chunks_slaves-1
                    chunk_last_slave = 1
                    ncpus_master = 1
                    ncpus_last_slave = self.mpi_procs - 1 - chunks_slaves*ncpus_per_slave
                    mem_last_slave = self.hw.mem_per_node
                    mpiprocs_last_slave = ncpus_last_slave
                else:
                    ncpus_master = self.mpi_procs-pot_ncpus_all_slaves
                    if ncpus_master > max_ncpus_master:
                        raise ValueError('ncpus for the master node exceeds the maximum ncpus for the master ... this'
                                         'should not happen ...')
                    if ncpus_master < 1:
                        raise ValueError('ncpus for the master node is 0 ... this should not happen ...')
            elif nslaves_float == int(nslaves_float):
                chunks_slaves = int(nslaves_float)
                ncpus_master = max_ncpus_master
            else:
                raise ValueError('nslaves_float < int(nslaves_float) ...')
            mem_master, mpiprocs_master = self.hw.mem_per_node, ncpus_master
            if explicit_last_slave:
                if memory_policy == 'vmem':
                    select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                             mpiprocs_master=mpiprocs_master, vmem_master=int(mem_master),
                                             chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                             mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(mem_slaves),
                                             chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
                                             vmem_last_slave=int(mem_last_slave),
                                             mpiprocs_last_slave=mpiprocs_last_slave)
                    s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                        "{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
                        "{chunk_last_slave}:ncpus={ncpus_last_slave}:vmem={vmem_last_slave}mb:" \
                        "mpiprocs={mpiprocs_last_slave}".format(**select_params)
                elif memory_policy == 'mem':
                    select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                             mpiprocs_master=mpiprocs_master, mem_master=int(mem_master),
                                             chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                             mpiprocs_slaves=mpiprocs_slaves, mem_slaves=int(mem_slaves),
                                             chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
                                             mem_last_slave=int(mem_last_slave),
                                             mpiprocs_last_slave=mpiprocs_last_slave)
                    s = "{chunk_master}:ncpus={ncpus_master}:mem={mem_master}mb:mpiprocs={mpiprocs_master}+" \
                        "{chunks_slaves}:ncpus={ncpus_per_slave}:mem={mem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
                        "{chunk_last_slave}:ncpus={ncpus_last_slave}:mem={mem_last_slave}mb:" \
                        "mpiprocs={mpiprocs_last_slave}".format(**select_params)
                tot_ncpus = chunk_master*ncpus_master+chunks_slaves*ncpus_per_slave+chunk_last_slave*ncpus_last_slave
            else:
                if memory_policy == 'vmem':
                    select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                             mpiprocs_master=mpiprocs_master, vmem_master=int(mem_master),
                                             chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                             mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(mem_slaves))
                    s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
                        "{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:" \
                        "mpiprocs={mpiprocs_slaves}".format(**select_params)
                elif memory_policy == 'mem':
                    select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
                                             mpiprocs_master=mpiprocs_master, mem_master=int(mem_master),
                                             chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
                                             mpiprocs_slaves=mpiprocs_slaves, mem_slaves=int(mem_slaves))
                    s = "{chunk_master}:ncpus={ncpus_master}:mem={mem_master}mb:mpiprocs={mpiprocs_master}+" \
                        "{chunks_slaves}:ncpus={ncpus_per_slave}:mem={mem_slaves}mb:" \
                        "mpiprocs={mpiprocs_slaves}".format(**select_params)
                tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_per_slave
        # Sanity check: the chunks must account for every MPI rank.
        if tot_ncpus != self.mpi_procs:
            raise ValueError('Total number of cpus is different from mpi_procs ...')
        if ret_dict:
            return s, select_params
        return s

    def _get_select_standard(self, ret_dict=False, memory_policy='mem'):
        """Build a plain select string (no master overhead): one chunk per MPI
        rank, with ncpus/ompthreads reflecting OpenMP when enabled."""
        if not self.has_omp:
            chunks, ncpus, mem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
            if memory_policy == 'vmem':
                select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem))
                s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
            elif memory_policy == 'mem':
                select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem))
                s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}".format(**select_params)
        else:
            chunks, ncpus, mem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
            if memory_policy == 'vmem':
                select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(mem),
                                         ompthreads=ompthreads)
                s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
            elif memory_policy == 'mem':
                select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, mem=int(mem),
                                         ompthreads=ompthreads)
                s = "{chunks}:ncpus={ncpus}:mem={mem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(
                    **select_params)
        if ret_dict:
            return s, select_params
        return s

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        if sys.version_info[0] < 3:
            process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        # grab the return code. PBS returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should of the form '2561553.sdb' or '352353.jessup' - just grab the first part for job id
                queue_id = int(out.split('.')[0])
            except:
                # probably error parsing job code
                logger.critical("Could not parse job id following qsub...")
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process): the user's job count from qstat, or
        njobs=None when qstat exits non-zero."""
        if sys.version_info[0] < 3:
            process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form
            # '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
            # count lines that include the username in it
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            outs = out.split('\n')
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process

    def exclude_nodes(self, nodes):
        """Node exclusion is not supported by this adapter; always False."""
        return False
class TorqueAdapter(PbsProAdapter):
    """Adapter for Torque."""
    QTYPE = "torque"

    QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        # NOTE(review): deliberately calls QueueAdapter (the grandparent)
        # directly instead of super() -- confirm this skip of PbsProAdapter
        # is intended.
        QueueAdapter.set_mem_per_proc(self, mem_mb)
        #self.qparams["mem"] = self.mem_per_proc

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        QueueAdapter.set_mpi_procs(self, mpi_procs)
        # Translate the rank count into Torque's nodes/ppn pair.
        num_nodes, rest_cores = self.hw.divmod_node(mpi_procs, omp_threads=1)
        if num_nodes == 0:
            # All ranks fit on (part of) a single node.
            self.qparams["nodes"] = 1
            self.qparams["ppn"] = mpi_procs
        else:
            if rest_cores != 0:
                # Pack cores as much as possible.
                num_nodes += 1
            self.qparams["nodes"] = num_nodes
            self.qparams["ppn"] = self.hw.cores_per_node

    def exclude_nodes(self, nodes):
        """Node exclusion is not implemented for Torque; always raises."""
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in torque')
class SGEAdapter(QueueAdapter):
    """
    Adapter for Sun Grid Engine (SGE) task submission software.

    See also:

        * https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
        * http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
    """
    QTYPE = "sge"

    QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd                       # Change to current working directory
###$ -V                         # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""

    def set_qname(self, qname):
        """Record the queue name and map it onto SGE's -q option."""
        super(SGEAdapter, self).set_qname(qname)
        if qname:
            self.qparams["queue_name"] = qname

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(SGEAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["ncpus"] = mpi_procs

    def set_omp_threads(self, omp_threads):
        """OpenMP threads are not supported by this adapter; only warns."""
        super(SGEAdapter, self).set_omp_threads(omp_threads)
        logger.warning("Cannot use omp_threads with SGE")

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes"""
        super(SGEAdapter, self).set_mem_per_proc(mem_mb)
        self.qparams["mem_per_slot"] = str(int(self.mem_per_proc)) + "M"

    def set_timelimit(self, timelimit):
        """Set the wall-time limit (h_rt)."""
        super(SGEAdapter, self).set_timelimit(timelimit)
        # Same convention as pbspro e.g. [hours:minutes:]seconds
        self.qparams["walltime"] = qu.time2pbspro(timelimit)

    def cancel(self, job_id):
        """Cancel the job with the given id; returns the qdel exit status."""
        return os.system("qdel %d" % job_id)

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        if sys.version_info[0] < 3:
            process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        # grab the returncode. SGE returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # output should of the form
                # Your job 1659048 ("NAME_OF_JOB") has been submitted
                queue_id = int(out.split(' ')[2])
            except (IndexError, ValueError):
                # Narrowed from a bare `except:` -- only parsing failures are
                # expected here; a bare except would also swallow
                # KeyboardInterrupt/SystemExit.
                logger.critical("Could not parse job id following qsub...")
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def exclude_nodes(self, nodes):
        """Method to exclude nodes in the calculation"""
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process): the user's job count from qstat, or
        njobs=None when qstat exits non-zero."""
        if sys.version_info[0] < 3:
            process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should contain username
            # count lines that include the username in it
            # TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
            outs = out.splitlines()
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process
class MOABAdapter(QueueAdapter):
    """Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
    QTYPE = "moab"

    QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""

    def set_mpi_procs(self, mpi_procs):
        """Set the number of CPUs used for MPI."""
        super(MOABAdapter, self).set_mpi_procs(mpi_procs)
        self.qparams["procs"] = mpi_procs

    def set_timelimit(self, timelimit):
        """Set the walltime; converted via qu.time2slurm."""
        super(MOABAdapter, self).set_timelimit(timelimit)
        self.qparams["walltime"] = qu.time2slurm(timelimit)

    def set_mem_per_proc(self, mem_mb):
        """Set the memory per process in megabytes (not mapped to a qparam yet)."""
        super(MOABAdapter, self).set_mem_per_proc(mem_mb)
        #TODO
        #raise NotImplementedError("set_mem_per_cpu")

    def exclude_nodes(self, nodes):
        """Node exclusion is not implemented for MOAB; always raises."""
        # Fix: error message said "moad" instead of "moab".
        raise self.Error('qadapter failed to exclude nodes, not implemented yet in moab')

    def cancel(self, job_id):
        """Cancel the job with the given id; returns the canceljob exit status."""
        return os.system("canceljob %d" % job_id)

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        if sys.version_info[0] < 3:
            process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        queue_id = None
        if process.returncode == 0:
            # grab the returncode. MOAB returns 0 if the job was successful
            try:
                # output should be the queue_id
                queue_id = int(out.split()[0])
            except (IndexError, ValueError):
                # Narrowed from a bare `except:` -- only parsing failures are
                # expected here; a bare except would also swallow
                # KeyboardInterrupt/SystemExit.
                logger.critical('Could not parse job id following msub...')
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process): the user's job count from showq, or
        njobs=None when showq exits non-zero."""
        # Fix: '-s -u' was passed as a SINGLE argv element, so showq received
        # the literal argument "-s -u" instead of the two flags -s and -u.
        if sys.version_info[0] < 3:
            process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result
            # lines should have this form:
            ##
            ## active jobs: N  eligible jobs: M  blocked jobs: P
            ##
            ## Total job: 1
            ##
            # Split the output string and return the last element.
            out = out.splitlines()[-1]
            njobs = int(out.split()[-1])
        return njobs, process
class BlueGeneAdapter(QueueAdapter):
    """
    Adapter for LoadLever on BlueGene architectures.

    See:
        http://www.prace-ri.eu/best-practice-guide-blue-gene-q-html/#id-1.5.4.8
        https://www.lrz.de/services/compute/supermuc/loadleveler/
    """
    QTYPE = "bluegene"

    QTEMPLATE = """\
#!/bin/bash
# @ job_name = $${job_name}
# @ class = $${class}
# @ error = $${_qout_path}
# @ output = $${_qerr_path}
# @ wall_clock_limit = $${wall_clock_limit}
# @ notification = $${notification}
# @ notify_user = $${mail_user}
# @ environment = $${environment}
# @ account_no = $${account_no}
# @ job_type = bluegene
# @ bg_connectivity = $${bg_connectivity}
# @ bg_size = $${bg_size}
$${qverbatim}
# @ queue
"""

    def set_qname(self, qname):
        """Record the queue name and map it onto LoadLeveler's "class" keyword."""
        super(BlueGeneAdapter, self).set_qname(qname)
        if qname:
            self.qparams["class"] = qname

    #def set_mpi_procs(self, mpi_procs):
    #    """Set the number of CPUs used for MPI."""
    #    super(BlueGeneAdapter, self).set_mpi_procs(mpi_procs)
    #    #self.qparams["ntasks"] = mpi_procs

    #def set_omp_threads(self, omp_threads):
    #    super(BlueGeneAdapter, self).set_omp_threads(omp_threads)
    #    #self.qparams["cpus_per_task"] = omp_threads

    #def set_mem_per_proc(self, mem_mb):
    #    """Set the memory per process in megabytes"""
    #    super(BlueGeneAdapter, self).set_mem_per_proc(mem_mb)
    #    #self.qparams["mem_per_cpu"] = self.mem_per_proc

    def set_timelimit(self, timelimit):
        """Limits are specified with the format hh:mm:ss (hours:minutes:seconds)"""
        super(BlueGeneAdapter, self).set_timelimit(timelimit)
        self.qparams["wall_clock_limit"] = qu.time2loadlever(timelimit)

    def cancel(self, job_id):
        """Cancel the job with the given id; returns the llcancel exit status."""
        return os.system("llcancel %d" % job_id)

    def bgsize_rankspernode(self):
        """Return (bg_size, ranks_per_node) from mpi_procs and omp_threads."""
        bg_size = int(math.ceil((self.mpi_procs * self.omp_threads)/ self.hw.cores_per_node))
        bg_size = max(bg_size, 32) # TODO hardcoded
        ranks_per_node = int(math.ceil(self.mpi_procs / bg_size))
        return bg_size, ranks_per_node

    def optimize_params(self, qnodes=None):
        """Return {"bg_size": ...} computed from mpi_procs/omp_threads."""
        bg_size, rpn = self.bgsize_rankspernode()
        # Fix: leftover debug print() statements replaced with debug logging
        # so normal runs stay quiet; the unused `params = {}` local is gone.
        logger.debug("optimize_params: mpi_procs: %s, omp_threads: %s, bg_size: %s, ranks_per_node: %s" %
                     (self.mpi_procs, self.omp_threads, bg_size, rpn))
        return {"bg_size": bg_size}

    def _submit_to_queue(self, script_file):
        """Submit a job script to the queue."""
        if sys.version_info[0] < 3:
            process = Popen(['llsubmit', script_file], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['llsubmit', script_file], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        # grab the return code. llsubmit returns 0 if the job was successful
        queue_id = None
        if process.returncode == 0:
            try:
                # on JUQUEEN, output should of the form
                #llsubmit: The job "juqueen1c1.zam.kfa-juelich.de.281506" has been submitted.
                token = out.split()[3]
                s = token.split(".")[-1].replace('"', "")
                queue_id = int(s)
            except (IndexError, ValueError):
                # Narrowed from a bare `except:` (which also intercepted
                # KeyboardInterrupt/SystemExit); any exception is still
                # re-raised to the caller.
                logger.critical("Could not parse job id following llsubmit...")
                raise
        return SubmitResults(qid=queue_id, out=out, err=err, process=process)

    def _get_njobs_in_queue(self, username):
        """Return (njobs, process): the user's job count from llq, or
        njobs=None when llq exits non-zero."""
        if sys.version_info[0] < 3:
            process = Popen(['llq', '-u', username], stdout=PIPE, stderr=PIPE)
        else:
            # need string not bytes so must use universal_newlines
            process = Popen(['llq', '-u', username], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        out, err = process.communicate()
        njobs = None
        if process.returncode == 0:
            # parse the result. lines should have this form:
            #
            # Id                       Owner      Submitted   ST PRI Class        Running On
            # ------------------------ ---------- ----------- -- --- ------------ -----------
            # juqueen1c1.281508.0      paj15530    1/23 13:20 I  50  n001
            # 1 job step(s) in query, 1 waiting, 0 pending, 0 running, 0 held, 0 preempted
            #
            # count lines that include the username in it
            outs = out.split('\n')
            njobs = len([line.split() for line in outs if username in line])
        return njobs, process

    def exclude_nodes(self, nodes):
        """Node exclusion is not supported by this adapter; always False."""
        return False
| montoyjh/pymatgen | pymatgen/io/abinit/qadapters.py | Python | mit | 87,473 | [
"ABINIT",
"pymatgen"
] | 2456e6e5eef1bf0e211c2a552bc00907392e0e0559e2b07baef02e431fd95ffd |
###
# Utilities used in RunMC.py for its Monte Carlo algorithm.
# -Tristan Truttmann (tristan.truttmann@gmail.com)
###
# Note: Possible future improvement is to record density matrices for faster convergence.
# The following ~20 lines define important files and directories to write to and get from.
# It also extends the path for dependencies:
# These are python libraries that are needed:
import copy
import os
import multiprocessing
import numpy as np
import shutil
# If the user does not have statsmodels, then they must manually attach the admolecule
# to the adsorbent in the file InputGeom.xyz:
try: import statsmodels.api as sm
except ImportError: pass
import sys
import time
# These are paths that are needed:
# <CommandHome> is the path to where qsub or ./RunMC.py was called:
# If we are running in PBS, use the PBS working directory environment variable for <CommandHome>:
CommandHome = os.getenv('PBS_O_WORKDIR')
# Otherwise, just use the directory where the script was called.
# Fix: compare against None with `is` (identity), not `!=` -- the idiomatic
# (PEP 8) way to test for None.
if CommandHome is None:
    CommandHome = os.getcwd()
# Then append paths that contain dependencies:
sys.path.append(CommandHome)
#====================================================================================
def bias(config, history, MetaHeight, MetaWidths):
    """Return the metadynamics bias energy for the current configuration.

    config:     object exposing FloppyCoord() -> ndarray of the biased coordinates.
    history:    array of previously deposited Gaussian centers (broadcast
                against the current coordinates) -- presumably one row per
                sample; confirm against the caller in RunMC.py.
    MetaHeight: Gaussian hill height(s).
    MetaWidths: Gaussian hill width(s).
    """
    # Note: Some day I might make the Gaussians spill over to the next unit cell, or the next rotational
    # element on a sphere. I might also make the gaussians have an equal span in linear distance
    # on the surface of a sphere.
    # Fix: the original wrapped this in `try: ... except: raise`, which is a
    # no-op -- exceptions propagate unchanged without it.
    present = config.FloppyCoord()
    BiasEnergy = np.sum((MetaHeight * np.exp(-1/2*(present-history)**2/MetaWidths**2)))
    return BiasEnergy
#====================================================================================
class PTableClass:
    # Periodic-table lookup helper with two methods:
    ## <anum(sym)>    Accepts atomic symbol (string) and returns atomic number (int).
    ## <symbol(anum)> Accepts atomic number (int) and returns atomic symbol (string).
    #====================================================================================
    def __init__(self):
        # A list object that stores atomic symbols, indexed by atomic number - 1.
        self.symbols = [
        "H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar",
        "K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br",
        "Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb",
        "Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho",
        "Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi",
        "Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es",
        "Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Cn","Uut","Fl","Uup",
        "Lv","Uus","Uuo"]

    def anum(self, sym):
        """Return the atomic number for symbol `sym`; raises ValueError if unknown.

        Fix: uses list.index instead of a hand-rolled linear search with a
        throwaway locally-defined exception class.
        """
        try:
            return self.symbols.index(sym) + 1
        except ValueError:
            sys.stderr.write("There is no element with symbol " + str(sym) + ".\n")
            raise

    def symbol(self, anum):
        """Return the atomic symbol for atomic number `anum`; raises IndexError
        if out of range.

        Fix: the original indexed symbols[anum - 1] unchecked, so anum=0 (or
        other non-positive values) silently returned the WRONG element via
        Python's negative indexing instead of failing.
        """
        if not 1 <= anum <= len(self.symbols):
            sys.stderr.write("There is no element with atomic number " + str(anum) + ".\n")
            raise IndexError("There is no element with atomic number " + str(anum))
        return self.symbols[anum - 1]

# Then we create an instance to be imported.
ptable = PTableClass()
# The next few classes define the exceptions that are used in this module.
class FileFormatError(Exception):
    """Raised when trying to read from an incompatible xyz format."""

class SizeInconsistency(Exception):
    """Raised when the dimensions of a ConfigClass's anum and coord arrays do not align."""

class LinearAngleError(Exception):
    """Raised when there is a linear angle that is interfering with angle calculations."""

class SiestaError(Exception):
    """Raised when SIESTA returns an error."""
#====================================================================================
def angle(v1,v2,PosN = None):
    # Return the angle, in degrees, between two 3-vectors <v1> and <v2>.
    # If <PosN> is specified and the cross product of the two vectors is
    # more-or-less opposite to <PosN>, the returned angle is negative
    # (right-hand rule).
    # Raises ValueError when either vector is not a 3-vector, and
    # LinearAngleError when <PosN> is exactly perpendicular to a nonzero
    # cross product (the sign would be ambiguous).
    #====================================================================================
    try:
        if (v1.size != 3) or (v2.size != 3):
            raise ValueError
        TheCos = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))
        # Machine rounding can push |TheCos| slightly past 1, which would make
        # np.arccos return NaN. The original only guarded TheCos > 1, so
        # near-antiparallel vectors (TheCos slightly below -1) produced NaN;
        # clamp to the valid [-1, 1] domain to fix both ends.
        TheCos = min(1.0, max(-1.0, TheCos))
        degrees = np.rad2deg(np.arccos(TheCos))
        # If a normal vector <PosN> is supplied, the angle may be negative
        # according to the right hand rule.
        if not np.array_equal(PosN, None):
            cross = np.cross(v1,v2)
            if np.dot(cross, PosN) < 0:
                degrees = -degrees
            elif (np.dot(cross, PosN) == 0) and (np.linalg.norm(cross) > 0):
                raise LinearAngleError
        return degrees
    except ValueError:
        sys.stderr.write("The vectors must be 3 dimensions for angle to work.")
        raise
    except LinearAngleError:
        sys.stderr.write("You chose a refrence normal vector that is perpendicular to the actual cross vector. You surely made a mistake." )
        raise
#====================================================================================
class ConfigClass:
    # Inspired by Dr. Gopinath Subramanian's ConfigClass in molpro-neb/ClassDefinition.py,
    # this class contains information on atomic positions and identities, as well as
    # characteristics such as energy and the centroid, plus a few other methods that
    # make life a lot easier during this Monte Carlo simulation.
    # Attributes:
    #   anum:           A numpy array of integers storing the atomic number of each atom.
    #
    #   coord:          An nx3 numpy array of floats storing the x, y, and z positions of each atom.
    #
    #   E:              Stores the energy of the configuration in eV. Default 0.0
    #
    #   AdmoleculeSize: An integer storing the number of atoms in the admolecule.
    #                   The admolecule atoms are assumed to be the LAST
    #                   AdmoleculeSize entries of anum/coord (see adsorbent()
    #                   and admolecule()).
    #
    #   LatticeMatrix:  A 3x3 array storing the 3 lattice vectors of the whole system
    #                   (one vector per COLUMN, see QueryLattice). Default None.
    #
    #   SubLatticeGrid: A 3-element integer numpy array that stores the number of repeating
    #                   units of the adsorbent inside one repeating unit of the entire system.
    # Methods:
    #   __init__():          Can be initialized with (1) zero arguments to create an empty
    #                        configuration, (2) a string naming an xyz file, or (3) another
    #                        ConfigClass instance which it will copy.
    #
    #   centroid():          Returns the centroid of the system as a 3-element numpy array.
    #
    #   adsorbent():         Returns another ConfigClass that only contains the atoms of the
    #                        adsorbent. Only works if AdmoleculeSize is properly defined.
    #
    #   admolecule():        Returns another ConfigClass that only contains the atoms of the
    #                        ad-molecule. Only works if AdmoleculeSize is properly defined.
    #
    #   verify():            Checks to ensure that the sizes of anum and coord are consistent.
    #                        Raises an error if they are inconsistent.
    #
    #   WriteGeom():         Appends the geometry to an xyz file. Like all write methods,
    #                        the first argument is the name of the xyz file and the second
    #                        (optional) argument is a lock for writing to files.
    #
    #   StartWriteEnergy():  Creates a python file to write the energy to and writes the
    #                        important information at the beginning of the file.
    #
    #   WriteEnergy():       Appends energy information to the specified file in a python-
    #                        readable format.
    #
    #   StartWriteConfig():  Creates a python file to write configuration information to.
    #
    #   WriteConfig():       Appends configuration information to the specified file in a
    #                        python-readable format. The goal is to be able to use this
    #                        file to regenerate the ConfigClass object if necessary.
    #
    #   StartWrite():        Runs StartWriteEnergy() and StartWriteFloppy(). First argument
    #                        is the directory that the files will be written to, and the
    #                        second (optional) argument is a lock for writing to files.
    #                        It will generate files named EnergyEnsemble.py and
    #                        FloppyEnsemble.py and will overwrite existing files.
    #
    #   Write():             Runs WriteGeom(), WriteEnergy(), and WriteFloppy(). First
    #                        argument is the directory that the files will be written to,
    #                        and the second (optional) argument is a lock for writing to
    #                        files. Will append to files named GeomEnsemble.xyz,
    #                        EnergyEnsemble.py, and FloppyEnsemble.py.
    #
    #   FloppyCoord():       Returns a centroid as well as the Euler angles defining the
    #                        orientation of the admolecule with respect to the global xyz
    #                        coordinate system. See the code for how the angles are
    #                        determined.
    #
    #====================================================================================
    def __init__(self,father=None):
        # If there is no argument, an empty default ConfigClass is created:
        if father is None:
            self.anum = np.zeros(0 , dtype = int)
            self.coord = np.zeros((0,3), dtype=float)
            self.E = 0.0
            self.AdmoleculeSize = None
            self.LatticeMatrix = None
            self.SubLatticeGrid = None
        # If the argument is a string, then it attempts to read it as an xyz file:
        elif type(father) is str:
            try:
                # First some defaults are set that are not available from the xyz file:
                self.E = 0.0
                self.AdmoleculeSize = None
                self.LatticeMatrix = None
                self.SubLatticeGrid = None
                # It opens the file and reads the number of atoms from line 1:
                file = open(father,'r')
                natom = int(file.readline())
                # It creates empty arrays for soon-to-come structural information:
                self.anum = np.zeros(natom , dtype = int)
                self.coord = np.empty((natom,3),dtype=float)
                # Then it skips past the title line:
                file.readline()
                # Then it steps through every atom in the file
                # (according to the first line):
                for iatom in range(natom):
                    # It parses the line and converts symbols to atomic numbers in
                    # the anum array:
                    atom = file.readline().split()
                    self.anum[iatom] = ptable.anum(atom[0])
                    # Then it steps through every coordinate in the line and records
                    # them in the coord array:
                    for icoord in range(3):
                        self.coord[iatom][icoord] = float(atom[icoord + 1])
                # Then it checks to make sure there is no other information in the file
                # to check for errors. It raises an error if necessary:
                nextline = file.readline().replace(' ','').replace('\t','').replace('\n','')
                if nextline != '':
                    raise FileFormatError
                file.close()
            # It handles three likely errors with a short error message and raises
            # the error again:
            except(ValueError,IndexError,FileFormatError):
                sys.stderr.write('The file ' + father + ' either has an unsupported format or has several structures. Delete extra structures or fix formating error.\n')
                raise
        # If there is an argument, but it is not a string, it will assume that
        # it is another ConfigClass instance:
        else:
            try:
                # It first runs the verify method, which will raise an error if it
                # finds father to be invalid:
                father.verify()
                # Then it creates a deep copy and assigns self's attributes to the copy's:
                CopyConfig = copy.deepcopy(father)
                self.anum = CopyConfig.anum
                self.coord = CopyConfig.coord
                self.E = CopyConfig.E
                self.AdmoleculeSize = CopyConfig.AdmoleculeSize
                self.LatticeMatrix = CopyConfig.LatticeMatrix
                self.SubLatticeGrid = CopyConfig.SubLatticeGrid
            # Then it handles the error by writing to standard error and raising again:
            except (AttributeError,SizeInconsistency):
                sys.stderr.write( "You have tried to initialize a ConfigClass with some incompatiblae Object. Perhaps you used Dr. Subramanian's ConfigClass in molpro-neb/ClassDefinition.py. Consider using the one from MC_Util.py." )
                raise
        self.verify()
    # A method to retrieve lattice information from an fdf file (SIESTA input).
    # Sets self.LatticeMatrix (lattice vectors stored as COLUMNS) or leaves it
    # None when no lattice block is found.
    def QueryLattice(self,FileName):
        SuccessMessage = 'The lattice information was successfully retreived.\n'
        # If no lattice information is found, LatticeMatrix defaults to None.
        self.LatticeMatrix = None
        # Then it opens the file and reads through until it finds lattice
        # information keywords:
        file = open(FileName,'r')
        while True:
            line = file.readline()
            if line == '':
                print 'The lattice information could not be found. LatticeMatrix = None\n'
                break
            # If the fdf file provides lattice vectors, getting the vectors is quite easy:
            if '%block latticevectors' in line.lower():
                # First it creates the matrix of zeros:
                self.LatticeMatrix = np.zeros((3,3))
                # Then each vector is read:
                for ivec in range(3):
                    # Each vector is split into a string list:
                    StringVec = file.readline().split()
                    for idim in range(3):
                        # Each element is interpreted as a float and assigned to
                        # the matrix; note the [idim,ivec] order puts one lattice
                        # vector in each COLUMN:
                        self.LatticeMatrix[idim,ivec] = float(StringVec[idim])
                # Then it raises an error if the end of the block is not found:
                if '%endblock latticevectors' not in file.readline().lower():
                    raise FileFormatError
                print SuccessMessage
                break
            # If the fdf file provides lattice parameters (crystallography format),
            # then getting the lattice vectors is a little bit more involved:
            if '%block latticeparameters' in line.lower():
                line = file.readline().split()
                # First it parses the line and retrieves lattice parameters
                # (lengths a, b, c and angles alpha, beta, gamma in degrees):
                a = float(line[0])
                b = float(line[1])
                c = float(line[2])
                RadAlpha = float(line[3]) * np.pi / 180
                RadBeta = float(line[4]) * np.pi / 180
                RadGamma = float(line[5]) * np.pi / 180
                # Then it raises an error if the end of the block is not found:
                if '%endblock latticeparameters' not in file.readline().lower():
                    raise FileFormatError
                # Then it finds each element of the matrix with the given info.
                # Vector a is placed along x, vector b in the xy plane; the
                # components of c follow from the dot products b.c = b*c*cos(alpha)
                # and a.c = a*c*cos(beta):
                x1 = a
                y1 = 0
                z1 = 0
                x2 = b * np.cos(RadGamma)
                y2 = b * np.sin(RadGamma)
                z2 = 0
                x3 = c * np.cos(RadBeta)
                y3 = (b * c * np.cos(RadAlpha) - x2*x3) / y2
                z3 = np.sqrt(c**2 - x3**2 - y3**2)
                # Then it assigns the information to the LatticeMatrix:
                self.LatticeMatrix = np.array([(x1,x2,x3),(y1,y2,y3),(z1,z2,z3)])
                print SuccessMessage
                break
        return
    # The default representation prints the number of atoms:
    def __repr__(self):
        return "ConfigClass Object of " + str(len(self.anum)) + " atoms."
    # When converted to a string, it lists the atomic numbers of all atoms:
    def __str__(self):
        return self.anum.__str__()
    # Tool to return the centroid (unweighted mean position) of the system.
    # Adapted from Dr. Gopinath Subramanian's molpro-neb routines:
    def centroid(self):
        return np.average(self.coord,axis=0)
    # Returns a new ConfigClass instance that represents the adsorbent (all atoms
    # except the last AdmoleculeSize ones). Only works if AdmoleculeSize is
    # properly defined:
    def adsorbent(self):
        # First it makes a copy of self and declares a new ConfigClass:
        TotalSystem = copy.deepcopy(self)
        NewConfig = ConfigClass()
        # Then it assigns values to attributes appropriately:
        NewConfig.anum = TotalSystem.anum[: len(TotalSystem.anum)-TotalSystem.AdmoleculeSize]
        NewConfig.coord = TotalSystem.coord[: len(TotalSystem.coord)-TotalSystem.AdmoleculeSize]
        NewConfig.E = 0.0
        NewConfig.AdmoleculeSize = TotalSystem.AdmoleculeSize
        NewConfig.LatticeMatrix = None
        NewConfig.SubLatticeGrid = None
        return NewConfig
    # Returns a new ConfigClass instance that represents the admolecule (the last
    # AdmoleculeSize atoms). Only works if AdmoleculeSize is properly defined:
    def admolecule(self):
        TotalSystem = copy.deepcopy(self)
        NewConfig = ConfigClass()
        NewConfig.anum = TotalSystem.anum[len(TotalSystem.anum)-TotalSystem.AdmoleculeSize :]
        NewConfig.coord = TotalSystem.coord[len(TotalSystem.coord)-TotalSystem.AdmoleculeSize:]
        NewConfig.E = 0.0
        NewConfig.AdmoleculeSize = TotalSystem.AdmoleculeSize
        NewConfig.LatticeMatrix = None
        NewConfig.SubLatticeGrid = None
        return NewConfig
    # Raises an error if inconsistencies are found. It checks the
    # datatype and length of the anum and coord arrays:
    def verify(self):
        try:
            valid = len(self.anum) == len(self.coord)
            valid = valid and type(self.anum) is type(self.coord) is np.ndarray
            if not valid:
                raise SizeInconsistency
        except SizeInconsistency:
            sys.stderr.write('There is a size inconsistency in your ConfigClass object.')
            raise
    # Appends the geometry of self to an xyz file.
    # NOTE(review): the default lock is created once at function-definition time
    # and is therefore private to this process; callers that want real
    # inter-process exclusion must pass a shared lock explicitly (as Write()
    # does) -- confirm this is the intent.
    def WriteGeom(self,FileName,lock = multiprocessing.Lock()):
        # First check for size inconsistencies:
        self.verify()
        # Then write the number of atoms plus a blank title line:
        buffer = str(len(self.coord)) + '\n\n'
        # Then loop through every atom:
        for iatom in range(len(self.anum)):
            # Then convert the numbers in anum to the atomic symbols:
            buffer += ptable.symbol(self.anum[iatom])
            # Then loop through every coordinate in each atom:
            for icoord in range(3):
                buffer += ' ' + str(self.coord[iatom][icoord])
            buffer += '\n'
        # Then append the buffer to the file under the lock:
        lock.acquire()
        file = open(FileName,'a')
        file.write(buffer)
        file.close()
        lock.release()
        return
    # Prepares a file to write the energies to in a python-readable format
    # (overwrites any existing file):
    def StartWriteEnergy(self,FileName,lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it imports numpy:
        buffer = 'import numpy as np\n'
        # Then it initializes an empty numpy array:
        buffer += 'energy = np.array((),dtype=float)\n'
        # Then it writes the buffer to the file:
        lock.acquire()
        file = open(FileName,'w')
        file.write(buffer)
        file.close()
        lock.release()
        return
    # Appends energy data to a python-readable file:
    def WriteEnergy(self,FileName,lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it directly writes the energy to the file:
        lock.acquire()
        file = open(FileName,'a')
        file.write('energy = np.append(energy,' + str(self.E) + ')\n')
        file.close()
        lock.release()
        return
    # Generates a new file to record the most important ConfigClass attributes to
    # (overwrites any existing file):
    def StartWriteConfig(self,FileName, lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it imports necessary libraries and initializes an empty list:
        buffer = 'import sys\n'
        buffer += 'from numpy import *\n'
        buffer += 'ConfigList = []\n'
        buffer += '# Dear User: Append the directory where MC_Util.py is\n'
        buffer += 'sys.path.append("..") # (Assumes in directory above)\n'
        buffer += 'from MC_Util import ConfigClass\n\n'
        # Then it writes the buffer to the file:
        lock.acquire()
        file = open(FileName,'w')
        file.write(buffer)
        file.close()
        lock.release()
        return
    # Appends the most important ConfigClass attributes to a python-readable file:
    def WriteConfig(self, FileName, lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it writes important information to the buffer. Note this relies
        # on repr() of numpy arrays being re-evaluable in the reader's namespace
        # (hence the 'from numpy import *' written by StartWriteConfig):
        buffer = '\nTempConfig = ConfigClass()\n'
        buffer += 'TempConfig.anum = ' + repr(self.anum) + '\n'
        buffer += 'TempConfig.coord = ' + repr(self.coord) + '\n'
        buffer += 'TempConfig.E = ' + repr(self.E) + '\n'
        buffer += 'TempConfig.AdmoleculeSize = ' + repr(self.AdmoleculeSize) + '\n'
        buffer += 'TempConfig.LatticeMatrix = ' + repr(self.LatticeMatrix) + '\n'
        buffer += 'TempConfig.SubLatticeGrid = ' + repr(self.SubLatticeGrid) + '\n'
        buffer += 'ConfigList.append(ConfigClass(TempConfig))\n'
        # Then it appends the buffer to the file:
        lock.acquire()
        file = open(FileName,'a')
        file.write(buffer)
        file.close()
        lock.release()
        return
    # Generates a new file to record the floppy coordinates to
    # (overwrites any existing file):
    def StartWriteFloppy(self, FileName, lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it imports numpy:
        buffer = 'import numpy as np\n'
        # Then it initializes an empty numpy array (one 6-vector per row):
        buffer += 'FloppyCoord = np.zeros((0,6))\n'
        # Then it writes the buffer to the file:
        lock.acquire()
        file = open(FileName,'w')
        file.write(buffer)
        file.close()
        lock.release()
        return
    # Appends floppy coordinate data to a python-readable file:
    def WriteFloppy(self,FileName,lock = multiprocessing.Lock()):
        # First it checks for size inconsistencies:
        self.verify()
        # Then it directly writes the floppy coordinates to the file:
        lock.acquire()
        file = open(FileName,'a')
        file.write('FloppyCoord = np.append(FloppyCoord,' + 'np.' + repr(self.FloppyCoord()) + ')\n')
        file.close()
        lock.release()
        return
    # Runs StartWriteEnergy() and StartWriteFloppy() automatically:
    def StartWrite(self, CommandHome, lock = multiprocessing.Lock() ):
        try:
            # First it makes sure the path ends with '/':
            if not CommandHome.endswith('/'):
                CommandHome += '/'
            # Then it calls the two functions:
            self.StartWriteEnergy(CommandHome + '/output/EnergyEnsemble.py',lock = lock)
            # I have decided this is only needed for debugging purposes:
            #self.StartWriteConfig(CommandHome + '/output/ConfigEnsemble.py',lock = lock)
            self.StartWriteFloppy(CommandHome + '/output/FloppyEnsemble.py',lock = lock)
            return
        except AttributeError:
            sys.stdout.write('The first argument of ConfigClass().StartWrite() must be a string\n')
            raise
    # Runs WriteGeom, WriteEnergy, and WriteFloppy automatically:
    def Write(self, CommandHome, lock = multiprocessing.Lock() ):
        try:
            # First it makes sure the path ends with '/':
            if not CommandHome.endswith('/'):
                CommandHome += '/'
            # Then it calls the three functions and returns:
            self.WriteGeom(CommandHome + '/output/GeomEnsemble.xyz', lock = lock)
            self.WriteEnergy(CommandHome + '/output/EnergyEnsemble.py', lock = lock)
            # I have decided WriteConfig is only needed for debugging purposes:
            #self.WriteConfig(CommandHome + '/output/ConfigEnsemble.py', lock = lock)
            self.WriteFloppy(CommandHome + '/output/FloppyEnsemble.py', lock = lock)
            return
        except AttributeError:
            sys.stdout.write('The first argument of ConfigClass().StartWrite() must be a string\n')
            raise
    # Returns a 1x6 array: [centroid_x, centroid_y, centroid_z, alpha, beta, gamma]
    # where the three angles are Euler angles (degrees) of the admolecule frame.
    def FloppyCoord(self):
        # For the sake of clarity, my comments refer to global coordinates as
        # X, Y, and Z, and the local coordinates as x', y', and z'.
        try:
            # First I define an empty six vector that will be edited and returned at end:
            FloppyVec = np.zeros(6)
            # Then the first three elements are filled with the centroid.
            # NOTE(review): this is the centroid of the WHOLE configuration
            # (self), while the class comment above describes the centroid of
            # the admolecule -- confirm which is intended.
            FloppyVec[0:3] = self.centroid()
            # Then the two vectors are created from the first three admolecule atoms:
            admol = self.admolecule()
            vec1 = admol.coord[1] - admol.coord[0]
            vec2 = admol.coord[2] - admol.coord[1]
            # Then it checks if the angle between them is near linear (the local
            # frame would be ill-defined):
            if angle(vec1,vec2) > 179 or angle(vec1,vec2) < 1:
                raise LinearAngleError
            # Then vec1 is normalized and becomes x':
            LocalX = vec1 / np.linalg.norm(vec1)
            # Then the component of vec2 that is perpendicular to x' is normalized
            # and used as y' (Gram-Schmidt step):
            LocalY = vec2 - np.dot(vec2,LocalX) * LocalX
            LocalY = LocalY / np.linalg.norm(LocalY)
            # Then I get z' simply by crossing x' and y':
            LocalZ = np.cross(LocalX,LocalY)
            # Then I define vector N (the line of nodes) as a means to find the
            # proper Euler angles. In most cases, vecN is simply the cross
            # product of z' and Z:
            if not np.array_equal(LocalZ, np.array((0,0,1))):
                vecN = np.cross(LocalZ, np.array((0,0,1)) )
            # However if z' points in the Z direction, then vecN would be identically
            # zero. This is not acceptable. To combat this, we default vecN to the x' direction:
            else:
                vecN = LocalX
            # alpha is the angle between N and X. I assign this to the 4th element of FloppyVec:
            FloppyVec[3] = angle(np.array((1,0,0)), vecN, PosN = np.array((0,0,1)))
            # beta is the angle between Z and z'. I assign this to the 5th element of FloppyVec:
            FloppyVec[4] = angle(np.array((0,0,1)), LocalZ)
            # gamma is the angle between N and x'. I assign this to the 6th element of FloppyVec:
            FloppyVec[5] = angle(vecN, LocalX, PosN = LocalZ)
            # Then I return that 6-vector FloppyVec. This contains all the coordinates
            # that are floppy in a simple admolecule system.
            # When I return the vector, I pad it in brackets so that it is a 2-rank
            # tensor to make concatenate() happy on the other end.
            return np.array([FloppyVec])
        except LinearAngleError:
            print ("The angle between the first 3 atoms in your admolecule is very close to linear. Consider changing the order of your admolecule atoms in your SIESTA input file (sorry).")
            raise
#========================================================================================================
def RotateAxisAngle(conf, unit_axis, DegAngle):
    # A function adapted from Dr. Gopinath Subramanian's molpro-neb routines.
    # Rotates a copy of <conf> about the origin, around <unit_axis> (assumed
    # normalized), by <DegAngle> degrees (right-hand rule), and returns it.
    # <conf> itself is left untouched.
    #========================================================================================================
    rotated = copy.deepcopy(conf)
    RadAngle = DegAngle * np.pi / 180
    ct = np.cos(RadAngle)
    st = np.sin(RadAngle)
    u, v, w = unit_axis[0], unit_axis[1], unit_axis[2]
    x = conf.coord[:,0].copy()
    y = conf.coord[:,1].copy()
    z = conf.coord[:,2].copy()
    # Axis-angle (Rodrigues) rotation written out per component; the axial
    # projection of every point is shared by all three components:
    axial = u*x + v*y + w*z
    rotated.coord[:,0] = u*axial*(1-ct) + x*ct + (-w*y + v*z)*st
    rotated.coord[:,1] = v*axial*(1-ct) + y*ct + ( w*x - u*z)*st
    rotated.coord[:,2] = w*axial*(1-ct) + z*ct + (-v*x + u*y)*st
    return rotated
#====================================================================================
def MC_Cycle(StartConfig, EnsembleSize, CommandHome, ScratchPath, WriteLock, LogQ, baseline):
    # The "backbone" of the MC algorithm. Will be initiated as many parallel processes
    # which will each create new ensemble images and add them to a shared ensemble record.
    # The cycles will terminate when EnsembleTarget is reached.
    # Arguments:
    #   StartConfig:  initial configuration (ConfigClass).
    #   EnsembleSize: shared multiprocessing Value counting accepted images.
    #   CommandHome:  directory containing SiestaFiles/ and output/.
    #   ScratchPath:  NOTE(review): accepted but never used in this body -- confirm.
    #   WriteLock:    lock shared by all workers for output-file writes.
    #   LogQ:         queue that receives this worker's Logger at shutdown.
    #   baseline:     energy (eV) of the separated species, forwarded to AdEnergy().
    #====================================================================================
    # First it imports settings chosen by the user, falling back to MC_Defaults
    # for each name MC_Pref does not provide:
    try:
        from MC_Pref import EnsembleTarget
    except ImportError:
        from MC_Defaults import EnsembleTarget
    try:
        from MC_Pref import NumberOfSiestaCores
    except ImportError:
        from MC_Defaults import NumberOfSiestaCores
    try:
        from MC_Pref import Optimize
    except ImportError:
        from MC_Defaults import Optimize
    try:
        from MC_Pref import RhoLimitDeg
    except ImportError:
        from MC_Defaults import RhoLimitDeg
    try:
        from MC_Pref import ShepherdOn
    except ImportError:
        from MC_Defaults import ShepherdOn
    try:
        from MC_Pref import TransLimit
    except ImportError:
        from MC_Defaults import TransLimit
    # NOTE(review): the remaining fallbacks use a bare "except:", which also
    # hides non-ImportError failures inside MC_Pref (e.g. a typo raising
    # NameError) -- consider narrowing to ImportError like the ones above.
    try:
        from MC_Pref import T_Sequence
    except:
        from MC_Defaults import T_Sequence
    try:
        from MC_Pref import MetaDynamicsOn
    except:
        from MC_Defaults import MetaDynamicsOn
    try:
        from MC_Pref import MetaWidths
    except:
        from MC_Defaults import MetaWidths
    try:
        from MC_Pref import MetaHeight
    except:
        from MC_Defaults import MetaHeight
    try:
        from MC_Pref import CounterpoiseOn
    except:
        from MC_Defaults import CounterpoiseOn
    # Per-worker timing/statistics logger:
    CycleLogger = Logger()
    CycleLogger.process.start()
    # First the variables defined globally are declared in the function and scientific
    # constants are defined:
    k = 8.6173324e-5 # Boltzmann constant in eV K^(-1), from Wikipedia.
    # I also define an empty 2-rank tensor that will store the past positions of the admolecule
    # (one FloppyCoord 6-vector per accepted step; used by the metadynamics bias):
    history = np.zeros((0,6))
    # Note: In the future I am interested in trashing multiprocessing (it doesn't work for
    # nodes anyways) and getting Parallel Python. I am also thinking about getting rid of
    # EnsembleSize and just using the size of history. (But history has to be shared first)
    # Then it begins the process of defining new structures and testing them:
    OldConfig = ConfigClass(StartConfig)
    OldBiasE = 0.
    # T_index = [position in T_Sequence, steps taken at that temperature]:
    T_index = [0,0]
    while True:
        # First it handles the simulated annealing sequence: each entry of
        # T_Sequence is (number of steps, temperature); the sequence wraps around.
        T = T_Sequence[T_index[0]][1]
        T_index[1] += 1
        if T_index[1] == T_Sequence[T_index[0]][0]:
            T_index[1] = 0
            T_index[0] += 1
            if T_index[0] == len(T_Sequence):
                T_index[0] = 0
        # Propose a trial move (translation/rotation limited by TransLimit and
        # RhoLimitDeg), optionally corralled by shepherd(), then score it:
        CandConfig = move(OldConfig,TransLimit,RhoLimitDeg)
        CandConfig = shepherd(CandConfig) if ShepherdOn else CandConfig
        CandConfig.E = AdEnergy(CandConfig,CommandHome,NumberOfSiestaCores,CycleLogger,CounterpoiseOn,baseline,lock = multiprocessing.Lock())
        # Optional metadynamics bias discourages revisiting old positions:
        if MetaDynamicsOn:
            CandBiasE = bias(CandConfig, history, MetaHeight, MetaWidths)
            CandBiasFactor = np.exp(-(CandBiasE-OldBiasE)/(k*T))
        else:
            CandBiasFactor = 1
        # Metropolis acceptance test:
        rand = np.random.random()
        prob = CandBiasFactor * np.exp(-(CandConfig.E-OldConfig.E)/(k*T))
        if rand <= prob:
            OldConfig = ConfigClass(CandConfig) # This is a way to assign without side-effects
            CycleLogger.hit()
            # Adaptive step sizes: widen the move limits after an acceptance.
            # (This rebinds the names imported above, locally to this worker.)
            if Optimize:
                TransLimit = TransLimit * 1.05
                RhoLimitDeg = RhoLimitDeg * 1.05
            # Stop once the shared ensemble has reached its target size:
            if EnsembleSize.value >= EnsembleTarget:
                CycleLogger.process.stop()
                LogQ.put(CycleLogger)
                break
        else:
            CycleLogger.miss()
            # ... and narrow them after a rejection:
            if Optimize:
                TransLimit = TransLimit / 1.05
                RhoLimitDeg = RhoLimitDeg / 1.05
        # Record the current (possibly unchanged) configuration every cycle:
        CycleLogger.ReadWrite.start()
        OldConfig.Write(CommandHome, lock = WriteLock)
        CycleLogger.ReadWrite.stop()
        # NOTE(review): this read-modify-write on the shared Value is not done
        # under a lock, so two workers can race and under-count -- confirm.
        EnsembleSize.value = EnsembleSize.value + 1
        # Recording the position for future metadynamics biasing:
        history = np.concatenate((history,OldConfig.FloppyCoord()),axis=0)
        # Then I redefine the bias for the OldConfig using the OldConfig's new (or old)
        # geometry and the new history.
        OldBiasE = bias(OldConfig, history, MetaHeight, MetaWidths)
    # After the loop ends, dump this worker's timing summary:
    CycleLogger.ReadWrite.start()
    CycleLogger.summary(CommandHome + '/output/time.log',WriteLock)
    CycleLogger.ReadWrite.stop()
#====================================================================================
def SIESTA(config,CommandHome,NumberOfSiestaCores,Logger,ghost=None):
    # This function uses the file template.fdf in the SiestaFiles directory as a template
    # to run a SIESTA job to compute the potential energy of <config>, returned in eV.
    # It appends the geometric coordinates to the end of the file in the form
    # "X\tY\tZ\tN" where N is the species number (which is extracted from
    # template.fdf). If you want to change the energy calculation parameters,
    # edit the template.fdf file.
    # <ghost> selects counterpoise mode: None, 'adsorbent', or 'admolecule'
    # (the named fragment is replaced with ghost atoms via negated atomic numbers).
    # Raises SiestaError when the mpirun/siesta command exits non-zero.
    #====================================================================================
    # First I ensure that <ghost> is defined properly:
    try:
        if (ghost != None) and (ghost != 'adsorbent') and (ghost != 'admolecule'):
            raise ValueError
    except ValueError:
        sys.stdout.write("ghost must be set to None, 'adsorbent', or 'admolecule'.")
    # NOTE(review): the handler above does not re-raise, so execution continues
    # with the invalid <ghost> value (it will then be treated like None in the
    # loops below) -- confirm this is intended.
    # Then the working directory <WorkDir> is created, and the pseudopotentials
    # <*.psf> and input file <template.fdf> are copied to it:
    # Note: In a future version I might want to clean out these directories if they exist:
    WorkDir = '/tmp/' + os.getlogin() # It first creates the subdirectory named with the username.
    if not os.path.isdir(WorkDir): # It has to check if the directory already exists.
        os.mkdir(WorkDir)
    WorkDir = WorkDir + '/proc' + str(multiprocessing.current_process().pid) # This is the name for the subsubdirectory.
    if not os.path.isdir(WorkDir): # Then it checks if the subsubdirectory already exists.
        os.mkdir(WorkDir)
    Logger.ReadWrite.start()
    shutil.copy(CommandHome + '/SiestaFiles/template.fdf', WorkDir + '/')
    os.system('cp ' + CommandHome + '/SiestaFiles/*.psf ' + WorkDir + '/' )
    Logger.ReadWrite.stop()
    # Then we have to define a species dict using the <template.fdf> file. This will
    # convert between atomic numbers (as strings) and the species numbers SIESTA uses:
    Logger.ReadWrite.start()
    with open(WorkDir + '/template.fdf','r') as file:
        SpeciesNumbers = dict()
        NoteTaking = False
        for line in file:
            if '%block ChemicalSpeciesLabel' in line:
                NoteTaking = True
                continue
            if '%endblock ChemicalSpeciesLabel' in line:
                break
            if NoteTaking == True:
                # Each block line is "SpeciesNumber AtomicNumber Label"; map
                # atomic number (string) -> species number (string):
                Note = line.replace('\n','').replace('\t','').replace(',','').split(' ')
                while '' in Note:
                    Note.remove('')
                SpeciesNumbers[Note[1]] = Note[0]
    file.close()   # redundant: the with-statement already closed the file
    Logger.ReadWrite.stop()
    # Then we correct the NumberOfAtoms field in the .fdf file:
    # Explain in user manual that this line must be blank
    os.system("sed -i -e 's/NumberOfAtoms/NumberOfAtoms " + str(len(config.anum)) + "/g' " + WorkDir + "/template.fdf")
    # Then we define an output buffer <OutBuffer> in memory to store the string
    # that we will write to the file:
    OutBuffer = '%block AtomicCoordinatesAndAtomicSpecies\n'
    # We split the buffer into the adsorbent and the admolecule (two loops):
    # Loop for adsorbent:
    for i in range(len(config.adsorbent().anum)):
        for j in range(3):
            OutBuffer += str(config.adsorbent().coord[i][j]) + '\t'
        # If we are ghosting the adsorbent, look up the negated atomic number
        # (ghost species are declared with negative atomic numbers in the fdf):
        if ghost == 'adsorbent':
            OutBuffer += SpeciesNumbers[str(-config.adsorbent().anum[i])]
        else:
            OutBuffer += SpeciesNumbers[str(config.adsorbent().anum[i])]
        OutBuffer += '\n'
    # Loop for admolecule:
    for i in range(len(config.admolecule().anum)):
        for j in range(3):
            OutBuffer += str(config.admolecule().coord[i][j]) + '\t'
        # If we are ghosting the admolecule, we include the ghost atom:
        if ghost == 'admolecule':
            OutBuffer += SpeciesNumbers[str(-config.admolecule().anum[i])]
        else:
            OutBuffer += SpeciesNumbers[str(config.admolecule().anum[i])]
        OutBuffer += '\n'
    OutBuffer = OutBuffer + '%endblock AtomicCoordinatesAndAtomicSpecies'
    # Then we write the geometry block to the file:
    Logger.ReadWrite.start()
    with open(WorkDir + '/template.fdf','a') as file:
        file.write(OutBuffer)
        file.close()   # redundant: the with-statement already closes the file
    Logger.ReadWrite.stop()
    # Then it moves into the <WorkDir> directory, runs the SIESTA job, then moves up.
    # NOTE(review): os.chdir('..') lands in WorkDir's parent (/tmp/<user>), not in
    # the directory the process started from -- confirm no later code relies on
    # the original working directory.
    os.chdir(WorkDir)
    Logger.siesta.start()
    status = os.system('mpirun -np ' + str(NumberOfSiestaCores) + ' siesta < template.fdf | tee template.out')
    Logger.siesta.stop()
    os.chdir('..')
    # Then we need to raise an error if Siesta has an error:
    if status != 0:
        sys.stderr.write('SIESTA returned an error.')
        raise SiestaError
    # Then we extract the energy from the output file:
    Energy = 0.00
    # Dear User: This only works for the Harris functional (or KS). If you edit
    # <template.fdf>, please replace 'siesta: Eharris(eV) =' appropriately.
    Logger.ReadWrite.start()
    with open(WorkDir + '/template.out','r') as file:
        for line in file:
            if ('siesta: Eharris(eV) =' in line) or ('siesta: E_KS(eV) =' in line):
                line = line.replace('siesta: Eharris(eV) =','')
                line = line.replace('siesta: E_KS(eV) =','')
                Energy = float(line)
                break
    file.close()   # redundant: the with-statement already closed the file
    Logger.ReadWrite.stop()
    # Finally it deletes the working directory <WorkDir> and returns the energy
    # in eV (0.00 if no energy line was found):
    shutil.rmtree(WorkDir)
    return Energy
#====================================================================================
def AdEnergy(config,CommandHome,NumberOfSiestaCores,Logger,CounterpoiseOn,baseline,lock = multiprocessing.Lock()):
    # Return the adsorption energy (eV) of <config> relative to <baseline>, the
    # energy of the separated species. With <CounterpoiseOn> a full counterpoise
    # correction is performed (three SIESTA runs) and the basis-set superposition
    # error is appended to "output/SuperError.csv" under <lock>.
    #====================================================================================
    # Note: Consider just getting rid of all the passed arguments and picking up the
    # global variable names. But you would probably have to do this for the SIESTA function too.
    # Without counterpoise: one full-system run minus the separated-species baseline.
    if CounterpoiseOn == False:
        return SIESTA(config,CommandHome,NumberOfSiestaCores,Logger,ghost=None) - baseline
    if CounterpoiseOn:
        # Counterpoise: each fragment is evaluated with the other fragment's
        # basis functions present as ghosts, plus one run of the full system.
        GhostAdsorbent = SIESTA(config,CommandHome,NumberOfSiestaCores,Logger,ghost='adsorbent')
        GhostAdmolecule = SIESTA(config,CommandHome,NumberOfSiestaCores,Logger, ghost='admolecule')
        NoGhost = SIESTA(config,CommandHome,NumberOfSiestaCores,Logger, ghost=None)
        CorrectedE = NoGhost - (GhostAdsorbent + GhostAdmolecule)
        # We also keep track of the basis-set superposition error for recording purposes:
        SuperError = baseline - (GhostAdsorbent + GhostAdmolecule)
        Logger.ReadWrite.start()
        lock.acquire()
        RecordFile = open(CommandHome + '/output/SuperError.csv','a')
        RecordFile.write('\n' + str(SuperError))
        RecordFile.close()
        lock.release()
        Logger.ReadWrite.stop()
        return CorrectedE
#====================================================================================
def attach(adsorbent, admolecule, RightSide=None):
    # Attach the admolecule to the adsorbent and return the fused configuration.
    # If <RightSide> (a 3-element numpy array) is supplied, the admolecule is
    # placed on the side of the surface that <RightSide> points toward. This
    # only works for surfaces that are relatively thin and flat.
    #====================================================================================
    # Work on deep copies so the caller's objects are not mutated:
    admolecule = copy.deepcopy(admolecule)
    adsorbent = copy.deepcopy(adsorbent)
    # Represent the adsorbent as a plane: run one linear regression per axis
    # (each axis regressed on the other two), build the normal vector of each
    # fitted plane, project the adsorbent atoms onto each normal, and keep the
    # normal whose projected distribution has the smallest range.
    coords = adsorbent.coord
    Norms = np.zeros((3, 3))
    AdsorbentDists = np.zeros((3, len(adsorbent.anum)))
    Widths = np.zeros(3)
    for d in range(3):
        # Regress axis <d> on the two remaining axes (cyclic order matches the
        # original unrolled regressions: x~(y,z), y~(z,x), z~(x,y)).
        p1, p2 = (d + 1) % 3, (d + 2) % 3
        X = sm.add_constant(np.column_stack((coords[:, p1], coords[:, p2])))  # Design matrix
        results = sm.OLS(coords[:, d], X).fit()
        m1 = results.params[1]
        m2 = results.params[2]
        # Normal of the plane d = c + m1*p1 + m2*p2 is (1, -m1, -m2) in (d, p1, p2) order:
        Norms[d, d] = 1.0
        Norms[d, p1] = -m1
        Norms[d, p2] = -m2
        Norms[d] = Norms[d] / np.linalg.norm(Norms[d])
        AdsorbentDists[d] = np.dot(coords, Norms[d])
        Widths[d] = np.ptp(AdsorbentDists[d])
    # Choose the distribution with the lowest range and its corresponding norm:
    flattest = np.argmin(Widths)
    Norm = Norms[flattest]
    AdsorbentDist = AdsorbentDists[flattest]
    # If the user specified a side, orient the norm toward that side.
    # Bug fix: the original computed Norm = np.dot(Norm, RightSide)/|RightSide|,
    # replacing the 3-vector with a scalar and breaking every later use of Norm.
    # The intent was orientation, so we flip the vector when it points away.
    # (Also: `!= None` on a numpy array is elementwise; use `is not None`.)
    if RightSide is not None:
        if np.dot(Norm, RightSide) < 0:
            Norm = -Norm
    # Center the projected adsorbent positions about their mean:
    AdsorbentDist = AdsorbentDist - np.average(AdsorbentDist)
    # We do the same with the admolecule:
    AdmoleculeDist = np.dot(admolecule.coord, Norm)
    AdmoleculeDist = AdmoleculeDist - np.average(AdmoleculeDist)
    # Use the max of AdsorbentDist and the min of AdmoleculeDist plus 1.77 as the
    # starting centroid separation for the Lennard-Jones optimization
    # (1.77 comes from Zimmerman et al. [1]):
    InterDist = np.max(AdsorbentDist) - np.min(AdmoleculeDist) + 1.77
    def QuickEnergy(InterDist):
        # 6-12 Lennard-Jones energy of the pair at centroid separation <InterDist>.
        R_eq = 1.77
        TransVec = (adsorbent.centroid() + Norm * InterDist) - admolecule.centroid()
        NewAdMoleculeCoords = admolecule.coord + TransVec
        InterDistances = np.zeros(0)
        # Note: Vectorize this in the future if possible.
        for iAtom in adsorbent.coord:
            for jAtom in NewAdMoleculeCoords:
                InterDistances = np.append(InterDistances, np.linalg.norm(iAtom - jAtom))
        Energies = (R_eq / InterDistances) ** 12 - 2 * (R_eq / InterDistances) ** 6
        return np.sum(Energies)
    # Minimize the energy over the separation with a 15-step binary search
    # between an "inner" and an "outer" wall.
    # Note: This may not work with strange adsorbent geometries.
    inner = 0.0
    outer = InterDist
    OuterEnergy = QuickEnergy(outer)
    for _ in range(15):
        middle = np.average((inner, outer))
        MiddleEnergy = QuickEnergy(middle)
        if MiddleEnergy >= OuterEnergy:
            # Energy rose toward the middle: the minimum lies outside it.
            inner = middle
        else:
            # Energy fell: tighten the outer wall onto the lower point.
            outer = middle
            OuterEnergy = MiddleEnergy
    # Use the final outer wall as the distance:
    InterDist = outer
    # Translate the admolecule into place:
    TransVec = (adsorbent.centroid() + Norm * InterDist) - admolecule.centroid()
    admolecule.coord = admolecule.coord + TransVec
    # Fuse the two together (admolecule atoms appended last) and return:
    adsorbent.coord = np.append(adsorbent.coord, admolecule.coord, axis=0)
    adsorbent.anum = np.append(adsorbent.anum, admolecule.anum)
    adsorbent.AdmoleculeSize = len(admolecule.anum)
    return ConfigClass(adsorbent)
#====================================================================================
def move(config,TransLimit,RhoLimitDeg):
    # Returns a randomly perturbed configuration given an original configuration. This
    # function works with any single-molecule adsorbate system as long as the admolecule
    # atoms are listed last in the ConfigClass. Translates the admolecule over a flat
    # distribution (up to <TransLimit>) in a uniformly random direction and rotates it
    # by a flat angle distribution (up to <RhoLimitDeg> degrees) about a random axis.
    #====================================================================================
    RhoLimitDeg = min(RhoLimitDeg,360)
    CopyConfig = copy.deepcopy(config)
    # Uniformly random unit vector on the sphere for the translation direction:
    PhiTrans = np.random.rand() * 2 * np.pi
    CosThetaTrans = np.random.rand() * 2 - 1
    RandTrans = np.array((0.0,0.0,0.0))
    RandTrans[0] = np.cos(PhiTrans) * np.sqrt(1-CosThetaTrans**2)
    RandTrans[1] = np.sin(PhiTrans) * np.sqrt(1-CosThetaTrans**2)
    RandTrans[2] = CosThetaTrans
    # Scale by a uniform random magnitude in [0, TransLimit):
    RandTrans = RandTrans * np.random.rand() * TransLimit
    # Uniformly random rotation axis and rotation angle in [-RhoLimitDeg, RhoLimitDeg):
    PhiTurn = np.random.rand() * 2 * np.pi
    CosThetaTurn = np.random.rand() * 2 - 1
    RandAxis = np.array((0.0,0.0,0.0))
    RandAxis[0] = np.cos(PhiTurn) * np.sqrt(1-CosThetaTurn**2)
    RandAxis[1] = np.sin(PhiTurn) * np.sqrt(1-CosThetaTurn**2)
    RandAxis[2] = CosThetaTurn
    RandRho = (2 * np.random.rand() - 1) * RhoLimitDeg
    # Note: TransLimit is absolute (the direction vector is normalized).
    # Note: Convention: rho is the rotation about r; phi and theta describe the r vector.
    admolecule = config.admolecule() # Note: Consider deleting this line.
    SystemSize=len(CopyConfig.anum) # Note: Consider deleting this line
    # Rotate the admolecule about its own centroid, then translate it:
    offset = admolecule.centroid()
    admolecule.coord = admolecule.coord - offset
    admolecule = RotateAxisAngle(admolecule,RandAxis,RandRho)
    admolecule.coord = admolecule.coord + RandTrans + offset
    NewConfig = ConfigClass(CopyConfig) # Note: Make a notational choice of whether I deep copy or use the initializer.
    # The admolecule occupies the last AdmoleculeSize rows of the coordinate array:
    NewConfig.coord[SystemSize-config.AdmoleculeSize:SystemSize] = admolecule.coord
    return NewConfig
#====================================================================================
def moveR(config,Dummy,TransLimit,dummy):
    # Returns a randomly perturbed configuration given an original configuration. Unlike
    # the move() function above, this one displaces every atom independently in a
    # uniformly random direction.
    # Dear User: If you want to freeze some atoms or limit the movement of some
    # atoms more than others, just redefine <Weight> accordingly.
    # Note: <Dummy>/<dummy> are ignored; they keep the signature compatible with move().
    # Note: consider adding an option to move lattice vectors.
    #====================================================================================
    CopyConfig = copy.deepcopy(config)
    NumAtoms = len(CopyConfig.anum)
    # Per-atom weight; set entries below 1 to damp (or 0 to freeze) individual atoms.
    Weight = np.ones(NumAtoms)
    # One uniformly random direction on the sphere per atom:
    ThetaTrans = np.random.rand(NumAtoms) * 2 * np.pi
    CosPhiTrans = np.random.rand(NumAtoms) * 2 - 1
    dR = np.zeros(CopyConfig.coord.shape)
    # Bug fix: the original wrote dR[:][0], which addresses *row* 0 (atom 0's xyz),
    # not column 0; proper column indexing gives each atom its own displacement.
    dR[:, 0] = np.cos(ThetaTrans) * np.sqrt(1 - CosPhiTrans**2) * Weight * TransLimit
    dR[:, 1] = np.sin(ThetaTrans) * np.sqrt(1 - CosPhiTrans**2) * Weight * TransLimit
    dR[:, 2] = CosPhiTrans * Weight * TransLimit
    NewConfig = ConfigClass(CopyConfig)
    # Bug fix: the original used the attribute name "Coord"; the attribute is
    # spelled "coord" everywhere else in this module.
    # NOTE(review): each atom moves by exactly TransLimit (random direction, fixed
    # magnitude), unlike move() which also randomizes the magnitude — confirm intent.
    NewConfig.coord = CopyConfig.coord + dR
    return NewConfig
#====================================================================================
def shepherd(config):
    # This function checks whether the admolecule centroid has wandered more than one
    # sub-lattice cell away from the adsorbent centroid and, if so, translates the
    # admolecule by whole cells back toward the adsorbent centroid.
    #====================================================================================
    CopyConfig = ConfigClass(config) # Note: I chose to do ConfigClass rather than deepcopy because it checks to make sure you are using ConfigClass and has error handling.
    # Import the sub-lattice preference from MC_Pref.py, falling back to defaults:
    try:
        from MC_Pref import SubLatticeGrid
    except ImportError:
        from MC_Defults import SubLatticeGrid
    # Basis matrix of one sub-lattice cell:
    fence = config.LatticeMatrix / SubLatticeGrid
    # Split the config into adsorbent and admolecule:
    adsorbent=ConfigClass(config.adsorbent())
    admolecule=ConfigClass(config.admolecule())
    # Record the total number of config atoms (used to index the admolecule rows later):
    SystemSize=len(CopyConfig.anum)
    # Represent the admolecule position as the difference between the two centroids:
    dCentroid = admolecule.centroid() - adsorbent.centroid()
    # Express that difference in the sub-lattice basis:
    coeff = np.linalg.solve(fence,dCentroid)
    # Map each coefficient into [-0.5, 0.5): shift up by half a cell, take the
    # modulus, then shift back down.
    ModCoeff = ((coeff+0.5)%1)-0.5
    # Cartesian correction that moves the admolecule back by whole cells:
    yank = np.dot(fence,(ModCoeff-coeff))
    admolecule.coord = admolecule.coord + yank
    NewConfig = ConfigClass(CopyConfig)
    # Overwrite the admolecule rows (the last AdmoleculeSize atoms) with the shifted coordinates:
    NewConfig.coord[SystemSize-config.AdmoleculeSize:SystemSize] = admolecule.coord
    return NewConfig
#====================================================================================
class Timer:
    # Accumulates wall-clock time spent on bracketed operations (e.g. running
    # SIESTA) for performance tracking. Bracket the timed region with start()
    # and stop(); duration accumulates across multiple start/stop cycles, and
    # update() folds elapsed time in without stopping. Adding two Timers sums
    # their durations (see __add__).
    #====================================================================================
    class DoubleStart(Exception):
        # Raised (and handled internally) when start() is called while already timing.
        pass
    class DoubleStop(Exception):
        # Raised (and handled internally) when stop() is called while not timing.
        pass
    def __init__(self):
        self.duration = 0.0   # total accumulated seconds
        self.tick = None      # wall time of the last start()/update()
        self.timing = False   # True between start() and stop()
    def start(self):
        # Begin (or warn about an already-running) timing interval.
        try:
            if self.timing:
                raise self.DoubleStart
            self.tick = time.time()
            self.timing = True
        except self.DoubleStart:
            sys.stderr.write( "You seem to have used start() twice without using stop(). No big deal, I will just pretend you didn't do that, but consider contacting the developer about this bug." )
    def update(self):
        # Fold elapsed time into duration without stopping the timer.
        if self.timing:
            self.duration = self.duration + time.time() - self.tick
            self.tick = time.time()
    def stop(self):
        # End the current timing interval, accumulating its length.
        try:
            if not self.timing:
                raise self.DoubleStop
            self.duration = self.duration + time.time() - self.tick
            self.timing = False
        except self.DoubleStop:
            # Bug fix: this previously called sys.stderr.wite (a typo), which raised
            # AttributeError on a double stop instead of printing the warning.
            sys.stderr.write( "You seem to have used stop() twice without using start(). No big deal, I will just pretend you didn't do that, but consider contacting the developer about this bug." )
    # Adding two Timers will update both of them and return a timer with a duration
    # that is the sum of the durations of the two terms, ticks at the time the
    # addition happens, and is timing if either of the two terms is timing.
    def __add__(self, other):
        self.update() # Note: Make sure I make deep copies if necessary.
        other.update()
        sum = Timer() # Note: I'm not sure if I should call timer or self
        sum.tick = time.time()
        sum.timing = self.timing or other.timing
        sum.duration = self.duration + other.duration
        return sum
#====================================================================================
class Logger:
    # Records performance of the MC program: time spent reading/writing files or
    # running SIESTA, plus the "hit rate" of the MC algorithm. If both the
    # process and SuperProcess timers are greater than zero, summary() also
    # reports a parallel-speedup figure, treating SuperProcess as the wall time
    # of a parent waiting on several worker processes running in parallel.
    # Note: by default use the subprocess I think
    # Note: I need to find a way to prevent side effects on this.
    #====================================================================================
    def __init__(self):
        self.siesta = Timer()        # time spent inside SIESTA runs
        self.ReadWrite = Timer()     # time spent reading/writing files
        self.process = Timer()       # wall time of this worker process
        self.SuperProcess = Timer()  # wall time of the supervising process
        self.hits = 0                # accepted MC moves
        self.misses = 0              # rejected MC moves
    def hit(self):
        # Count one accepted MC move.
        self.hits = self.hits + 1
    def miss(self):
        # Count one rejected MC move.
        self.misses = self.misses + 1
    # NOTE(review): the default FileLock is created once, at function-definition
    # time, so all default-argument callers in one process share a single lock.
    # That serializes their writes (likely intended), but it cannot synchronize
    # unrelated processes — confirm this matches how summary() is called.
    def summary(self,FileName,FileLock = multiprocessing.Lock()):
        # Append a human-readable performance summary for this process to <FileName>.
        self.siesta.update()
        self.ReadWrite.update()
        self.process.update()
        self.SuperProcess.update()
        sum = 'Logger summary for Process ' + str(multiprocessing.current_process().pid) + ':\n'
        sum += 'Time spent on SIESTA: ' + str(self.siesta.duration) + 's'
        if self.process.duration > 0:
            sum += ' (' + str(self.siesta.duration/self.process.duration*100.0) + '%)'
        sum += '\nTime spent on reading and writing: ' + str(self.ReadWrite.duration) + 's'
        if self.process.duration > 0:
            sum += ' (' + str(self.ReadWrite.duration/self.process.duration*100.0) + '%)'
        sum += '\nTime of this process: ' + str(self.process.duration) + 's\n'
        if self.SuperProcess.duration > 0 :
            sum += 'Time of this super process: ' + str(self.SuperProcess.duration) + 's\n'
        if self.process.duration > 0 and self.SuperProcess.duration > 0:
            sum += 'Parallel speedup: ' + str(self.process.duration/self.SuperProcess.duration) + '\n'
        sum += 'Total hits: ' + str(self.hits)
        if (self.hits + self.misses) > 0:
            sum += ' (' + str(self.hits/float(self.hits+self.misses)*100.0) + '%)'
        sum += '\nTotal misses: ' + str(self.misses)
        if (self.hits + self.misses) > 0:
            sum += ' (' + str(self.misses/float(self.hits+self.misses)*100.0) + '%)'
        sum += '\n'
        FileLock.acquire()
        with open(FileName,'a') as WriteFile:
            WriteFile.write(sum)
            WriteFile.close() # redundant: the with-block closes the file; harmless no-op
        FileLock.release()
    def __add__(self,other):
        # Merge two Loggers: timers add (see Timer.__add__), counters sum.
        sum = Logger()
        sum.siesta = self.siesta + other.siesta
        sum.ReadWrite = self.ReadWrite + other.ReadWrite
        sum.process = self.process + other.process
        sum.hits = self.hits + other.hits
        sum.misses = self.misses + other.misses
        sum.SuperProcess = self.SuperProcess + other.SuperProcess
        return sum
    # Note: Add some error handling here.
# References:
# [1] Zimmerman, Paul M., Martin Head-Gordon, and Alexis T. Bell. "Selection and validation
#     of charge and Lennard-Jones parameters for QM/MM simulations of hydrocarbon interactions
#     with zeolites." Journal of Chemical Theory and Computation 7.6 (2011): 1695-1703.
| ttruttmann/monte-carlo | MC_Util.py | Python | mit | 58,383 | [
"Molpro",
"SIESTA"
] | 9eb294482ef1236f55e904c395402d7f0c47680411c958835329c1f4b144f576 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyio(RPackage):
    """Routines for parsing Affymetrix data files based upon file format
    information. Primary focus is on accessing the CEL and CDF file
    formats."""
    # Bioconductor package metadata (affyio, Bioconductor 3.5 release):
    homepage = "https://bioconductor.org/packages/affyio/"
    url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/affyio_1.46.0.tar.gz"
    # Second argument is the release tarball's checksum (md5-length hex digest).
    version('1.46.0', 'e1f7a89ae16940aa29b998a4dbdc0ef9')
    depends_on('r-zlibbioc', type=('build', 'run'))
    # This Bioconductor release is pinned to the R 3.4.x series:
    depends_on('r@3.4.0:3.4.9', when='@1.46.0')
| lgarren/spack | var/spack/repos/builtin/packages/r-affyio/package.py | Python | lgpl-2.1 | 1,756 | [
"Bioconductor"
] | 345b1f35c6690a74c9b8f1986ec15aec5489d2e394f3d15e7a8913d9a1b71b6d |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkFieldDataToAttributeDataFilter(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkFieldDataToAttributeDataFilter."""
    def __init__(self, module_manager):
        # Register the wrapped filter with one vtkDataSet input and one
        # vtkDataSet output; replaceDoc=True lifts the module help text from
        # the VTK class documentation.
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkFieldDataToAttributeDataFilter(), 'Processing.',
            ('vtkDataSet',), ('vtkDataSet',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkFieldDataToAttributeDataFilter.py | Python | bsd-3-clause | 519 | [
"VTK"
] | 8f111d5cbc205cc4db76c171364e0e84fb8969aeed66da6d12e1b9eb6c8cb338 |
from gpaw.xc.libxc import LibXC
from math import pi
import numpy as np
nspins = 1
# Python 2 test script: compare libxc's analytic fxc (second functional
# derivative of Exc) against its own finite-difference evaluation for a set
# of LDA/GGA functionals, and assert the discrepancy stays below 5e-7.
for name in [
    'LDA', 'PBE', 'revPBE', 'RPBE',
    'LDA_X', 'GGA_X_PBE_R', 'GGA_X_RPBE',
    'LDA_C_PW',
    ]:
    xc = LibXC(name)
    xc.initialize(nspins)
    libxc = xc.xc
    lxc_fxc = libxc.calculate_fxc_spinpaired  # analytic kernel
    lxc_fxc_fd = libxc.calculate_fxc_fd_spinpaired  # finite-difference kernel
    na = 2.0
    if nspins == 2:
        nb = 1.0
    else:
        nb = 0.0
    print na, nb
    if (nb > 0.0): assert (nspins == 2)
    # Squared density-gradient contractions (sigma values for aa, ab, bb):
    if nspins == 2:
        sigma0 = 2.0 # (0.0, 1.0, 1.0)
        sigma1 = 2.0
        sigma2 = 5.0 # (1.0, 2.0, 0.0)
    else:
        sigma0 = 2.0 # (0.0, 1.0, 1.0)
        sigma1 = 0.0
        sigma2 = 0.0
    # Kinetic-energy-density-like quantities (computed but not used below):
    taua=(3.*pi**2)**(2./3.)*na**(5./3.)/2.*sigma0
    taub=(3.*pi**2)**(2./3.)*nb**(5./3.)/2.*sigma2
    if ((sigma1 > 0.0) or (sigma2 > 0.0)): assert (nspins == 2)
    na = np.array([na])
    sigma0 = np.array([sigma0])
    # Output buffers for the analytic kernel ...
    dvdn = np.zeros((1))
    dvdnda2 = np.zeros((6))
    dvda2da2 = np.zeros_like(dvdnda2)
    # ... and for the finite-difference kernel:
    dvdn_N = np.zeros_like(dvdn)
    dvdnda2_N = np.zeros_like(dvdnda2)
    dvda2da2_N = np.zeros_like(dvdnda2)
    lxc_fxc(na, dvdn, sigma0, dvdnda2, dvda2da2)
    lxc_fxc_fd(na, dvdn_N, sigma0, dvdnda2_N, dvda2da2_N)
    # Track the largest analytic-vs-FD discrepancy and which term produced it:
    error = [0.0, 'exact']
    for E in [
        ('dvdn', dvdn[0], dvdn_N[0]),
        ('dvdnda2', dvdnda2[0], dvdnda2_N[0]),
        ('dvda2da2', dvda2da2[0], dvda2da2_N[0]),
        ]:
        for e in E[2:]:
            de = abs(e - E[1])
            if de > error[0]:
                error[0] = de
                error[1] = E[0]
    print name, error[0], error[1]
    assert error[0] < 5.0e-7
| qsnake/gpaw | gpaw/test/lxc_fxc.py | Python | gpl-3.0 | 1,639 | [
"GPAW"
] | cb8bf0afd11c421f33645c34c2439a1fa99c0be83d51f0099912b4e5ae803164 |
"""Utilities allowing for high-level testing throughout Pulsar."""
import traceback
import sys
import threading
import os
from contextlib import contextmanager
from stat import S_IXGRP, S_IXOTH
from os import pardir, stat, chmod, access, X_OK, pathsep, environ
from os import makedirs, listdir
from os.path import join, dirname, isfile, split
from os.path import exists
from tempfile import mkdtemp
from shutil import rmtree
import time
import webob
from webtest import TestApp
from webtest.http import StopableWSGIServer
import galaxy.util
import pulsar.util
from galaxy.job_metrics import NULL_JOB_INSTRUMENTER
from galaxy.util.bunch import Bunch
from pulsar.managers.util import drmaa
from pulsar.tools import ToolBox
from pulsar.managers.base import JobDirectory
from pulsar.web.framework import file_response
from unittest import TestCase, skip
# nose is optional: fall back to a no-op nottest decorator when it is missing.
try:
    from nose.tools import nottest
except ImportError:
    def nottest(x):
        return x
import stopit
from functools import wraps
def timed(timeout):
    """Decorator factory: abort the wrapped callable once ``timeout`` seconds elapse."""
    def outer_wrapper(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            with stopit.ThreadingTimeout(timeout) as timeout_ctx:
                f(*args, **kwargs)
            finished = timeout_ctx.state == timeout_ctx.EXECUTED
            if not finished:
                raise Exception("Test function timed out.")
        return wrapper
    return outer_wrapper
# Hard cap (seconds) enforced on tests decorated with @integration_test.
INTEGRATION_MAXIMUM_TEST_TIME = 15
integration_test = timed(INTEGRATION_MAXIMUM_TEST_TIME)
TEST_DIR = dirname(__file__)
ROOT_DIR = join(TEST_DIR, pardir)
# Prefix shared by every scratch directory these helpers create.
TEST_TEMPDIR_PREFIX = 'tmp_pulsar_'
class TempDirectoryTestCase(TestCase):
    """TestCase that provisions a scratch directory per test and removes it afterwards."""

    def setUp(self):
        # Fresh per-test directory, tagged with the shared pulsar test prefix.
        self.temp_directory = temp_directory_persist(prefix=TEST_TEMPDIR_PREFIX)

    def tearDown(self):
        rmtree(self.temp_directory)
def get_test_toolbox():
    """Load the shed toolbox XML bundled in the sibling test_data directory."""
    xml_path = join(dirname(__file__), pardir, "test_data", "test_shed_toolbox.xml")
    return ToolBox(xml_path)
def get_test_tool():
    """Fetch ``tool1`` from the test toolbox."""
    toolbox = get_test_toolbox()
    return toolbox.get_tool("tool1")
class TestManager:
    """Minimal manager stand-in exposing one shared job directory in a temp location."""

    def setup_temp_directory(self):
        # A single JobDirectory (job id '1') under a fresh scratch directory.
        self.temp_directory = temp_directory_persist(prefix='test_manager_')
        self.__job_directory = JobDirectory(self.temp_directory, '1')

    def cleanup_temp_directory(self):
        rmtree(self.temp_directory)

    def job_directory(self, job_id):
        # job_id is ignored; every job maps to the same directory.
        return self.__job_directory
@contextmanager
def test_job_directory():
    """Yield a JobDirectory (job id '1') rooted in a throwaway directory."""
    with temp_directory(prefix='job_') as root:
        yield JobDirectory(root, '1')
@contextmanager
def temp_directory(prefix=''):
    """Context-managed variant of temp_directory_persist: removed on exit."""
    path = temp_directory_persist(prefix=prefix)
    try:
        yield path
    finally:
        rmtree(path)
def temp_directory_persist(prefix=''):
    """Create (and deliberately leave behind) a scratch directory with the test prefix."""
    full_prefix = TEST_TEMPDIR_PREFIX + prefix
    return mkdtemp(prefix=full_prefix)
@contextmanager
def test_manager():
    """Yield a TestManager backed by a temp directory; always clean it up.

    Improvement: cleanup now runs in a ``finally`` block, so the scratch
    directory is removed even when the with-body raises (previously it leaked).
    """
    manager = TestManager()
    manager.setup_temp_directory()
    try:
        yield manager
    finally:
        manager.cleanup_temp_directory()
class TestAuthorization:
    """Toggle-able authorizer double: each allow_* flag gates one authorize_* call."""

    def __init__(self):
        self.allow_setup = True
        self.allow_tool_file = True
        self.allow_execution = True
        self.allow_config = True

    def authorize_setup(self):
        self._check(self.allow_setup)

    def authorize_tool_file(self, name, contents):
        self._check(self.allow_tool_file)

    def authorize_execution(self, job_directory, command_line):
        self._check(self.allow_execution)

    def authorize_config_file(self, job_directory, name, path):
        self._check(self.allow_config)

    @staticmethod
    def _check(allowed):
        # Mirror production authorizers: refusal is signalled by raising.
        if not allowed:
            raise Exception
class TestDependencyManager:
    """Dependency-manager double: resolves nothing, so no shell commands are emitted."""

    def dependency_shell_commands(self, requirements, **kwds):
        # All requirements and keyword options are ignored.
        commands = []
        return commands
class BaseManagerTestCase(TestCase):
    """Shared fixture and assertion helpers for exercising manager implementations.

    Subclasses get a minimal app plus helpers that launch tiny shell commands
    and assert on status, stdout/stderr capture, and cancellation behavior.
    """
    def setUp(self):
        self.app = minimal_app_for_managers()
        self.staging_directory = self.app.staging_directory
        self.authorizer = self.app.authorizer
    def tearDown(self):
        rmtree(self.staging_directory)
    @nottest
    def _test_simple_execution(self, manager, timeout=None):
        # Launch a trivial python one-liner and verify stdout/stderr/return code
        # round-trip through the manager; then verify clean() empties staging.
        command = """python -c "import sys; sys.stdout.write(\'Hello World!\'); sys.stdout.flush(); sys.stderr.write(\'moo\'); sys.stderr.flush()" """
        job_id = manager.setup_job("123", "tool1", "1.0.0")
        manager.launch(job_id, command)
        # Busy-wait until completion, optionally bounded by <timeout> seconds:
        time_end = None if timeout is None else time.time() + timeout
        while manager.get_status(job_id) not in ['complete', 'cancelled']:
            if time_end and time.time() > time_end:
                raise Exception("Timeout.")
        self.assertEqual(manager.stderr_contents(job_id), b'moo')
        self.assertEqual(manager.stdout_contents(job_id), b'Hello World!')
        self.assertEqual(manager.return_code(job_id), 0)
        manager.clean(job_id)
        self.assertEqual(len(listdir(self.staging_directory)), 0)
    def _test_cancelling(self, manager):
        # Launch a long sleep, kill it, and require the status to become 'cancelled'.
        job_id = manager.setup_job("124", "tool1", "1.0.0")
        command = self._python_to_command("import time; time.sleep(1000)")
        manager.launch(job_id, command)
        time.sleep(0.05)
        manager.kill(job_id)
        manager.kill(job_id) # Make sure kill doesn't choke if pid doesn't exist
        self._assert_status_becomes_cancelled(job_id, manager)
        manager.clean(job_id)
    def _python_to_command(self, code, quote='"'):
        # Wrap python source (which must not contain double quotes) in a shell one-liner.
        assert '"' not in code
        return 'python -c "%s"' % "; ".join(code.split("\n"))
    def _assert_status_becomes_cancelled(self, job_id, manager):
        # Poll every 10 ms for up to ~1 second; fail if the job completes,
        # fails, or never reaches 'cancelled'.
        i = 0
        while True:
            i += 1
            status = manager.get_status(job_id)
            if status in ["complete", "failed"]:
                raise AssertionError("Expected cancelled status but got %s." % status)
            elif status == "cancelled":
                break
            time.sleep(0.01)
            if i > 100: # Wait one second
                raise AssertionError("Job failed to cancel quickly.")
def minimal_app_for_managers():
    """Minimal app description for consumption by managers."""
    # Create, then immediately delete, a scratch path so the manager under test
    # receives a fresh staging directory that does not yet exist on disk.
    staging_directory = temp_directory_persist(prefix='minimal_app_')
    rmtree(staging_directory)
    authorizer = TestAuthorizer()
    return Bunch(staging_directory=staging_directory,
                 authorizer=authorizer,
                 job_metrics=NullJobMetrics(),
                 dependency_manager=TestDependencyManager(),
                 object_store=object())
class NullJobMetrics:
    """Job-metrics stand-in whose default instrumenter performs no instrumentation."""
    def __init__(self):
        self.default_job_instrumenter = NULL_JOB_INSTRUMENTER
@nottest
@contextmanager
def server_for_test_app(app):
    """Serve the given TestApp over a real (stoppable) WSGI server for the block."""
    try:
        from paste.exceptions.errormiddleware import ErrorMiddleware
        error_app = ErrorMiddleware(app.app, debug=True, error_log="errors.log")
    except ImportError:
        # paste.exceptions not available for Python 3.
        error_app = app.app
    create_kwds = {
    }
    # Optional host override for environments where the default bind fails:
    if os.environ.get("PULSAR_TEST_FILE_SERVER_HOST"):
        create_kwds["host"] = os.environ.get("PULSAR_TEST_FILE_SERVER_HOST")
    server = StopableWSGIServer.create(error_app, **create_kwds)
    try:
        server.wait()
        yield server
    finally:
        server.shutdown()
        # There seem to be persistent transient problems with the testing, sleeping
        # between creation of test app instances for greater than .5 seconds seems
        # to help (async loop length in code is .5 so this maybe makes some sense?)
        if "TEST_WEBAPP_POST_SHUTDOWN_SLEEP" in environ:
            time.sleep(int(environ.get("TEST_WEBAPP_POST_SHUTDOWN_SLEEP")))
@nottest
@contextmanager
def test_pulsar_server(global_conf={}, app_conf={}, test_conf={}):
    """Build a full Pulsar web app and serve it over HTTP for the duration of the block."""
    with test_pulsar_app(global_conf, app_conf, test_conf) as pulsar_app:
        with server_for_test_app(pulsar_app) as running_server:
            yield running_server
class RestartablePulsarAppProvider:
    """Factory producing successive Pulsar apps over one persistent staging directory.

    Simulates app restarts: each new_app() call builds a fresh app that reuses
    the same staging directory, so state written by one instance is visible to
    the next.
    """
    def __init__(self, global_conf={}, app_conf={}, test_conf={}, web=True):
        self.staging_directory = temp_directory_persist(prefix='staging_')
        self.global_conf = global_conf
        self.app_conf = app_conf
        self.test_conf = test_conf
        self.web = web
    @contextmanager
    def new_app(self):
        # Build a (web or bare) app bound to the persistent staging directory.
        with test_pulsar_app(
            self.global_conf,
            self.app_conf,
            self.test_conf,
            staging_directory=self.staging_directory,
            web=self.web,
        ) as app:
            yield app
    def cleanup(self):
        # Best-effort removal of the persistent staging directory.
        try:
            rmtree(self.staging_directory)
        except Exception:
            pass
@contextmanager
def restartable_pulsar_app_provider(**kwds):
    """Yield a RestartablePulsarAppProvider and always clean up its staging directory.

    Bug fix: construction now happens outside the ``try`` block; previously, if
    the constructor raised, the ``finally`` clause hit a NameError on ``has_app``
    and masked the original exception.
    """
    has_app = RestartablePulsarAppProvider(**kwds)
    try:
        yield has_app
    finally:
        has_app.cleanup()
@nottest
@contextmanager
def test_pulsar_app(
    global_conf={},
    app_conf={},
    test_conf={},
    staging_directory=None,
    web=True,
):
    """Yield a configured Pulsar (web or bare) app over temp staging/cache directories.

    If no staging directory is supplied, one is created and removed on exit;
    a caller-supplied staging directory is left in place for reuse.
    (Fix: removed a dead ``pass`` statement left inside the cleanup loop.)
    """
    clean_staging_directory = False
    if staging_directory is None:
        staging_directory = temp_directory_persist(prefix='staging_')
        clean_staging_directory = True
    # Make staging directory world executable for run as user tests.
    mode = stat(staging_directory).st_mode
    chmod(staging_directory, mode | S_IXGRP | S_IXOTH)
    cache_directory = temp_directory_persist(prefix='cache_')
    app_conf["staging_directory"] = staging_directory
    app_conf["file_cache_dir"] = cache_directory
    app_conf["ensure_cleanup"] = True
    # Keep conda quiet unless the caller explicitly opts in:
    app_conf["conda_auto_init"] = app_conf.get("conda_auto_init", False)
    app_conf["conda_auto_install"] = app_conf.get("conda_auto_install", False)
    try:
        with _yield_app(global_conf, app_conf, test_conf, web) as app:
            yield app
    finally:
        # Always drop the cache; drop staging only if we created it.
        to_clean = [cache_directory]
        if clean_staging_directory:
            to_clean.append(staging_directory)
        for directory in to_clean:
            try:
                rmtree(directory)
            except Exception:
                pass
@contextmanager
def _yield_app(global_conf, app_conf, test_conf, web):
    # Yield either the wsgi webapp, or the underlying pulsar
    # app object if the web layer is not needed.
    try:
        if web:
            from pulsar.web.wsgi import app_factory
            app = app_factory(global_conf, **app_conf)
            yield TestApp(app, **test_conf)
        else:
            from pulsar.main import load_app_configuration
            from pulsar.core import PulsarApp
            app_conf = load_app_configuration(local_conf=app_conf)
            app = PulsarApp(**app_conf)
            yield app
    finally:
        # Best-effort shutdown; bare (non-web) apps take a timeout argument.
        try:
            shutdown_args = []
            if not web:
                shutdown_args.append(2)
            app.shutdown(*shutdown_args)
        except Exception:
            pass
def skip_unless_environ(var):
    """Identity decorator when env var ``var`` is set; otherwise a unittest skip decorator."""
    if var not in environ:
        return skip("Environment variable %s not found, dependent test skipped." % var)
    return lambda func: func
def skip_unless_executable(executable):
    """Identity decorator when ``executable`` is found on PATH; otherwise skip."""
    if not _which(executable):
        return skip("PATH doesn't contain executable %s" % executable)
    return lambda func: func
def skip_unless_module(module):
    """Identity decorator when ``module`` imports cleanly; otherwise skip."""
    try:
        __import__(module)
    except (ImportError, RuntimeError):
        # drmaa raises RuntimeError if DRMAA_LIBRARY_PATH is unset
        return skip("Module %s could not be loaded, dependent test skipped." % module)
    return lambda func: func
def skip_unless_any_module(modules):
    """Identity decorator when at least one of ``modules`` imports; otherwise skip."""
    # Attempt every import (matching the original, which imports all of them
    # for their side effects) and remember whether any succeeded.
    available = False
    for module in modules:
        try:
            __import__(module)
            available = True
        except ImportError:
            pass
    if not available:
        return skip("None of the modules %s could be loaded, dependent test skipped." % modules)
    return lambda func: func
def skip_if_none(value):
    """Return unittest's ``skip`` when ``value`` is None, else an identity decorator."""
    return skip if value is None else (lambda func: func)
def skip_without_drmaa(f):
    """Skip ``f`` when the drmaa session machinery is unavailable."""
    decorator = skip_if_none(drmaa.Session)
    return decorator(f)
def _which(program):
def is_exe(fpath):
return isfile(fpath) and access(fpath, X_OK)
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
for path in environ["PATH"].split(pathsep):
path = path.strip('"')
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
return None
class TestAuthorizer:
    """Authorizer double that hands out one shared TestAuthorization for every tool."""

    def __init__(self):
        self.authorization = TestAuthorization()

    def get_authorization(self, tool_id):
        # tool_id is ignored; all tools share the same authorization object.
        return self.authorization
class JobFilesApp:
    """Tiny WSGI app emulating Galaxy's job-files endpoint for tests.

    GET with a ``path`` parameter streams a file (each path may be served only
    once unless allow_multiple_downloads is set); POST with ``path`` plus a
    ``file`` upload writes the file. All paths must fall under root_directory.
    """
    def __init__(self, root_directory=None, allow_multiple_downloads=False):
        self.root_directory = root_directory
        self.served_files = []
        self.allow_multiple_downloads = allow_multiple_downloads
    def __call__(self, environ, start_response):
        # Dispatch on HTTP method; anything but GET/POST is a hard error.
        req = webob.Request(environ)
        params = req.params.mixed()
        method = req.method
        if method == "POST":
            resp = self._post(req, params)
        elif method == "GET":
            resp = self._get(req, params)
        else:
            raise Exception("Unhandled request method %s" % method)
        return resp(environ, start_response)
    def _post(self, request, params):
        # Store the uploaded file at <path>, creating parent directories as needed.
        path = params['path']
        if not galaxy.util.in_directory(path, self.root_directory):
            assert False, "{} not in {}".format(path, self.root_directory)
        parent_directory = dirname(path)
        if not exists(parent_directory):
            makedirs(parent_directory)
        pulsar.util.copy_to_path(params["file"].file, path)
        return webob.Response(body='')
    def _get(self, request, params):
        # Serve <path> at most once by default, mimicking Galaxy's behavior.
        path = params['path']
        if path in self.served_files and not self.allow_multiple_downloads: # emulate Galaxy not allowing the same request twice...
            raise Exception("Same file copied multiple times...")
        if not galaxy.util.in_directory(path, self.root_directory):
            assert False, "{} not in {}".format(path, self.root_directory)
        self.served_files.append(path)
        return file_response(path)
@contextmanager
def files_server(directory=None, allow_multiple_downloads=False):
    """Serve a JobFilesApp over HTTP; create (and also yield) a temp directory when none is given."""
    if directory:
        app = TestApp(JobFilesApp(directory, allow_multiple_downloads=allow_multiple_downloads))
        with server_for_test_app(app) as server:
            yield server
    else:
        with temp_directory() as directory:
            app = TestApp(JobFilesApp(directory, allow_multiple_downloads=allow_multiple_downloads))
            with server_for_test_app(app) as server:
                yield server, directory
def dump_other_threads():
    """Print the name and current stack of every thread except the caller's.

    Debugging utility for tests whose worker threads refuse to die.
    """
    main_thread = threading.current_thread()
    for thread in threading.enumerate():
        if thread is main_thread:
            continue
        # Fix: Thread.getName() is deprecated (since Python 3.10); use .name.
        print(thread.name)
        traceback.print_stack(sys._current_frames()[thread.ident])
# Extracted from: https://github.com/python/cpython/blob/
# 937ee9e745d7ff3c2010b927903c0e2a83623324/Lib/test/support/__init__.py
class EnvironmentVarGuard:
    """Track mutations to ``os.environ`` and roll them back on exit.

    Adapted from CPython's test.support; usable as a context manager.
    """

    def __init__(self):
        self._environ = os.environ
        self._changed = {}

    def _remember(self, envvar):
        # Record the pre-existing value the first time a variable is touched.
        if envvar not in self._changed:
            self._changed[envvar] = self._environ.get(envvar)

    def __getitem__(self, envvar):
        return self._environ[envvar]

    def __setitem__(self, envvar, value):
        self._remember(envvar)
        self._environ[envvar] = value

    def __delitem__(self, envvar):
        self._remember(envvar)
        if envvar in self._environ:
            del self._environ[envvar]

    def keys(self):
        return self._environ.keys()

    def __iter__(self):
        return iter(self._environ)

    def __len__(self):
        return len(self._environ)

    def set(self, envvar, value):
        self[envvar] = value

    def unset(self, envvar):
        del self[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        # Restore every touched variable to its recorded original state.
        for envvar, original in self._changed.items():
            if original is None:
                if envvar in self._environ:
                    del self._environ[envvar]
            else:
                self._environ[envvar] = original
        os.environ = self._environ
| galaxyproject/pulsar | test/test_utils.py | Python | apache-2.0 | 16,551 | [
"Galaxy"
] | 44b6333673cfc52b498eeac2a68f7b02aac5c9591b828a2481a6221d1724db82 |
# pmx Copyright Notice
# ============================
#
# The pmx source code is copyrighted, but you can freely use and
# copy it as long as you don't change or remove any of the copyright
# notices.
#
# ----------------------------------------------------------------------
# pmx is Copyright (C) 2006-2013 by Daniel Seeliger
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of Daniel Seeliger not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
import sys, os
from ctypes import cdll, c_int, c_float, c_double
from ctypes import c_char_p,POINTER,c_int,byref
# ctypes aliases mirroring the Gromacs C types: "real" is single-precision
# here (c_float), rvec is a 3-vector of reals, matrix a 3x3 array of reals.
c_real = c_float
rvec=c_real*3
matrix=c_real*3*3
class Trajectory:
    """Iterator over the frames of a Gromacs .xtc trajectory file.

    The actual decoding is delegated to the Gromacs shared library
    (libgmx.so), located via the GMX_DLL environment variable and loaded
    through ctypes.  The first frame is read eagerly in the constructor;
    subsequent frames are obtained by iterating over the instance.
    """

    def __init__(self, filename):
        self.filename = filename
        self.libgmx_path = self.__check_gmxlib()
        self.libgmx = cdll.LoadLibrary(self.libgmx_path)
        self.fp = None                    # C file handle returned by open_xtc
        # ctypes "out" parameters filled in by the libgmx reader functions.
        self.natoms = c_int()
        self.step = c_int()
        self.time = c_real()
        self.prec = c_real()
        self.bOK = c_int()                # per-frame integrity flag
        self.box = matrix()               # 3x3 simulation box
        self.x = POINTER(rvec)()          # coordinate array (nm)
        self.__have_first_frame = False
        self.open_xtc(filename)
        self.read_first_xtc()

    def __check_gmxlib(self):
        """Return the full path to libgmx.so, or abort if GMX_DLL is unset."""
        p = os.environ.get('GMX_DLL')
        if not p:
            # sys.stderr.write() replaces the original "print >>" statement:
            # identical output, but valid on both Python 2 and Python 3.
            sys.stderr.write("pmx_Error_> Path to Gromacs shared libraries is not set (GMX_DLL)\n")
            sys.stderr.write("pmx_Error_> Cannot load \"libgmx.so\"\n")
            sys.exit(1)
        return os.path.join(p, 'libgmx.so')

    def open_xtc(self, filename):
        """Open *filename* for reading and store the C file handle."""
        self.libgmx.open_xtc.restype = POINTER(c_char_p)
        # NOTE(review): under Python 3 ctypes expects bytes for char*
        # arguments -- confirm the encoding if this is ever run on py3.
        self.fp = self.libgmx.open_xtc(filename, "r")

    def close_xtc(self):
        """Close the underlying xtc file handle."""
        self.libgmx.close_xtc(self.fp)

    def read_first_xtc(self):
        """Read the first frame; aborts the process on failure."""
        ret = self.libgmx.read_first_xtc(
            self.fp, byref(self.natoms), byref(self.step), byref(self.time),
            self.box, byref(self.x), byref(self.prec), byref(self.bOK))
        if not ret:
            sys.stderr.write("pmx_Error_> in Trajectory.read_first_xtc()\n")
            sys.exit(1)
        self.__have_first_frame = True

    def read_next_xtc(self):
        """Advance to the next frame; returns a false value at end of file."""
        if not self.__have_first_frame:
            sys.stderr.write("pmx_Error_> First frame not read\n")
            sys.exit(1)
        ret = self.libgmx.read_next_xtc(
            self.fp, self.natoms, byref(self.step), byref(self.time),
            self.box, self.x, byref(self.prec), byref(self.bOK))
        return ret

    def update_box(self, box):
        """Copy the current frame's 3x3 box matrix into *box* (in place)."""
        for i in range(3):
            for k in range(3):
                box[i][k] = self.box[i][k]

    def update_atoms(self, atom_sel):
        """Copy current-frame coordinates into atom_sel.atoms (in place).

        Coordinates are scaled from nm to Angstrom when
        atom_sel.unity == 'A'.
        """
        assert len(atom_sel.atoms) == self.natoms.value
        for i, atom in enumerate(atom_sel.atoms):
            if atom_sel.unity == 'A':
                atom.x[0] = self.x[i][0] * 10
                atom.x[1] = self.x[i][1] * 10
                atom.x[2] = self.x[i][2] * 10
            else:
                atom.x[0] = self.x[i][0]
                atom.x[1] = self.x[i][1]
                atom.x[2] = self.x[i][2]

    def update(self, atom_sel):
        """Update both coordinates and box of *atom_sel* from the current frame."""
        self.update_atoms(atom_sel)
        self.update_box(atom_sel.box)

    def get_box(self):
        """Return the 3x3 box matrix of the current frame as nested lists."""
        # BUG FIX: the last element of each row previously re-used column
        # index 1 instead of 2, silently dropping the third box column.
        return [[self.box[i][0], self.box[i][1], self.box[i][2]]
                for i in range(3)]

    def get_natoms(self):
        """Number of atoms per frame."""
        return self.natoms.value

    def get_time(self):
        """Simulation time of the current frame."""
        return self.time.value

    def get_step(self):
        """MD step number of the current frame."""
        return self.step.value

    def get_prec(self):
        """Compression precision of the xtc file."""
        return self.prec.value

    def get_x(self, unity='nm'):
        """Return current-frame coordinates as a list of [x, y, z] lists.

        unity: 'nm' (native) or 'A' to scale to Angstrom.
        """
        x = []
        # BUG FIX: range(self.get_natoms) passed the *method object* to
        # range() and raised TypeError; the method must be called.
        for i in range(self.get_natoms()):
            if unity == 'A':
                x.append([self.x[i][0] * 10, self.x[i][1] * 10, self.x[i][2] * 10])
            else:
                x.append([self.x[i][0], self.x[i][1], self.x[i][2]])
        return x

    def __str__(self):
        return '< Trajectory: %s | natoms = %d | step = %d | time = %8.2f >' % (
            self.filename, self.get_natoms(), self.get_step(), self.get_time())

    def __iter__(self):
        return self

    def next(self):
        """Python 2 iterator protocol: read the next frame or stop."""
        ok = self.read_next_xtc()
        if not ok:
            raise StopIteration
        return self

    # Backward-compatible alias so the same class iterates under Python 3.
    __next__ = next
| tectronics/pmx | pmx/xtc.py | Python | lgpl-3.0 | 5,230 | [
"Gromacs"
] | acdafc2d304defb630c524c16d42bee9da5f0db90c5d15556fbc81f8214e416d |
SU_ITEMS = [
'The Xiphos',
"Kraken's Cutlass",
'Saber of the Stormsail',
'Aeterna',
'The Defender',
'Aurumvorax',
'Durandal the Blazing Sword',
"Qarak's Will",
"Tylwulf's Betrayal",
'Blacktongue',
'Feltongue',
'Shadowsabre',
'Grotesque Bite',
'The Captain',
'Nimmenjuushin',
"Hephasto's Reaver",
'Elder Law',
'Malus Domestica',
'Dreamflange',
'Firequeen',
'Stormpick',
'Viper Mandate',
'Legion',
"Dead Lake's Lady",
'Thunderbane',
'Screaming Serpent',
"Night's Embrace",
'Lancea',
'Steel Pillar',
'Darkspite',
'The Pride of Caldeum',
'Bitter Harvest',
'Bone Scalpel',
'Frostneedle',
'Plunderbird',
"Meshif's Iron Parrot",
'Warshrike',
'Buzzbomb',
'Lacerator',
'Griefbringer',
"Vizjerei's Folly",
'Shamanka',
'Aerin Nexus',
"Xorine's Cane",
'Spire of Kehjan',
'Chillstring',
'Gjallarhorn',
'Stormstrike',
'Fleshstinger',
'Hellreach',
'Ratbane',
"Nihlathak's Bombard",
'Horned Hunter',
'Askarbydd',
'Luminescence',
"Serenthia's Scorn",
'Hellrain',
'Dreamweaver',
'Twin Terrors',
'Implement V.II',
'Legio Di Manes',
"Zular Khan's Tempest",
'Frysturgard',
'Kaskara of the Taan',
'Talic the Unwilling',
'Claw of the Spirit Wolf',
'Esubane',
"Skeld's Battlesong",
'Cleaver of Mirrors',
'Peace Warder',
"Ord Rekar's Testament",
"Yaggai's Sagaris",
'Faerie Pyre',
"Azgar's Mark",
'Guardian of Scosglen',
'Hanfod Tan',
'Iron Shard',
"Reaper's Hand",
"Karybdus' Descent",
'Dreamflayer',
'Mente Scura',
"Hwanin's Hwacha",
"Nymyr's Shadow",
'Grim Visor',
'Manastorm',
'Storm Focus',
"King's Desire",
"Mad King's Spine",
'Darkfeast',
'Herald of Pestilence',
'Chober Chaber',
'In Fero Salva',
'Black Sun Spear',
"Habacalva's Legacy",
'Ghostmoon',
'Bane of the Horadrim',
'Gravetalon',
"Leah's Vision",
'Starhawk',
"Screen of Viz-Jaq'Taar",
'Razorspine',
"Hratli's Craft",
'Alchemist Apron',
"Hepsheeba's Mantle",
'Candlewake',
'Skin of Kabraxis',
'Icetomb',
'Elemental Disciple',
'Scales of the Serpent',
'Robe of Steel',
'Silks of the Victor',
'Khazra Pouch',
'Lacuni Cowl',
'Shiverhood',
'Metalhead',
"Aidan's Lament",
'Eyes of Septumos',
'Gromotvod',
'Royal Circlet',
'Crown of the Black Rose',
"Trang-Oul's Breath",
'Gaze of the Dead',
'Witch Drum',
'Lightforge',
"Vaetia's Wall",
'Shield of Hakan II',
'Rainbow Fury',
'Stone Guardian',
'Sash of the Frozen Seas',
'The Endless Loop',
"Ashaera's Armor",
"Nor Tiraj's Wisdom",
"Lilith's Temptation",
"Blackjade's Legacy",
'Hellmouth',
"Dacana's Fist",
'Blind Anger',
'Hammerfist',
'Clockwork Slippers',
"Titan's Steps",
'Shrill Sacrament',
'Binding Treads',
"Rodeo's Tramplers",
"Karcheus' Temptation",
'Lionblood Crest',
'Crown of Arnazeus',
'Helepolis',
'Vision of the Furies',
'Impundulu',
'Golden Chariot',
'Silver Scorpion',
'The Doom Gong of Xiansai',
'Hexward',
'Banner of Bitter Winds',
'Shadowtwin',
'Bear Grin',
'Eagle Eye',
"Bul Kathos' Temper",
"Rodeo's Hide",
"Toraja's Champion",
'Coldhunger',
'Savage Hunter',
'Forest Defender',
'Phoenix Beak',
"Nature's Orphan",
'Bottled Will-o-Wisp',
'Dark Guardian',
"Hero's Fang",
'Grim Silhouette',
"Barghest's Howl",
'Hivemind',
'The Book of Kalen',
'Orationis Tenebris',
"Argentek's Tide",
"Astreon's Citadel",
'Ignis Demonia',
'The Ritualist',
'Dervish of Aranoch',
"Bartuc's Curse",
'Zann Prodigy',
'Armor of the Old Religion',
'Maleficence'
]
SSU_ITEMS = [
'The Eviscerator',
'Sherazade',
'Alnair',
'Sarandeshi Hellcaller',
'Astral Blade',
'Eternal Vigil',
"Azgar's Crystal",
'Shadowfang',
'Carsomyr',
'The Grandfather',
'Cold Blood',
'Headsman',
'The Colonel',
'Dux Infernum',
'Gotterdammerung',
'Fire Hydra',
'Hammer of Jholm',
'Solar Scion',
'The Redeemer',
'Battlemaiden',
'Vizjerei Fury',
'Moonfang',
'Heartseeker',
'The Retiarius',
'Stormchaser',
'Freakshow',
'Wizardspike',
'Black Razor',
'Drow Valor',
'Dark Nemesis',
'Piranha Swarm',
'Penumbra',
'Ophiophagus',
"Deckard Cain's Heirloom",
'Staff of Shadows',
'Spire of Sarnakyle',
'Absolute Zero',
"Valthek's Command",
'Etrayu',
'Signal Fire',
'Windforce',
'Buriza-Do Kyanon',
'Manticore Sting',
"Athulua's Wrath",
'Chasmstriker',
"Panthera's Bite",
"Titan's Revenge",
'Sagittarius',
'Mind Rake',
'Black Ice',
'Storm Blade',
"Gladiator's Rage",
'Berserrker',
'The Pathless',
'Colliding Fury',
'Thunder King of Sescheron',
'The King of Ents',
'Eye of the Storm',
'Mr. Painless',
'The Biting Frost',
'Soul Reaver',
'Shadowfiend',
'The Defiler',
'Heart of Fire',
'Advent of Hatred',
'Stygian Fury',
'Hand of Rathma',
'Daydreamer',
'Malevolence',
'Smokeless Fire',
'The Pyre',
'Adjudicator',
'The Angiris Star',
'Malleus Maleficarum',
'Venom Sting',
"Jaguar's Grasp",
'Holy Wars',
'Mistress of Pain',
'Glowing Vertigo',
"Atanna's Key",
"Galeona's Lash",
'Steel Punisher',
"Natalya's Deception",
'The Petulant',
'Scales of the Drake',
'Flameskin',
'Hide of the Basilisk',
"Arkaine's Valor",
'Wolverine Skin',
'Segnitia',
"Auriel's Robe",
'Strength Beyond Strength',
'Goetia Plate',
'Khazra Plate',
'Endless Pride',
"Skull of the Viz-Jaq'taar",
'Reapers',
'Veil of Steel',
'Undead Crown',
'Dark Pact',
'Soulsplitter',
'Radiance',
"Griffon's Eye",
'Idol of Rygnar',
'Black Masquerade',
'The Flying Saucer',
'Stormflyer',
'Madawc Val Narian',
'The Collector',
'Celestial Barrier',
'Lunar Eclipse',
'Dementia',
'Black Void',
'Champion of the Triune',
'Demonic Touch',
'Rogue Foresight',
'Facebreaker',
'Lorekeeper',
'Lamha Na Draoithe',
'Spirit Walker',
"Angel's Wrath",
"Akarat's Trek",
"Knight's Grace",
"Rakkis' Benediction",
'Rainbow Maiden',
'Deviant Crown',
'Asgardsreia',
'Danmaku',
'Crimson Dream',
'Hammerfall',
'Eternal Bodyguard',
"Zayl's Temptation",
'Warmonger',
'Voice of Arreat',
'Boarfrost',
"Lilith's Legion",
'Pawnstorm',
"The Ancients' Legacy",
"Greenwalker's Charge",
'Sky Spirit',
"Ranger's Disguise",
'Homunculus',
'Feardrinker',
"The Sentinel's Sorrow",
'Sinwar',
'Blessed Wrath',
'Veil of the Tainted Sun',
'Mark of the Que-Hegan',
'The Vanquisher',
'The Last Crusader',
'Herald of Zakarum',
'Angelhost',
'Cloak of the Outcast',
'Adamantine Guard',
'Frozen Heart',
"Lachdanan's Visage",
"Atanna Khan's Dress",
'Despondence'
]
SSSU_ITEMS = [
'Azurewrath',
'Wintry Majesty',
'The Point of No Return',
'The Searing Heat',
'The Worshipper',
'Arrogance',
'The Awakening',
"Tyrael's Might",
"Tinker's Gambit",
'Unveiling Eye',
'Desolation'
]
SETS = {
"Achilios' Eagle Eye": "Achilios' Wake",
"Achilios' Refuge": "Achilios' Wake",
"Achilios' Sledgehammer": "Achilios' Wake",
"Achilios' Stealth": "Achilios' Wake",
'Ancient Bronze': 'Tundra Walker',
'Angel of Death': "Astrogha's Moon",
'Apefoot': 'Testament of the Apes',
'Apehand': 'Testament of the Apes',
'Apeshoot': 'Testament of the Apes',
'Apeskin': 'Testament of the Apes',
'Apeskull': 'Testament of the Apes',
'Apprentice': "Oak's Teaching",
'Eye of Wisdom (Druid)': "Oak's Teaching",
'Archangel Dark Angel': 'Satanic Mantra',
'Arctic Shard': 'Wintertide',
'Eye of Wisdom (Sorceress)': 'Wintertide',
'Avalanche': 'Mount Arreat',
"Bear's Warding": 'Gathering of the Tribes',
'Bilefroth Skin': 'Henchmen Trophies',
"Blackskull's Horns": 'Henchmen Trophies',
"Bremm's Retribution": 'Curse of the Zakarum',
'Cauldron': 'The Mysteries',
"Celestia's Charge": "Celestia's Myth",
"Celestia's Passion": "Celestia's Myth",
"Celestia's Ribbon": "Celestia's Myth",
"Celestia's Wings": "Celestia's Myth",
'Charged Cloud': 'Thunderstorm',
'Eye of Wisdom (Amazon)': 'Thunderstorm',
"Cinadide's Anvil": "Cinadide's Craft",
"Cinadide's Bellows": "Cinadide's Craft",
"Cinadide's Bender": "Cinadide's Craft",
"Cinadide's Malus": "Cinadide's Craft",
'Darkling': "Astrogha's Moon",
'Dead Lens': 'Universal Law',
'Death Magnet': 'Battle Devices',
'Eye of Wisdom (Assassin)': 'Battle Devices',
'Destiny': 'Spirits of the Nephalem',
'Doomcloud Spine': 'Henchmen Trophies',
'Dyad': "Adria's Circle",
'Earth': 'Pantheon',
'Elemental Clash': 'Elemental Children',
'Elemental Fury': 'Elemental Children',
'Elemental Spirit': 'Elemental Children',
'Elemental Storm': 'Elemental Children',
'Emerald Cloud': 'Rainbow Warrior',
'Emerald Earth': 'Rainbow Warrior',
'Emerald Flower': 'Rainbow Warrior',
'Emerald Sky': 'Rainbow Warrior',
'Eruption': 'Mount Arreat',
'Fangskin Scales': 'Henchmen Trophies',
'Femur of the Prophet': 'Celestial Orchard',
'Fire': 'Pantheon',
'Frost': 'Pantheon',
"Geleb's Greed": 'Curse of the Zakarum',
"Giyua's Sacrum": 'Celestial Orchard',
'Grey Ranger': 'Tundra Walker',
"Hadriel's Avenger": "Hadriel's Lore",
"Hadriel's Presence": "Hadriel's Lore",
"Hadriel's Pure Heart": "Hadriel's Lore",
"Hadriel's Wings": "Hadriel's Lore",
'Hand of Inarius': 'Sanctuary',
"Hunter's Camouflage": 'Big Game Hunter',
"Hunter's Claw Fist": 'Big Game Hunter',
"Hunter's Skin": 'Big Game Hunter',
"Hunter's Trackless Step": 'Big Game Hunter',
'Hypersurface': 'Universal Law',
'Idol of Fortune': 'Trophy Hunter',
'Imperial Guard': 'Guardian of the Sightless Eye',
'Eye of Wisdom (Barbarian)': 'Guardian of the Sightless Eye',
"Imperius' Aura": "Imperius' Edict",
"Imperius' Radiance": "Imperius' Edict",
"Imperius' Sky Hammer": "Imperius' Edict",
"Imperius' Winged Feet": "Imperius' Edict",
'Killing Vector': 'Universal Law',
"Lazarus' Chasuble": 'Archbishop Lazarus',
"Lazarus' Lamen": 'Archbishop Lazarus',
"Lazarus' Surplice Cap": 'Archbishop Lazarus',
"Lazarus' Votive Lamp": 'Archbishop Lazarus',
'Lend Me Thy Light': 'Satanic Mantra',
"Lilith's Disguise": 'Sanctuary',
"Lone Wolf's Claws": "Lone Wolf's Path",
"Lone Wolf's Fur": "Lone Wolf's Path",
"Lone Wolf's Maul": "Lone Wolf's Path",
"Lone Wolf's Track": "Lone Wolf's Path",
"Maffer's Frenzy": 'Curse of the Zakarum',
"Malthael's Crown": "Malthael's Sanctuary",
"Malthael's Halo": "Malthael's Sanctuary",
"Malthael's Ward": "Malthael's Sanctuary",
"Malthael's Wrath": "Malthael's Sanctuary",
"Marchosias' Anger": "Marchosias' Essence",
"Marchosias' Evil Grin": "Marchosias' Essence",
"Marchosias' Hatred": "Marchosias' Essence",
"Marchosias' Might": "Marchosias' Essence",
'Monad': "Adria's Circle",
"Nature's Embrace": 'Caoi Dulra',
"Nature's Vigil": 'Caoi Dulra',
"Nature's Will": 'Caoi Dulra',
"Nature's Wrath": 'Caoi Dulra',
'Pentacle': 'The Mysteries',
'Quantum Bevel': 'Universal Law',
"Rathma's Chase": "Rathma's Empire",
"Rathma's Death Gaze": "Rathma's Empire",
"Rathma's Skeleton": "Rathma's Empire",
"Rathma's Tyranny": "Rathma's Empire",
'Ravine': 'Mount Arreat',
"Red Vex' Curse": "Red Vex' Embrace",
"Red Vex' Flayer": "Red Vex' Embrace",
"Red Vex' Idol": "Red Vex' Embrace",
"Red Vex' Sin": "Red Vex' Embrace",
'Sacred Charge': 'Spirits of the Nephalem',
'Sacred Circle': 'The Mysteries',
'Sacrificial Mind': 'The Offering',
'Eye of Wisdom (Necromancer)': 'The Offering',
"Scosglen's History": 'Last King of Scosglen',
"Scosglen's Legends": 'Last King of Scosglen',
"Scosglen's Myths": 'Last King of Scosglen',
"Scosglen's Tales": 'Last King of Scosglen',
"Serpent's Coil": 'The Snake Pit',
"Serpent's Fangs": 'The Snake Pit',
"Serpent's Tail": 'The Snake Pit',
"Serpent's Tongue": 'The Snake Pit',
"Snake's Battle Chant": 'Gathering of the Tribes',
'Snowstorm': 'Mount Arreat',
"Stareye's Claws": 'Henchmen Trophies',
'Steppe Sleeper': 'Tundra Walker',
'Tetrad': "Adria's Circle",
'The Catalyst': 'Vizjerei Dominion',
'The Coming Storm': 'Gathering of the Tribes',
'The Conqueror': 'Trophy Hunter',
'The Darkness': 'Vizjerei Dominion',
'The Dragon Grave': 'Celestial Orchard',
'The Guardian': 'Spirits of the Nephalem',
'The Hunger': 'Vizjerei Dominion',
'The Judgment': 'Vizjerei Dominion',
'The Presence': 'Vizjerei Dominion',
'The Protector': 'Spirits of the Nephalem',
'The Trickster': 'Celestial Orchard',
'The Witness': 'Armageddon',
'Eye of Wisdom (Paladin)': 'Armageddon',
"Through Death's Veil": 'Satanic Mantra',
'Till We Have Heaven In Sight': 'Satanic Mantra',
'Tower Defense': 'The Towerlord',
'Tower Denial': 'The Towerlord',
'Tower Rush': 'The Towerlord',
'Tower Wall': 'The Towerlord',
"Tracker's Effigy": 'Creed',
"Tracker's Runeward": 'Creed',
"Tracker's Stealth": 'Creed',
"Tracker's Strike": 'Creed',
'Triad': "Adria's Circle",
'Vale Hunter': 'Tundra Walker',
"Vasily's Crescent": "Vasily's Following",
"Vasily's Eclipse": "Vasily's Following",
"Vasily's Falling Star": "Vasily's Following",
"Vasily's Shepherd Moon": "Vasily's Following",
'Viper Skin': "Astrogha's Moon",
"Vizjun's Engine": "Vizjun's Devices",
"Vizjun's Foresight": "Vizjun's Devices",
"Vizjun's Monitor": "Vizjun's Devices",
"Vizjun's Science": "Vizjun's Devices",
"Warmage's Breath": 'The Warmage',
"Warmage's Fireblade": 'The Warmage',
"Warmage's Flameshroud": 'The Warmage',
"Warmage's Wake": 'The Warmage',
'Water': 'Pantheon',
'Witchblade': 'The Mysteries',
"Witchhunter's Crucifix": "Witchhunter's Attire",
"Witchhunter's Faith": "Witchhunter's Attire",
"Witchhunter's Ire": "Witchhunter's Attire",
"Witchhunter's Hood": "Witchhunter's Attire",
"Wolf's Fang": 'Gathering of the Tribes',
'Worldstone': 'Sanctuary',
"Wyand's Perfidy": 'Curse of the Zakarum',
"Yaerius' Alembic": "Yaerius' Grey Omen",
"Yaerius' Mediation": "Yaerius' Grey Omen",
"Yaerius' Simulacrum": "Yaerius' Grey Omen",
"Yaerius' Untruth": "Yaerius' Grey Omen",
"Zann Esu's Binding Circle": "Zann Esu's Secrets",
"Zann Esu's Charm": "Zann Esu's Secrets",
"Zann Esu's Robes": "Zann Esu's Secrets",
"Zann Esu's Rune Loop": "Zann Esu's Secrets",
"Zerae's Holy Wrath": "Zerae's Divinity",
"Zerae's Redemption": "Zerae's Divinity",
"Zerae's Refinement": "Zerae's Divinity",
"Zerae's Vindication": "Zerae's Divinity"
}
RINGS = [
'Der Nebelring',
'Ras Algethi',
'Sigil of Tur Dulra',
'Seal of the Nephalem Kings',
'Ouroboros',
'Witchcraft',
"Giant's Knuckle",
'Empyrean Glory',
'Bad Mood',
'Elemental Band',
'Adrenaline Rush',
'Black Hand Sigil',
'Signet of the Gladiator',
'Ring of Truth',
'Ring of Disengagement',
'Ring of Regha',
'Earth Rouser',
"Xorine's Ring",
'Ripstar',
'The Seal of Kharos',
'Empyrean Band',
'Bloodbond',
"Myokai's Path",
'Sigil of the 7 Deadly Sins',
"Assur's Bane"
]
AMULETS = [
'Black Dwarf',
'Klaatu Barada Nikto',
"Vizjerei's Necklace",
'Locket of Dreams',
'The Tesseract',
'Niradyahk',
'Death Ward',
'Dyers Eve',
"Quov Tsin's Talisman",
'Gallowlaugh',
'Beads of the Snake Queen',
'Lamen of the Archbishop',
'In For The Kill',
'Khanduran Royal Pendant',
'The Buried Hawk',
'Scarab of Death',
'Fren Slairea',
'Hangman',
'Angel Heart',
'The Dreamcatcher',
"Athulua's Oracle",
'Felblood',
'Teganze Pendant',
'Witchmoon',
"Jerhyn's Tawiz",
'Celestial Sigil'
]
JEWELS = [
'Heavenstone',
'Xepera Xeper Xeperu',
"Zann Esu's Stone",
'Suicide Note',
"Inarius' Rock",
'Arkenstone',
'Atomus',
'Jewel of Luck',
'Storm Shard',
'The Boulder',
'Demonstone Blood',
"Asheara's Cateye",
'Farsight Globe',
'Zakarum Stoning Rock',
'Katamari',
'Wishmaster'
]
QUIVERS = [
'Bag of Tricks',
"Kingsport's Signals",
'Locust Hive',
'Cindercone',
'Hanabigami',
'Lammergeier',
"Devil's Dance",
'Plague Gland',
"Larzuk's Bandolier",
'The Tranquilizer'
]
MOS = [
'Larzuk\'s Round Shot',
'Vizjun\'s Ball Bearing',
'Nor Tiraj\'s Flaming Sphere',
'The Demon Core',
'Uldyssian\'s Spirit',
'Orb of Annihilation',
'Warbringer',
'Ten Pin Striker',
'Wrathspirit',
'Idol of Stars',
'Nagapearl',
'The Moon Crystal',
'Auriel\'s Focus',
'Solitude',
'Crystal of Tears',
'Essence of Itherael',
'Periapt of Life',
'Lodestone',
'Kara\'s Trinket',
'Monsterball',
'Heart of Frost',
'Relic of Yaerius',
'The Endless Light',
'Explorer\'s Globe',
'The Perfect Sphere',
'Marksman\'s Eye',
'Zayl\'s Soul Orb',
'Farnham\'s Lost Marble',
'Eye of Malic',
'Apple of Discord',
'Elemental Ire',
'Empyrean Touch',
'Path of Brutality',
'Celestial Wind',
'Elemental Dominion',
'Sigil of Absolution',
'Breath of Thaumaturgy',
'Arcane Hunger'
]
CHARMS = [
'Sunstone of the Twin Seas',
'Sacred Sunstone',
'Shadow Vortex',
'Worldstone Orb',
'Caoi Dulra Fruit',
'Soulstone Shard',
'Eye of Divinity',
'Nexus Crystal',
'The Butcher\'s Tooth',
'Optical Detector',
'Laser Focus Crystal',
'Sacred Worldstone Key',
'Scroll of Kings',
'Visions of Akarat',
'Moon of the Spider',
'Horazon\'s Focus',
'The Black Road',
'Legacy of Blood',
'Fool\'s Gold',
'Spirit Trance Herb',
'Idol of Vanity',
'Azmodan\'s Heart',
'Weather Control',
'Silver Seal of Ureh',
'Crystalline Flame Medallion',
'Soul of Kabraxis',
'Eternal Bone Pile',
'The Book of Lies',
'Dragon Claw',
'The Ancient Repositories',
'Xazax\'s Illusion',
'Astrogha\'s Venom Stinger',
'The Sleep',
'Neutrality Pact',
'Glorious Book of Median',
'Rathma\'s Supremacy',
'Vial of Elder Blood',
'Six Angel Bag',
'Hammer of the Taan Judges',
'Zakarum\'s Ear',
'Sunstone of the Gods',
'Umbaru Treasure',
'Corrupted Wormhole',
'Demonsbane',
'Lylia\'s Curse',
'Cold Fusion Schematics',
'Spirit of Creation'
]
TROPHIES = [
'Archbishop Lazarus Trophy',
'Astrogha Trophy',
'Banisher of Light Trophy',
'Duncraig Trophy',
'Eve of Destruction Trophy',
'Ghosts of Old Bremmtown Trophy',
'Heart of Sin Trophy',
'Judgement Day Trophy',
'Kingdom of Shadow Trophy',
'Legacy of Blood Trophy',
'Lord of Lies Trophy',
"Nephalem's Sacrifice Trophy",
'Quov Tsin Trophy',
'The Triune Trophy',
'The Veiled Prophet Trophy',
'The Void Trophy',
'Tran Athulua Trophy',
'Viz-jun Trophy',
'Xazax Trophy',
'Yshari Sanctum Trophy',
'Archbishop Lazarus Fragment',
'Astrogha Fragment',
'Banisher of Light Fragment',
'Duncraig Fragment',
'Eve of Destruction Fragment',
'Ghosts of Old Bremmtown Fragment',
'Heart of Sin Fragment',
'Judgement Day Fragment',
'Kingdom of Shadow Fragment',
'Legacy of Blood Fragment',
'Lord of Lies Fragment',
"Nephalem's Sacrifice Fragment",
'Quov Tsin Fragment',
'The Triune Fragment',
'The Veiled Prophet Fragment',
'The Void Fragment',
'Tran Athulua Fragment',
'Viz-jun Fragment',
'Xazax Fragment',
'Yshari Sanctum Fragment'
]
# Unidentified shrine containers (the droppable "vessel" base items).
SHRINE_VESSELS = [
'Creepy Vessel',
'Sacred Vessel',
'Quiet Vessel',
'Hidden Vessel',
'Tainted Vessel',
'Ornate Vessel',
'Fascinating Vessel',
'Intimidating Vessel',
'Weird Vessel',
'Trinity Vessel',
'Spiritual Vessel',
'Eerie Vessel',
'Enchanted Vessel',
'Shimmering Vessel',
'Magical Vessel',
'Abandoned Vessel'
]
# Identified shrine items, one per vessel type above (same order).
SHRINES = [
'Creepy Shrine',
'Sacred Shrine',
'Quiet Shrine',
'Hidden Shrine',
'Tainted Shrine',
'Ornate Shrine',
'Fascinating Shrine',
'Intimidating Shrine',
'Weird Shrine',
'Trinity Shrine',
'Spiritual Shrine',
'Eerie Shrine',
'Enchanted Shrine',
'Shimmering Shrine',
'Magical Shrine',
'Abandoned Shrine'
]
# Maps each vessel to the shrine it identifies into.  Every entry is the
# vessel name with 'Vessel' replaced by 'Shrine'; kept as an explicit
# literal so it stays independent of SHRINE_VESSELS/SHRINES above.
VESSEL_TO_SHRINE = {
'Creepy Vessel': 'Creepy Shrine',
'Sacred Vessel': 'Sacred Shrine',
'Quiet Vessel': 'Quiet Shrine',
'Hidden Vessel': 'Hidden Shrine',
'Tainted Vessel': 'Tainted Shrine',
'Ornate Vessel': 'Ornate Shrine',
'Fascinating Vessel': 'Fascinating Shrine',
'Intimidating Vessel': 'Intimidating Shrine',
'Weird Vessel': 'Weird Shrine',
'Trinity Vessel': 'Trinity Shrine',
'Spiritual Vessel': 'Spiritual Shrine',
'Eerie Vessel': 'Eerie Shrine',
'Enchanted Vessel': 'Enchanted Shrine',
'Shimmering Vessel': 'Shimmering Shrine',
'Magical Vessel': 'Magical Shrine',
'Abandoned Vessel': 'Abandoned Shrine'
}
IGNORED_ITEMS = [
'Apple',
'Horadric Cube',
'Minor Healing Potion',
'Light Healing Potion',
'Healing Potion',
'Greater Healing Potion',
'Super Healing Potion',
'Minor Mana Potion',
'Light Mana Potion',
'Mana Potion',
'Greater Mana Potion',
'Super Mana Potion',
'Rejuvenation Potion',
'Full Rejuvenation Potion',
'Ring',
'Amulet',
'Catalyst of Disenchantment',
'Catalyst of Learning',
'Signet of Gold',
'Greater Signet of Gold',
'Large Axe (1)',
'Quilted Armor (1)',
'Buckler (1)',
'Hand Axe (1)',
'Javelin (1)',
'Katar (1)',
'Short Staff (1)',
'Short Sword (1)',
'Wand (1)'
]
RUNEWORDS = [
'Dawn',
'Shark',
'Enyo',
'Azrael',
'Void',
'Oblivion',
'Berith',
'Gehenna',
'Triune',
'Adramelech',
'Archangel',
'Shattered Stone',
'Banshee',
'Amok',
'Shockwave',
'Hornet',
'Balance',
'Nyx',
'Hive',
'Phantom',
'Curse',
'Typhaon',
'Stardust',
'Dead Man\'s Breath',
'Thammuz',
'Python',
'Cyclops',
'Tynged',
'Tartarus',
'Araboth',
'Tombstone',
'Faseroptic',
'Urada',
'Minefield',
'Instinct',
'Sabertooth',
'Midas\' Touch',
'Myrmidon',
'Asymptomatic',
'Endor',
'Atlacamani',
'Seed of Conflict',
'Solarion',
'King\'s Blood',
'Rattus',
'Malicicle',
'Demhe',
'Arachnophilia',
'Calypso',
'Neurogenesis',
'Pax Mystica',
'Hadad',
'Summanus',
'Akhenaten',
'Ljosalf',
'Aes Dana',
'Elverfolk',
'Erilaz',
'Zodiac',
'Kyrie',
'Hand of Frost',
'Prophecy',
'Bane',
'Judas',
'Path',
'Jokulmorder',
'Gabriel',
'Durga',
'Galdr',
'Apostasy',
'Oris\' Herald',
'Raid',
'Hastata',
'Haste',
'Hastilude',
'Hastur',
'Hastin',
'Fleshbane',
'Patriot',
'Chrysopelea',
'Dajjal',
'Quantum',
'Myriad',
'Naiad',
'Burlesque',
'Tau',
'Dead Star',
'Cheetah',
'Fennec',
'Manitou',
'Raptor',
'Thundercloud',
'Kodiak',
'Dragon Seed',
'Aspect',
'Corsair',
'Ice Breaker',
'Eris',
'Colliding Worlds',
'Manta',
'Cecaelia',
'Arnazeus Pinnacle',
'Aegina',
'Titanomachia',
'Sankara',
'Rusalka',
'Evanescence',
'Freybug',
'Flame',
'Scar',
'Firefly',
'Trishula',
'Herfjotur',
'Stalactite',
'Specter',
'Askari Device',
'Shedim',
'Carabosse',
'Fiend',
'Riot',
'Judge',
'Choronzon',
'Anarchy',
'Hail',
'Nahemah',
'Joker',
'Perfection',
'Khattak',
'Shamo',
'Hieros Gamos',
'Lataif-as-Sitta',
'Oniwaka',
'Blooddancer',
'Ram',
'Essus',
'Thunderbird',
'Gharaniq',
'Savitr',
'Skarn',
'Anak',
'Khan',
'Lahmu',
'Kahless',
'Gilgamesh',
'El\'druin',
'Wolfsangel',
'Soldier of Light',
'Peacock',
'Lynx',
'Sylvanshine',
'Tarqeq',
'Cernunnos',
'Raudna',
'Dirge',
'Amphibian',
'Ocean',
'Samhain',
'Augur',
'Lincos',
'Laadan',
'Lojban',
'Loxian',
'Hermanubis',
'Fiacla-Gear\'s Weathervane',
'Black Cat',
'Mantra',
'Lemuria',
'Chthon',
'Eventide',
'Inti',
'Leviathan',
'Tzeentch',
'Bartuc\'s Eye',
'Loa',
'Jinx',
'Misery',
'Seid',
'Roc',
'Mkodos',
'Cut',
'Deep Water',
'Eurynome',
'Ladon',
'Hali',
'Dagda',
'Santa Compana',
'Styx',
'Xazax',
'Koan',
'Ghoul',
'Dead Ringer',
'Ngozi',
'Semhazai',
'Krypteia',
'Doomguard',
'Genie',
'Atlantis',
'Aether',
'Crucible',
'Kronos',
'Rhea',
'Force of Mind',
'Ensi',
'Nehushtan',
'Resheph',
'Taqiyya',
'Orisha',
'Rex Deus',
'Jaguar',
'Hadriel\'s Protector',
'Jihad',
'Intifada',
'Lammasu',
'Quaoar',
'Takfir',
'Brahman',
'Oriflamme',
'Skilt en Vriend',
'Judgement',
'Circe',
'Ker',
'Vertigo',
'Gravastar',
'Kabbalah',
'Hestia',
'Kallisti',
'Sauron',
'Helgrotha',
'Inanna',
'Brocken',
'Grace',
'Khany',
'Shaula',
'Sway of the Stars',
'Dark Exile',
'Rebel',
'Lumen Arcana',
'Paaliaq',
'Victory\n(Median XL - 6 years)',
'Thelema',
'Cathedral',
'Mirage',
'Dragonheart',
'Erawan',
'Unity',
'Linga Sharira',
'Pygmalion',
'Eternal\nMedian 2005-2012\nThanks everyone!',
'Hellfire Plate',
'Summit',
'Cannonball',
'Ra',
'Alchemy',
'Dreadlord',
'Bogspitter',
'Eidolon',
'Amanita',
'Thundercap',
'Checkmate',
'Sphinx',
'Lily',
'Eulenspiegel',
'Wintermute',
'Indigo',
'Rathma\'s Blessing',
'Geas',
'Pharaoh',
'Nomad',
'Goddess',
'Kodo',
'Wall of Fire',
'Avatar',
'Derweze',
'Khalim\'s Protector',
'Rainbow',
'Prodigy',
'Fuse',
'Pulsa Dinura',
'Truce',
'Dyaus Pita',
'Ahriman',
'Nero',
'Lysra',
'Iblis',
'Mercy',
'Brawl',
'Kali',
'Aiwass',
'Skald',
'Icarus',
'Drekavac',
'Snowsquall',
'Retribution',
'Knave',
'Epicenter',
'Outlaw',
'Ginfaxi',
'Craton',
'Megalith',
'Nephilim',
'Hibagon',
'Riptide',
'Wind Runner',
'Stata Mater',
'Bona Dea',
'Amaterasu',
'Siegfried',
'Cambion',
'Lohengrin',
'Unicorn',
'Shaheeda',
'Eaglehorn Mask',
'Edda',
'Lion',
'Eloi',
'Nix',
'Ea',
'Eos',
'Heart of Skovos',
'Afrit',
'Rahab',
'Iambe',
'Ligeia',
'Dar-Al-Harb',
'Scorched Earth',
'Orchid',
'Shadowsteps',
'Algiz',
'Nasrudin',
'Ekam',
'Fawkes',
'Sagarmatha',
'Morthwyrtha',
'Wodziwob',
'Greisen',
'Force Shock',
'Enmerkar',
'Warpath',
'Gauntlet',
'Huracan',
'E-Engur-A',
'Tonatiuh',
'Triune\'s Blessing',
'Aegipan',
'Nezha',
'Natha',
'Norma',
'Grove',
'Slyph',
'Nigra',
'Nature\'s Grace',
'Cube',
'Warlock',
'Nahual',
'Lorelei',
'Quimbanda',
'Wyrm',
'Hecatomb',
'Twisted Mind',
'Ilmatar',
'Hierodule',
'Crusade',
'Surya',
'Malakbel',
'Rotundjere',
'Battle Rage',
'Asmodai',
'Sangreal',
'Zohar',
'Asclepion',
'Amaymon',
'Myrrhbearer',
'Lightwell',
'Lyrannikin',
'Kundalini',
'Demeter',
'Curandera',
'Astarte',
'Oracle',
'Vanity',
'Comaetho',
'Venefica',
'Cassilda',
'Space Dementia',
'Dawn',
'Shark',
'Enyo',
'Azrael',
'Void',
'Banshee',
'Amok',
'Shockwave',
'Hornet',
'Balance',
'Nyx',
'Hive',
'Thammuz',
'Python',
'Cyclops',
'Instinct',
'Myrmidon',
'Endor',
'King\'s Blood',
'Demhe',
'Pax Mystica',
'Hadad',
'Summanus',
'Akhenaten',
'Ljosalf',
'Prophecy',
'Bane',
'Judas',
'Path',
'Raid',
'Hastata',
'Haste',
'Patriot',
'Chrysopelea',
'Dajjal',
'Quantum',
'Cheetah',
'Fennec',
'Manitou',
'Aspect',
'Corsair',
'Ice Breaker',
'Aegina',
'Titanomachia',
'Scar',
'Firefly',
'Shedim',
'Carabosse',
'Fiend',
'Riot',
'Judge',
'Khattak',
'Shamo',
'Ram',
'Essus',
'Thunderbird',
'Gharaniq',
'Peacock',
'Lynx',
'Sylvanshine',
'Ocean',
'Samhain',
'Augur',
'Lincos',
'Black Cat',
'Mantra',
'Lemuria',
'Chthon',
'Loa',
'Jinx',
'Misery',
'Deep Water',
'Eurynome',
'Ladon',
'Koan',
'Ghoul',
'Genie',
'Atlantis',
'Aether',
'Ensi',
'Nehushtan',
'Resheph',
'Jihad',
'Intifada',
'Lammasu',
'Quaoar',
'Circe',
'Ker',
'Vertigo',
'Gravastar',
'Inanna',
'Brocken',
'Rebel',
'Lumen Arcana',
'Paaliaq',
'Victory\n(Median XL - 6 years)',
'Thelema',
'Cathedral',
'Mirage',
'Natasha\'s Legacy',
'Summit',
'Cannonball',
'Ra',
'Alchemy',
'Sphinx',
'Lily',
'Geas',
'Pharaoh',
'Nomad',
'Goddess',
'Rainbow',
'Prodigy',
'Fuse',
'Iblis',
'Mercy',
'Brawl',
'Kali',
'Aiwass',
'Epicenter',
'Outlaw',
'Ginfaxi',
'Craton',
'Stata Mater',
'Bona Dea',
'Amaterasu',
'Siegfried',
'Edda',
'Afrit',
'Rahab',
'Iambe',
'Ligeia',
'Algiz',
'Nasrudin',
'Ekam',
'Fawkes',
'Enmerkar',
'Warpath',
'Aegipan',
'Nezha',
'Natha',
'Cube',
'Warlock',
'Ilmatar',
'Hierodule',
'Asmodai',
'Sangreal',
'Zohar',
'Lyrannikin',
'Kundalini',
'Demeter',
'Curandera'
]
MISC_ITEMS = [
'Belladonna Extract',
'Small Cycle',
'Medium Cycle',
'Large Cycle',
'Sunless Crystal Bird',
'Deity\'s Bow',
'Mystic Dye',
'Dark Summoning',
'Ancient Book',
'Emblem of Destruction',
'Emblem of Hatred',
'Emblem of Lies',
'Emblem of Pain',
'Emblem of Suffering',
'Emblem of Terror',
'Oil of Conjuration',
'Oil of Enhancement',
'Oil of Craft',
'Oil of Renewal',
'Oil of Luck',
'Oil of Disjunction',
'Mephisto\'s Soulstone',
'Dream Fragment (1)',
'Dream Fragment (2)',
'Dream Fragment (3)',
'Dream Fragment (4)',
'Dream Fragment (5)',
'Star Chart (1)',
'Star Chart (2)',
'Star Chart (3)',
'Star Chart (4)',
'Key',
'Mark of Infusion',
'Mystic Orb'
]
# Default BBCode (forum markup) template for a trade post.  The named
# placeholders ({sets}, {su}, {ssu}, {sssu}, {rws}, {crafted}, {rings},
# {amulets}, {quivers}, {mos}, {jewels}, {rw_bases}, {shrine_bases},
# {charms}, {trophies}, {shrines}, {misc}) are presumably filled via
# str.format() by the rendering code, which is not visible in this excerpt
# -- keep the placeholder names in sync with it.  The string body is
# user-facing output and must not be reformatted.
DEFAULT_TRADE_POST_TEMPLATE = '''[color=#FFFF00][size=26]Selling[/size][/color]
[hr][/hr]
[color=#00FF00][size=24]Sets[/size][/color]
[hr][/hr]
{sets}
[color=#804000][size=24]SU[/size][/color]
[hr][/hr]
{su}
[color=#804000][size=24]SSU[/size][/color]
[hr][/hr]
{ssu}
[color=#804000][size=24]SSSU[/size][/color]
[hr][/hr]
{sssu}
[color=#808080][size=24]Runewords[/size][/color]
[hr][/hr]
{rws}
[color=#CD853F][size=24]Crafted[/size][/color]
[hr][/hr]
{crafted}
[color=#804000][size=24]Rings/Amulets/Quivers/MOs/Jewels[/size][/color]
[hr][/hr]
{rings}
{amulets}
{quivers}
{mos}
{jewels}
[color=#808080][size=24]Bases[/size][/color]
[hr][/hr]
{rw_bases}
{shrine_bases}
[color=#FF7F50][size=24]Charms[/size][/color]
[hr][/hr]
{charms}
[color=#FFA500][size=24]Trophies[/size][/color]
[hr][/hr]
{trophies}
[color=#FFFFFF][size=24]Misc[/size][/color]
[hr][/hr]
{shrines}
{misc}
'''
| baiumbg/useless-cogs | mxl/constants.py | Python | mit | 31,567 | [
"CRYSTAL",
"Firefly",
"Jaguar",
"TINKER"
] | 67de4deb3c27b94c28b889321f81f3f9191306a4d4086a037b49cb2bffac83e6 |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
import numpy as np
import os
import sys
from galsim_test_helpers import *
imgdir = os.path.join(".", "SBProfile_comparison_images") # Directory containing the reference
# images.
try:
import galsim
except ImportError:
path, filename = os.path.split(__file__)
sys.path.append(os.path.abspath(os.path.join(path, "..")))
import galsim
# Some values to use in multiple tests below:
test_hlr = 1.8      # half-light radius
test_fwhm = 1.8     # full width at half maximum
test_sigma = 1.8    # Gaussian sigma
test_sersic_n = [1.5, 2.5, 4, -4] # -4 means use explicit DeVauc rather than n=4
test_scale = [1.8, 0.05, 0.002, 0.002]  # one scale radius per Sersic index above
test_sersic_trunc = [0., 8.5]   # 0 == untruncated
test_flux = 1.8
# These are the default GSParams used when unspecified. We'll check that specifying
# these explicitly produces the same results.
default_params = galsim.GSParams(
minimum_fft_size = 128,
maximum_fft_size = 4096,
alias_threshold = 5.e-3,
maxk_threshold = 1.e-3,
kvalue_accuracy = 1.e-5,
xvalue_accuracy = 1.e-5,
shoot_accuracy = 1.e-5,
realspace_relerr = 1.e-3,
realspace_abserr = 1.e-6,
integration_relerr = 1.e-5,
integration_abserr = 1.e-7)
def test_gaussian():
    """Test the generation of a specific Gaussian profile using SBProfile against a known result.

    Draws a sigma=1, flux=1 Gaussian at pixel scale 0.2 and compares pixel-by-pixel
    against the saved reference image gauss_1.fits, via both the SBProfile layer and
    the GSObject layer (including explicit default GSParams), then exercises photon
    shooting and k-value consistency.
    """
    import time
    t1 = time.time()
    mySBP = galsim.SBGaussian(flux=1, sigma=1)
    savedImg = galsim.fits.read(os.path.join(imgdir, "gauss_1.fits"))
    savedImg.setCenter(0,0)
    dx = 0.2
    myImg = galsim.ImageF(savedImg.bounds, scale=dx)
    myImg.setCenter(0,0)
    # draw() returns the total flux actually drawn; checked against the pixel sum below.
    tot = mySBP.draw(myImg.view())
    printval(myImg, savedImg)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Gaussian profile disagrees with expected result")
    # Surface-brightness image: sum(pixels) * dx^2 should equal the drawn flux.
    np.testing.assert_almost_equal(
        myImg.array.sum() *dx**2, tot, 5,
        err_msg="Gaussian profile SBProfile::draw returned wrong tot")
    # Repeat with the GSObject version of this:
    gauss = galsim.Gaussian(flux=1, sigma=1)
    # Reference images were made with old centering, which is equivalent to use_true_center=False.
    myImg = gauss.draw(myImg, dx=dx, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Gaussian disagrees with expected result")
    np.testing.assert_almost_equal(
        myImg.array.sum() *dx**2, myImg.added_flux, 5,
        err_msg="Gaussian profile GSObject::draw returned wrong added_flux")
    # Check a non-square image
    print myImg.bounds
    recImg = galsim.ImageF(45,66)
    recImg.setCenter(0,0)
    recImg = gauss.draw(recImg, dx=dx, normalization="surface brightness", use_true_center=False)
    # Only the sub-image overlapping the reference bounds is compared.
    np.testing.assert_array_almost_equal(
        recImg[savedImg.bounds].array, savedImg.array, 5,
        err_msg="Drawing Gaussian on non-square image disagrees with expected result")
    np.testing.assert_almost_equal(
        recImg.array.sum() *dx**2, recImg.added_flux, 5,
        err_msg="Gaussian profile GSObject::draw on non-square image returned wrong added_flux")
    # Check with default_params
    gauss = galsim.Gaussian(flux=1, sigma=1, gsparams=default_params)
    gauss.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Gaussian with default_params disagrees with expected result")
    gauss = galsim.Gaussian(flux=1, sigma=1, gsparams=galsim.GSParams())
    gauss.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Gaussian with GSParams() disagrees with expected result")
    # Test photon shooting.
    do_shoot(gauss,myImg,"Gaussian")
    # Test kvalues
    do_kvalue(gauss,"Gaussian")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_gaussian_properties():
    """Test some basic properties of the SBGaussian profile.

    Checks the centroid, the maxK/stepK Fourier sizes (reference constants for
    sigma=1, scaled by 1/sigma), the k=0 value (= flux), the real-space peak
    value (= flux / (2 pi sigma^2)), and flux round-tripping through getFlux().
    """
    import time
    t1 = time.time()
    gauss = galsim.Gaussian(flux=test_flux, sigma=test_sigma)
    # Check that we are centered on (0, 0)
    cen = galsim.PositionD(0, 0)
    np.testing.assert_equal(gauss.centroid(), cen)
    # Check Fourier properties
    # NOTE(review): the numeric factors are the expected maxK/stepK for a sigma=1
    # Gaussian with default GSParams; presumably derived from maxk_threshold etc.
    np.testing.assert_almost_equal(gauss.maxK(), 3.7169221888498383 / test_sigma)
    np.testing.assert_almost_equal(gauss.stepK(), 0.533644625664 / test_sigma)
    np.testing.assert_equal(gauss.kValue(cen), (1+0j) * test_flux)
    import math
    # Peak surface brightness of a Gaussian is flux / (2 pi sigma^2).
    np.testing.assert_almost_equal(gauss.xValue(cen), 1./(2.*math.pi) * test_flux / test_sigma**2)
    # Check input flux vs output flux
    for inFlux in np.logspace(-2, 2, 10):
        gauss = galsim.Gaussian(flux=inFlux, sigma=2.)
        outFlux = gauss.getFlux()
        np.testing.assert_almost_equal(outFlux, inFlux)
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_gaussian_radii():
"""Test initialization of Gaussian with different types of radius specification.
"""
import time
t1 = time.time()
import math
# Test constructor using half-light-radius:
test_gal = galsim.Gaussian(flux = 1., half_light_radius = test_hlr)
hlr_sum = radial_integrate(test_gal, 0., test_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in Gaussian constructor with half-light radius")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Gaussian initialized with half-light radius")
# test that getSigma() method provides correct sigma
got_sigma = test_gal.getSigma()
test_sigma_ratio = (test_gal.xValue(galsim.PositionD(got_sigma, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'sigma ratio = ', test_sigma_ratio
np.testing.assert_almost_equal(
test_sigma_ratio, math.exp(-0.5), decimal=4,
err_msg="Error in sigma for Gaussian initialized with half-light radius")
# Test constructor using sigma:
test_gal = galsim.Gaussian(flux = 1., sigma = test_sigma)
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_sigma,0)) / center
print 'sigma ratio = ',ratio
np.testing.assert_almost_equal(
ratio, np.exp(-0.5), decimal=4,
err_msg="Error in Gaussian constructor with sigma")
# then test that image indeed has the correct HLR properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (profile initialized with sigma) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for Gaussian initialized with sigma.")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Gaussian initialized with sigma.")
# Test constructor using FWHM:
test_gal = galsim.Gaussian(flux = 1., fwhm = test_fwhm)
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_fwhm/2.,0)) / center
print 'fwhm ratio = ',ratio
np.testing.assert_almost_equal(
ratio, 0.5, decimal=4,
err_msg="Error in Gaussian constructor with fwhm")
# then test that image indeed has the correct HLR properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (profile initialized with fwhm) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for Gaussian initialized with FWHM.")
# test that getSigma() method provides correct sigma
got_sigma = test_gal.getSigma()
test_sigma_ratio = (test_gal.xValue(galsim.PositionD(got_sigma, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'sigma ratio = ', test_sigma_ratio
np.testing.assert_almost_equal(
test_sigma_ratio, math.exp(-0.5), decimal=4,
err_msg="Error in sigma for Gaussian initialized with FWHM.")
# Check that the getters don't work after modifying the original.
# Note: I test all the modifiers here. For the rest of the profile types, I'll
# just confirm that it is true of applyShear. I don't think that has any chance
# of missing anything.
test_gal_flux1 = test_gal.copy()
print 'fwhm = ',test_gal_flux1.getFWHM()
print 'hlr = ',test_gal_flux1.getHalfLightRadius()
print 'sigma = ',test_gal_flux1.getSigma()
test_gal_flux1.setFlux(3.)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_flux1, "getFWHM")
np.testing.assert_raises(AttributeError, getattr, test_gal_flux1, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_flux1, "getSigma")
except ImportError:
# assert_raises requires nose, which we don't want to force people to install.
# So if they are running this without nose, we just skip these tests.
pass
test_gal_flux2 = test_gal.copy()
print 'fwhm = ',test_gal_flux2.getFWHM()
print 'hlr = ',test_gal_flux2.getHalfLightRadius()
print 'sigma = ',test_gal_flux2.getSigma()
test_gal_flux2.setFlux(3.)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_flux2, "getFWHM")
np.testing.assert_raises(AttributeError, getattr, test_gal_flux2, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_flux2, "getSigma")
except ImportError:
pass
test_gal_shear = test_gal.copy()
print 'fwhm = ',test_gal_shear.getFWHM()
print 'hlr = ',test_gal_shear.getHalfLightRadius()
print 'sigma = ',test_gal_shear.getSigma()
test_gal_shear.applyShear(g1=0.3, g2=0.1)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getFWHM")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getSigma")
except ImportError:
pass
test_gal_rot = test_gal.copy()
print 'fwhm = ',test_gal_rot.getFWHM()
print 'hlr = ',test_gal_rot.getHalfLightRadius()
print 'sigma = ',test_gal_rot.getSigma()
test_gal_rot.applyRotation(theta = 0.5 * galsim.radians)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_rot, "getFWHM")
np.testing.assert_raises(AttributeError, getattr, test_gal_rot, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_rot, "getSigma")
except ImportError:
pass
test_gal_shift = test_gal.copy()
print 'fwhm = ',test_gal_shift.getFWHM()
print 'hlr = ',test_gal_shift.getHalfLightRadius()
print 'sigma = ',test_gal_shift.getSigma()
test_gal_shift.applyShift(dx=0.11, dy=0.04)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_shift, "getFWHM")
np.testing.assert_raises(AttributeError, getattr, test_gal_shift, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_shift, "getSigma")
except ImportError:
pass
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_gaussian_flux_scaling():
    """Test flux scaling for Gaussian.

    Exercises all flux-scaling operators (*=, /=, obj*2, 2*obj, obj/2) and checks
    both that the result has the scaled flux and that the non-in-place forms leave
    the original object untouched.
    """
    import time
    t1 = time.time()
    # decimal point to go to for parameter value comparisons
    param_decimal = 12
    # init with sigma and flux only (should be ok given last tests)
    obj = galsim.Gaussian(sigma=test_sigma, flux=test_flux)
    obj *= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __imul__.")
    obj = galsim.Gaussian(sigma=test_sigma, flux=test_flux)
    obj /= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __idiv__.")
    obj = galsim.Gaussian(sigma=test_sigma, flux=test_flux)
    obj2 = obj * 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (result).")
    obj = galsim.Gaussian(sigma=test_sigma, flux=test_flux)
    obj2 = 2. * obj
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (result).")
    obj = galsim.Gaussian(sigma=test_sigma, flux=test_flux)
    obj2 = obj / 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (result).")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_exponential():
    """Test the generation of a specific exp profile using SBProfile against a known result.

    Draws an Exponential with scale radius r0 = 1/1.67839 (see note below) and
    compares against the saved reference image exp_1.fits via both the SBProfile
    and GSObject layers, then exercises photon shooting and k-value consistency.
    """
    import time
    t1 = time.time()
    re = 1.0
    # Note the factor below should really be 1.6783469900166605, but the value of 1.67839 is
    # retained here as it was used by SBParse to generate the original known result (this changed
    # in commit b77eb05ab42ecd31bc8ca03f1c0ae4ee0bc0a78b.
    # The value of this test for regression purposes is not harmed by retaining the old scaling, it
    # just means that the half light radius chosen for the test is not really 1, but 0.999974...
    r0 = re/1.67839
    mySBP = galsim.SBExponential(flux=1., scale_radius=r0)
    savedImg = galsim.fits.read(os.path.join(imgdir, "exp_1.fits"))
    myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
    mySBP.draw(myImg.view())
    printval(myImg, savedImg)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Exponential profile disagrees with expected result")
    # Repeat with the GSObject version of this:
    expon = galsim.Exponential(flux=1., scale_radius=r0)
    expon.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Exponential disagrees with expected result")
    # Check with default_params
    expon = galsim.Exponential(flux=1., scale_radius=r0, gsparams=default_params)
    expon.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Exponential with default_params disagrees with expected result")
    expon = galsim.Exponential(flux=1., scale_radius=r0, gsparams=galsim.GSParams())
    expon.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Exponential with GSParams() disagrees with expected result")
    # Test photon shooting.
    do_shoot(expon,myImg,"Exponential")
    # Test kvalues
    do_kvalue(expon,"Exponential")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_exponential_properties():
    """Test some basic properties of the SBExponential profile.

    Checks the centroid, the maxK/stepK Fourier sizes (reference constants for
    scale radius 1, scaled by 1/r0), the k=0 value (= flux), the real-space peak
    value (= flux / (2 pi r0^2)), and flux round-tripping through getFlux().
    """
    import time
    t1 = time.time()
    expon = galsim.Exponential(flux=test_flux, scale_radius=test_scale[0])
    # Check that we are centered on (0, 0)
    cen = galsim.PositionD(0, 0)
    np.testing.assert_equal(expon.centroid(), cen)
    # Check Fourier properties
    # NOTE(review): 10 and 0.37436747851 are the expected maxK/stepK for scale
    # radius 1 with default GSParams; scaled here by 1/test_scale[0].
    np.testing.assert_almost_equal(expon.maxK(), 10 / test_scale[0])
    np.testing.assert_almost_equal(expon.stepK(), 0.37436747851 / test_scale[0])
    np.testing.assert_equal(expon.kValue(cen), (1+0j) * test_flux)
    import math
    # Peak surface brightness of an Exponential is flux / (2 pi r0^2).
    np.testing.assert_almost_equal(expon.xValue(cen), 1./(2.*math.pi)*test_flux/test_scale[0]**2)
    # Check input flux vs output flux
    for inFlux in np.logspace(-2, 2, 10):
        expon = galsim.Exponential(flux=inFlux, scale_radius=1.8)
        outFlux = expon.getFlux()
        np.testing.assert_almost_equal(outFlux, inFlux)
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_exponential_radii():
    """Test initialization of Exponential with different types of radius specification.

    For both constructor flavors (half_light_radius, scale_radius) this checks
    that the profile has the requested property (half the flux inside the HLR,
    value ratio exp(-1) at the scale radius), that the other radius getter is
    consistent, and that the size getters raise AttributeError after applyShear.
    """
    import time
    t1 = time.time()
    import math
    # Test constructor using half-light-radius:
    test_gal = galsim.Exponential(flux = 1., half_light_radius = test_hlr)
    hlr_sum = radial_integrate(test_gal, 0., test_hlr, 1.e-4)
    print 'hlr_sum = ',hlr_sum
    np.testing.assert_almost_equal(
        hlr_sum, 0.5, decimal=4,
        err_msg="Error in Exponential constructor with half-light radius")
    # then test scale getter
    center = test_gal.xValue(galsim.PositionD(0,0))
    ratio = test_gal.xValue(galsim.PositionD(test_gal.getScaleRadius(),0)) / center
    print 'scale ratio = ',ratio
    np.testing.assert_almost_equal(
        ratio, np.exp(-1.0), decimal=4,
        err_msg="Error in getScaleRadius for Exponential constructed with half light radius")
    # Test constructor using scale radius:
    test_gal = galsim.Exponential(flux = 1., scale_radius = test_scale[0])
    center = test_gal.xValue(galsim.PositionD(0,0))
    ratio = test_gal.xValue(galsim.PositionD(test_scale[0],0)) / center
    print 'scale ratio = ',ratio
    np.testing.assert_almost_equal(
        ratio, np.exp(-1.0), decimal=4,
        err_msg="Error in Exponential constructor with scale")
    # then test that image indeed has the correct HLR properties when radially integrated
    got_hlr = test_gal.getHalfLightRadius()
    hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
    print 'hlr_sum (profile initialized with scale_radius) = ',hlr_sum
    np.testing.assert_almost_equal(
        hlr_sum, 0.5, decimal=4,
        err_msg="Error in half light radius for Exponential initialized with scale_radius.")
    # Check that the getters don't work after modifying the original.
    test_gal_shear = test_gal.copy()
    print 'hlr = ',test_gal_shear.getHalfLightRadius()
    print 'scale = ',test_gal_shear.getScaleRadius()
    test_gal_shear.applyShear(g1=0.3, g2=0.1)
    try:
        np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius")
        np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getScaleRadius")
    except ImportError:
        # assert_raises requires nose; skip these checks if it isn't available.
        pass
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_exponential_flux_scaling():
    """Test flux scaling for Exponential.

    Exercises all flux-scaling operators (*=, /=, obj*2, 2*obj, obj/2) and checks
    both that the result has the scaled flux and that the non-in-place forms leave
    the original object untouched.
    """
    import time
    t1 = time.time()
    # decimal point to go to for parameter value comparisons
    param_decimal = 12
    # init with scale and flux only (should be ok given last tests)
    obj = galsim.Exponential(scale_radius=test_scale[0], flux=test_flux)
    obj *= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __imul__.")
    obj = galsim.Exponential(scale_radius=test_scale[0], flux=test_flux)
    obj /= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __idiv__.")
    obj = galsim.Exponential(scale_radius=test_scale[0], flux=test_flux)
    obj2 = obj * 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (result).")
    obj = galsim.Exponential(scale_radius=test_scale[0], flux=test_flux)
    obj2 = 2. * obj
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (result).")
    obj = galsim.Exponential(scale_radius=test_scale[0], flux=test_flux)
    obj2 = obj / 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (result).")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_sersic():
    """Test the generation of a specific Sersic profile using SBProfile against a known result.

    Draws an n=3, HLR=1 Sersic (untruncated, then truncated at r=10) and compares
    against the saved reference images via both the SBProfile and GSObject layers,
    exercises photon shooting (after convolving with a small Gaussian to smooth
    the central peak) and checks k-value normalization for several
    truncation / flux_untruncated / radius-specification combinations.
    """
    import time
    t1 = time.time()
    # Test SBSersic
    mySBP = galsim.SBSersic(n=3, flux=1, half_light_radius=1)
    savedImg = galsim.fits.read(os.path.join(imgdir, "sersic_3_1.fits"))
    myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
    mySBP.draw(myImg.view())
    printval(myImg, savedImg)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Sersic profile disagrees with expected result")
    # Repeat with the GSObject version of this:
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1)
    sersic.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Sersic disagrees with expected result")
    # Check with default_params
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1, gsparams=default_params)
    sersic.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Sersic with default_params disagrees with expected result")
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1, gsparams=galsim.GSParams())
    sersic.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject Sersic with GSParams() disagrees with expected result")
    # Test photon shooting.
    # Convolve with a small gaussian to smooth out the central peak.
    sersic2 = galsim.Convolve(sersic, galsim.Gaussian(sigma=0.3))
    do_shoot(sersic2,myImg,"Sersic")
    # Test kvalues
    do_kvalue(sersic,"Sersic")
    # Now repeat everything using a truncation.  (Above had no truncation.)
    # Test Truncated SBSersic
    mySBP = galsim.SBSersic(n=3, flux=1, half_light_radius=1, trunc=10)
    savedImg = galsim.fits.read(os.path.join(imgdir, "sersic_3_1_10.fits"))
    myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
    mySBP.draw(myImg.view())
    printval(myImg, savedImg)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Truncated Sersic profile disagrees with expected result")
    # Repeat with the GSObject version of this:
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1, trunc=10)
    sersic.draw(myImg,dx=0.2, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using truncated GSObject Sersic disagrees with expected result")
    # Test photon shooting.
    # Convolve with a small gaussian to smooth out the central peak.
    sersic2 = galsim.Convolve(sersic, galsim.Gaussian(sigma=0.3))
    do_shoot(sersic2,myImg,"Truncated Sersic")
    # Test kvalues
    do_kvalue(sersic, "Truncated Sersic")
    # Check for normalization consistencies with kValue checks. xValues tested in test_sersic_radii.
    # For half-light radius specified truncated Sersic, with flux_untruncated flag set
    sersic = galsim.Sersic(n=3, flux=test_flux, half_light_radius=1, trunc=10,
                           flux_untruncated=True)
    do_kvalue(sersic, "Truncated Sersic w/ flux_untruncated, half-light radius specified")
    # For scale radius specified Sersic
    sersic = galsim.Sersic(n=3, flux=test_flux, scale_radius=0.05)
    do_kvalue(sersic, "Sersic w/ scale radius specified")
    # For scale radius specified truncated Sersic
    sersic = galsim.Sersic(n=3, flux=test_flux, scale_radius=0.05, trunc=10)
    do_kvalue(sersic, "Truncated Sersic w/ scale radius specified")
    # For scale radius specified truncated Sersic, with flux_untruncated flag set
    sersic = galsim.Sersic(n=3, flux=test_flux, scale_radius=0.05, trunc=10, flux_untruncated=True)
    do_kvalue(sersic, "Truncated Sersic w/ flux_untruncated, scale radius specified")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_sersic_radii():
"""Test initialization of Sersic with different types of radius specification.
"""
import time
t1 = time.time()
import math
for n, scale in zip(test_sersic_n, test_scale) :
# Test constructor using half-light-radius
if n == -4:
test_gal1 = galsim.DeVaucouleurs(half_light_radius=test_hlr, flux=1.)
test_gal2 = galsim.DeVaucouleurs(half_light_radius=test_hlr, trunc=8.5, flux=1.)
test_gal3 = galsim.DeVaucouleurs(half_light_radius=test_hlr, trunc=8.5, flux=1.,
flux_untruncated=True)
gal_labels = ["DeVauc", "truncated DeVauc", "flux_untruncated DeVauc"]
else:
test_gal1 = galsim.Sersic(n=n, half_light_radius=test_hlr, flux=1.)
test_gal2 = galsim.Sersic(n=n, half_light_radius=test_hlr, trunc=8.5, flux=1.)
test_gal3 = galsim.Sersic(n=n, half_light_radius=test_hlr, trunc=8.5, flux=1.,
flux_untruncated=True)
gal_labels = ["Sersic", "truncated Sersic", "flux_untruncated Sersic"]
gal_list = [test_gal1, test_gal2, test_gal3]
# Check that the returned half-light radius is correct
print 'test_hlr = ',test_hlr
print 'test_gal1 hlr, sr = ',test_gal1.getHalfLightRadius(),test_gal1.getScaleRadius()
print 'test_gal2 hlr, sr = ',test_gal2.getHalfLightRadius(),test_gal2.getScaleRadius()
print 'test_gal3 hlr, sr = ',test_gal3.getHalfLightRadius(),test_gal3.getScaleRadius()
np.testing.assert_almost_equal(
test_gal1.getHalfLightRadius(), test_hlr, decimal=5,
err_msg = "Error in returned HLR for Sersic HLR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal2.getHalfLightRadius(), test_hlr, decimal=5,
err_msg = "Error in returned HLR for truncated Sersic HLR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal3.getScaleRadius(), test_gal1.getScaleRadius(), decimal=5,
err_msg = "Error in returned SR for flux_untruncated Sersic HLR constructor, n=%.1f"%n)
# Check that the returned flux is correct
print 'test_gal1.getFlux() = ',test_gal1.getFlux()
print 'test_gal2.getFlux() = ',test_gal2.getFlux()
print 'test_gal3.getFlux() = ',test_gal3.getFlux()
np.testing.assert_almost_equal(
test_gal1.getFlux(), 1., decimal=5,
err_msg = "Error in returned Flux for Sersic HLR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal2.getFlux(), 1., decimal=5,
err_msg = "Error in returned Flux for truncated Sersic HLR constructor, n=%.1f"%n)
# test_gal3 doesn't match getFlux(), but should have central value match test_gal1.
center1 = test_gal1.xValue(galsim.PositionD(0,0))
center3 = test_gal3.xValue(galsim.PositionD(0,0))
print 'peak value 1,3 = ', center1, center3
np.testing.assert_almost_equal(
center1, center3, 9,
"Error in flux_untruncated Sersic normalization HLR constructor, n=%.1f"%n)
# (test half-light radii)
for test_gal, label in zip(gal_list, gal_labels):
print 'flux = ',test_gal.getFlux()
print 'hlr = ',test_gal.getHalfLightRadius()
print 'scale = ',test_gal.getScaleRadius()
got_hlr = test_gal.getHalfLightRadius()
got_flux = test_gal.getFlux()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5*got_flux, decimal=4,
err_msg = "Error in %s half-light radius constructor, n=%.1f"%(label,n))
# (test scale radii)
for test_gal, label in zip(gal_list, gal_labels):
got_sr = test_gal.getScaleRadius()
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(got_sr,0)) / center
print 'scale ratio = ',ratio
np.testing.assert_almost_equal(
ratio, np.exp(-1.0), decimal=4,
err_msg="Error in getScaleRadius for HLR constructed %s"%label)
# Test constructor using scale radius (test scale radius)
if n == -4:
test_gal1 = galsim.DeVaucouleurs(scale_radius=scale, flux=1.)
test_gal2 = galsim.DeVaucouleurs(scale_radius=scale, trunc=8.5, flux=1.)
test_gal3 = galsim.DeVaucouleurs(scale_radius=scale, trunc=8.5, flux=1.,
flux_untruncated=True)
else:
test_gal1 = galsim.Sersic(n=n, scale_radius=scale, flux=1.)
test_gal2 = galsim.Sersic(n=n, scale_radius=scale, trunc=8.5, flux=1.)
test_gal3 = galsim.Sersic(n=n, scale_radius=scale, trunc=8.5, flux=1.,
flux_untruncated=True)
gal_list = [test_gal1, test_gal2, test_gal3]
# Check that the returned scale radius is correct
print 'test_scale = ',scale
print 'test_gal1 hlr, sr = ',test_gal1.getHalfLightRadius(),test_gal1.getScaleRadius()
print 'test_gal2 hlr, sr = ',test_gal2.getHalfLightRadius(),test_gal2.getScaleRadius()
print 'test_gal3 hlr, sr = ',test_gal3.getHalfLightRadius(),test_gal3.getScaleRadius()
np.testing.assert_almost_equal(
test_gal1.getScaleRadius(), scale, decimal=5,
err_msg = "Error in returned SR for Sersic SR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal2.getScaleRadius(), scale, decimal=5,
err_msg = "Error in returned SR for truncated Sersic SR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal3.getScaleRadius(), scale, decimal=5,
err_msg = "Error in returned SR for truncated Sersic SR constructor, n=%.1f"%n)
# Returned HLR should match for gals 2,3
got_hlr2 = test_gal2.getHalfLightRadius()
got_hlr3 = test_gal3.getHalfLightRadius()
print 'half light radii of truncated, scale_radius constructed Sersic =',got_hlr2,got_hlr3
np.testing.assert_almost_equal(
got_hlr2, got_hlr3, decimal=4,
err_msg="Error in HLR for scale_radius constructed flux_untruncated Sersic (II).")
# Check that the returned flux is correct
print 'test_gal1.getFlux() = ',test_gal1.getFlux()
print 'test_gal2.getFlux() = ',test_gal2.getFlux()
print 'test_gal3.getFlux() = ',test_gal3.getFlux()
np.testing.assert_almost_equal(
test_gal1.getFlux(), 1., decimal=5,
err_msg = "Error in returned Flux for Sersic HLR constructor, n=%.1f"%n)
np.testing.assert_almost_equal(
test_gal2.getFlux(), 1., decimal=5,
err_msg = "Error in returned Flux for truncated Sersic HLR constructor, n=%.1f"%n)
center1 = test_gal1.xValue(galsim.PositionD(0,0))
center3 = test_gal3.xValue(galsim.PositionD(0,0))
print 'peak value 1,3 = ', center1, center3
np.testing.assert_almost_equal(
center1, center3, 9,
"Error in flux_untruncated Sersic normalization HLR constructor, n=%.1f"%n)
# (test scale radii)
for test_gal, label in zip(gal_list, gal_labels):
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(scale,0)) / center
print 'scale ratio = ',ratio
np.testing.assert_almost_equal(
ratio, np.exp(-1.0), decimal=4,
err_msg="Error in %s scale radius constructor, n=%.1f"%(label,n))
# (test half-light radius)
for test_gal, label in zip(gal_list, gal_labels):
got_hlr = test_gal.getHalfLightRadius()
got_flux = test_gal.getFlux()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5*got_flux, decimal=4,
err_msg="Error in HLR for scale_radius constructed %s"%label)
# Check that the getters don't work after modifying the original.
test_gal_shear = test_gal1.copy()
# They still work after copy()
if n != -4:
print 'n = ',test_gal_shear.getN()
print 'hlr = ',test_gal_shear.getHalfLightRadius()
print 'sr = ',test_gal_shear.getScaleRadius()
test_gal_shear.applyShear(g1=0.3, g2=0.1)
# But not after applyShear() (or others, but this is a sufficient test here)
try:
if n != -4:
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getN")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getScaleRadius")
except ImportError:
pass
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_sersic_flux_scaling():
    """Test flux scaling for Sersic.

    Loops over Sersic index (n=-4 selects explicit DeVaucouleurs) and truncation,
    then exercises all flux-scaling operators (*=, /=, obj*2, 2*obj, obj/2),
    checking the scaled flux and that non-in-place forms leave the original
    (and its .copy()) untouched.
    """
    import time
    t1 = time.time()
    # decimal point to go to for parameter value comparisons
    param_decimal = 12
    # loop through sersic n
    for test_n in test_sersic_n:
        # loop through sersic truncation
        for test_trunc in test_sersic_trunc:
            # init with hlr and flux only (should be ok given last tests)
            # n=-4 is code to use explicit DeVaucouleurs rather than Sersic(n=4).
            # It should be identical.
            if test_n == -4:
                init_obj = galsim.DeVaucouleurs(half_light_radius=test_hlr, flux=test_flux,
                                           trunc=test_trunc)
            else:
                init_obj = galsim.Sersic(test_n, half_light_radius=test_hlr, flux=test_flux,
                                    trunc=test_trunc)
            # Test in place *= and /=
            obj = init_obj.copy()
            obj *= 2.
            np.testing.assert_almost_equal(
                obj.getFlux(), test_flux * 2., decimal=param_decimal,
                err_msg="Flux param inconsistent after __imul__.")
            np.testing.assert_almost_equal(
                init_obj.getFlux(), test_flux, decimal=param_decimal,
                err_msg="obj.copy() didn't produce a separate copy.")
            obj = init_obj.copy()
            obj /= 2.
            np.testing.assert_almost_equal(
                obj.getFlux(), test_flux / 2., decimal=param_decimal,
                err_msg="Flux param inconsistent after __idiv__.")
            obj2 = init_obj * 2.
            np.testing.assert_almost_equal(
                init_obj.getFlux(), test_flux, decimal=param_decimal,
                err_msg="Flux param inconsistent after __rmul__ (original).")
            np.testing.assert_almost_equal(
                obj2.getFlux(), test_flux * 2., decimal=param_decimal,
                err_msg="Flux param inconsistent after __rmul__ (result).")
            obj2 = 2. * init_obj
            np.testing.assert_almost_equal(
                init_obj.getFlux(), test_flux, decimal=param_decimal,
                err_msg="Flux param inconsistent after __mul__ (original).")
            np.testing.assert_almost_equal(
                obj2.getFlux(), test_flux * 2., decimal=param_decimal,
                err_msg="Flux param inconsistent after __mul__ (result).")
            obj2 = init_obj / 2.
            np.testing.assert_almost_equal(
                init_obj.getFlux(), test_flux, decimal=param_decimal,
                err_msg="Flux param inconsistent after __div__ (original).")
            np.testing.assert_almost_equal(
                obj2.getFlux(), test_flux / 2., decimal=param_decimal,
                err_msg="Flux param inconsistent after __div__ (result).")
    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_sersic_05():
"""Test the equivalence of Sersic with n=0.5 and Gaussian
"""
# hlr/sigma = sqrt(2 ln(2)) = 1.177410022515475
hlr_sigma = 1.177410022515475
# cf test_gaussian()
savedImg = galsim.fits.read(os.path.join(imgdir, "gauss_1.fits"))
savedImg.setCenter(0,0)
myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
sersic = galsim.Sersic(n=0.5, flux=1, half_light_radius=1 * hlr_sigma)
myImg = sersic.draw(myImg, normalization="surface brightness", use_true_center=False)
print 'saved image center = ',savedImg(0,0)
print 'image center = ',myImg(0,0)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using Sersic with n=0.5 disagrees with expected result for Gaussian")
do_kvalue(sersic,"n=0.5 Sersic")
# cf test_gaussian_properties()
sersic = galsim.Sersic(n=0.5, flux=test_flux, half_light_radius=test_sigma * hlr_sigma)
cen = galsim.PositionD(0, 0)
np.testing.assert_equal(sersic.centroid(), cen)
np.testing.assert_equal(sersic.kValue(cen), (1+0j) * test_flux)
import math
np.testing.assert_almost_equal(sersic.xValue(cen), 1./(2.*math.pi) * test_flux / test_sigma**2,
decimal=5)
# Also test some random values other than the center:
gauss = galsim.Gaussian(flux=test_flux, sigma=test_sigma)
for (x,y) in [ (0.1,0.2), (-0.5, 0.4), (0, 0.9), (1.2, 0.1), (2,2) ]:
pos = galsim.PositionD(x,y)
np.testing.assert_almost_equal(sersic.xValue(pos), gauss.xValue(pos), decimal=5)
np.testing.assert_almost_equal(sersic.kValue(pos), gauss.kValue(pos), decimal=5)
def test_sersic_1():
    """Check that a Sersic profile with index n=1 reproduces an Exponential.
    """
    # cf test_exponential(): the reference image was built using the
    # approximate half-light-radius / scale-radius ratio 1.67839, so derive
    # the scale radius the same way here.  (Local renamed from `re` to avoid
    # shadowing the stdlib regex module name.)
    r_half = 1.0
    scale_r = r_half/1.67839
    # The real value of re/r0 = 1.6783469900166605
    hlr_r0 = 1.6783469900166605
    ref_img = galsim.fits.read(os.path.join(imgdir, "exp_1.fits"))
    test_img = galsim.ImageF(ref_img.bounds, scale=0.2)
    sersic = galsim.Sersic(n=1, flux=1., half_light_radius=scale_r * hlr_r0)
    sersic.draw(test_img, normalization="surface brightness", use_true_center=False)
    np.testing.assert_array_almost_equal(
        test_img.array, ref_img.array, 5,
        err_msg="Using Sersic n=1 disagrees with expected result for Exponential")
    do_kvalue(sersic,"n=1 Sersic")

    # Analytic checks against the equivalent Exponential
    # (cf. test_exponential_properties()).
    import math
    sersic = galsim.Sersic(n=1, flux=test_flux, half_light_radius=test_scale[0] * hlr_r0)
    origin = galsim.PositionD(0, 0)
    np.testing.assert_equal(sersic.centroid(), origin)
    np.testing.assert_equal(sersic.kValue(origin), (1+0j) * test_flux)
    np.testing.assert_almost_equal(
        sersic.xValue(origin), 1./(2.*math.pi)*test_flux/test_scale[0]**2, decimal=5)

    # Spot-check off-center positions against a true Exponential too.
    expon = galsim.Exponential(flux=test_flux, scale_radius=test_scale[0])
    for (x,y) in [ (0.1,0.2), (-0.5, 0.4), (0, 0.9), (1.2, 0.1), (2,2) ]:
        pos = galsim.PositionD(x,y)
        np.testing.assert_almost_equal(sersic.xValue(pos), expon.xValue(pos), decimal=5)
        np.testing.assert_almost_equal(sersic.kValue(pos), expon.kValue(pos), decimal=5)
def test_airy():
"""Test the generation of a specific Airy profile using SBProfile against a known result.
"""
import time
t1 = time.time()
mySBP = galsim.SBAiry(lam_over_diam=1./0.8, obscuration=0.1, flux=1)
savedImg = galsim.fits.read(os.path.join(imgdir, "airy_.8_.1.fits"))
myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
mySBP.draw(myImg.view())
printval(myImg, savedImg)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Airy profile disagrees with expected result")
# Repeat with the GSObject version of this:
airy = galsim.Airy(lam_over_diam=1./0.8, obscuration=0.1, flux=1)
airy.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Airy disagrees with expected result")
# Check with default_params
airy = galsim.Airy(lam_over_diam=1./0.8, obscuration=0.1, flux=1, gsparams=default_params)
airy.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Airy with default_params disagrees with expected result")
airy = galsim.Airy(lam_over_diam=1./0.8, obscuration=0.1, flux=1, gsparams=galsim.GSParams())
airy.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Airy with GSParams() disagrees with expected result")
# Test photon shooting.
airy = galsim.Airy(lam_over_diam=1./0.8, obscuration=0.0, flux=test_flux)
do_shoot(airy,myImg,"Airy obscuration=0.0")
airy2 = galsim.Airy(lam_over_diam=1./0.8, obscuration=0.1, flux=test_flux)
do_shoot(airy2,myImg,"Airy obscuration=0.1")
# Test kvalues
do_kvalue(airy, "Airy obscuration=0.0")
do_kvalue(airy2, "Airy obscuration=0.1")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_airy_radii():
"""Test Airy half light radius and FWHM correctly set and match image.
"""
import time
t1 = time.time()
import math
# Test constructor using lam_over_diam: (only option for Airy)
test_gal = galsim.Airy(lam_over_diam= 1./0.8, flux=1.)
# test half-light-radius getter
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in Airy half-light radius")
# test FWHM getter
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(.5 * test_gal.getFWHM(),0)) / center
print 'fwhm ratio = ',ratio
np.testing.assert_almost_equal(
ratio, 0.5, decimal=4,
err_msg="Error in getFWHM() for Airy.")
# Check that the getters don't work after modifying the original.
test_gal_shear = test_gal.copy()
print 'fwhm = ',test_gal_shear.getFWHM()
print 'hlr = ',test_gal_shear.getHalfLightRadius()
print 'lod = ',test_gal_shear.getLamOverD()
test_gal_shear.applyShear(g1=0.3, g2=0.1)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getFWHM");
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getLamOverD")
except ImportError:
pass
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_airy_flux_scaling():
"""Test flux scaling for Airy.
"""
import time
t1 = time.time()
# decimal point to go to for parameter value comparisons
param_decimal = 12
test_loD = 1.9
test_obscuration = 0.32
# init with lam_over_r0 and flux only (should be ok given last tests)
obj = galsim.Airy(lam_over_diam=test_loD, flux=test_flux, obscuration=test_obscuration)
obj *= 2.
np.testing.assert_almost_equal(
obj.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __imul__.")
obj = galsim.Airy(lam_over_diam=test_loD, flux=test_flux, obscuration=test_obscuration)
obj /= 2.
np.testing.assert_almost_equal(
obj.getFlux(), test_flux / 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __idiv__.")
obj = galsim.Airy(lam_over_diam=test_loD, flux=test_flux, obscuration=test_obscuration)
obj2 = obj * 2.
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __rmul__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __rmul__ (result).")
obj = galsim.Airy(lam_over_diam=test_loD, flux=test_flux, obscuration=test_obscuration)
obj2 = 2. * obj
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __mul__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __mul__ (result).")
obj = galsim.Airy(lam_over_diam=test_loD, flux=test_flux, obscuration=test_obscuration)
obj2 = obj / 2.
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __div__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux / 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __div__ (result).")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_box():
"""Test the generation of a specific box profile using SBProfile against a known result.
"""
import time
t1 = time.time()
mySBP = galsim.SBBox(xw=1, yw=1, flux=1)
savedImg = galsim.fits.read(os.path.join(imgdir, "box_1.fits"))
myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
mySBP.draw(myImg.view())
printval(myImg, savedImg)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Box profile disagrees with expected result")
# Repeat with the GSObject version of this:
pixel = galsim.Pixel(xw=1, yw=1, flux=1)
pixel.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Pixel disagrees with expected result")
# Check with default_params
pixel = galsim.Pixel(xw=1, yw=1, flux=1, gsparams=default_params)
pixel.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Pixel with default_params disagrees with expected result")
pixel = galsim.Pixel(xw=1, yw=1, flux=1, gsparams=galsim.GSParams())
pixel.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Pixel with GSParams() disagrees with expected result")
# Test photon shooting.
do_shoot(pixel,myImg,"Pixel")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_moffat():
"""Test the generation of a specific Moffat profile using SBProfile against a known result.
"""
import time
t1 = time.time()
# Code was formerly:
# mySBP = galsim.SBMoffat(beta=2, truncationFWHM=5, flux=1, half_light_radius=1)
#
# ...but this is no longer quite so simple since we changed the handling of trunc to be in
# physical units. However, the same profile can be constructed using
# fwhm=1.3178976627539716
# as calculated by interval bisection in devutils/external/calculate_moffat_radii.py
fwhm_backwards_compatible = 1.3178976627539716
mySBP = galsim.SBMoffat(beta=2, half_light_radius=1, trunc=5*fwhm_backwards_compatible, flux=1)
savedImg = galsim.fits.read(os.path.join(imgdir, "moffat_2_5.fits"))
myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
mySBP.draw(myImg.view())
printval(myImg, savedImg)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Moffat profile disagrees with expected result")
# Repeat with the GSObject version of this:
moffat = galsim.Moffat(beta=2, half_light_radius=1, trunc=5*fwhm_backwards_compatible, flux=1)
moffat.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Moffat disagrees with expected result")
# Check with default_params
moffat = galsim.Moffat(beta=2, half_light_radius=1, trunc=5*fwhm_backwards_compatible, flux=1,
gsparams=default_params)
moffat.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Moffat with default_params disagrees with expected result")
moffat = galsim.Moffat(beta=2, half_light_radius=1, trunc=5*fwhm_backwards_compatible, flux=1,
gsparams=galsim.GSParams())
moffat.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Moffat with GSParams() disagrees with expected result")
# Test photon shooting.
do_shoot(moffat,myImg,"Moffat")
# Test kvalues
do_kvalue(moffat, "Moffat")
# The code for untruncated Moffat profiles is specialized for particular beta values, so
# test each of these:
for beta in [ 1.5, 2, 2.5, 3, 3.5, 4, 2.3 ]: # The one last is for the generic case.
moffat = galsim.Moffat(beta=beta, half_light_radius=0.7, flux=test_flux)
do_kvalue(moffat,"Untruncated Moffat with beta=%f"%beta)
# Don't bother repeating the do_shoot tests, since they are rather slow, and the code
# isn't different for the different beta values.
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_moffat_properties():
"""Test some basic properties of the SBMoffat profile.
"""
import time
t1 = time.time()
# Code was formerly:
# mySBP = galsim.Moffat(beta=2.0, truncationFWHM=2, flux=test_flux, half_light_radius=1)
#
# ...but this is no longer quite so simple since we changed the handling of trunc to be in
# physical units. However, the same profile can be constructed using
# fwhm=1.4686232496771867,
# as calculated by interval bisection in devutils/external/calculate_moffat_radii.py
fwhm_backwards_compatible = 1.4686232496771867
psf = galsim.Moffat(beta=2.0, fwhm=fwhm_backwards_compatible,
trunc=2*fwhm_backwards_compatible, flux=test_flux)
# Check that we are centered on (0, 0)
cen = galsim.PositionD(0, 0)
np.testing.assert_equal(psf.centroid(), cen)
# Check Fourier properties
np.testing.assert_almost_equal(psf.maxK(), 11.613036117918105)
np.testing.assert_almost_equal(psf.stepK(), 0.62831853071795873)
np.testing.assert_almost_equal(psf.kValue(cen), test_flux+0j)
np.testing.assert_almost_equal(psf.getHalfLightRadius(), 1.0)
np.testing.assert_almost_equal(psf.getFWHM(), fwhm_backwards_compatible)
np.testing.assert_almost_equal(psf.xValue(cen), 0.50654651638242509)
# Now create the same profile using the half_light_radius:
psf = galsim.Moffat(beta=2.0, half_light_radius=1.,
trunc=2*fwhm_backwards_compatible, flux=test_flux)
np.testing.assert_equal(psf.centroid(), cen)
np.testing.assert_almost_equal(psf.maxK(), 11.613036112206663)
np.testing.assert_almost_equal(psf.stepK(), 0.62831853071795862)
np.testing.assert_almost_equal(psf.kValue(cen), test_flux+0j)
np.testing.assert_almost_equal(psf.getHalfLightRadius(), 1.0)
np.testing.assert_almost_equal(psf.getFWHM(), fwhm_backwards_compatible)
np.testing.assert_almost_equal(psf.xValue(cen), 0.50654651638242509)
# Check input flux vs output flux
for inFlux in np.logspace(-2, 2, 10):
psfFlux = galsim.Moffat(2.0, fwhm=fwhm_backwards_compatible,
trunc=2*fwhm_backwards_compatible, flux=inFlux)
outFlux = psfFlux.getFlux()
np.testing.assert_almost_equal(outFlux, inFlux)
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_moffat_radii():
"""Test initialization of Moffat with different types of radius specification.
"""
import time
t1 = time.time()
import math
test_beta = 2.
# Test constructor using half-light-radius:
test_gal = galsim.Moffat(flux = 1., beta=test_beta, half_light_radius = test_hlr)
hlr_sum = radial_integrate(test_gal, 0., test_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in Moffat constructor with half-light radius")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Moffat initialized with half-light radius")
# test that getScaleRadius() method provides correct scale
got_scale = test_gal.getScaleRadius()
test_scale_ratio = (test_gal.xValue(galsim.PositionD(got_scale, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'scale ratio = ', test_scale_ratio
np.testing.assert_almost_equal(
test_scale_ratio, 2.**(-test_beta), decimal=4,
err_msg="Error in scale radius for Moffat initialized with half-light radius")
# Test constructor using scale radius:
test_gal = galsim.Moffat(flux = 1., beta=test_beta, scale_radius = test_scale[0])
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_scale[0],0)) / center
print 'scale ratio = ',ratio
np.testing.assert_almost_equal(
ratio, pow(2,-test_beta), decimal=4,
err_msg="Error in Moffat constructor with scale")
# then test that image indeed has the matching properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (profile initialized with scale_radius) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for Moffat initialized with scale radius.")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Moffat initialized with scale radius")
# Test constructor using FWHM:
test_gal = galsim.Moffat(flux = 1., beta=test_beta, fwhm = test_fwhm)
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_fwhm/2.,0)) / center
print 'fwhm ratio = ',ratio
np.testing.assert_almost_equal(
ratio, 0.5, decimal=4,
err_msg="Error in Moffat constructor with fwhm")
# then test that image indeed has the matching properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (profile initialized with FWHM) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for Moffat initialized with FWHM.")
# test that getScaleRadius() method provides correct scale
got_scale = test_gal.getScaleRadius()
test_scale_ratio = (test_gal.xValue(galsim.PositionD(got_scale, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'scale ratio = ', test_scale_ratio
np.testing.assert_almost_equal(
test_scale_ratio, 2.**(-test_beta), decimal=4,
err_msg="Error in scale radius for Moffat initialized with scale radius")
# Now repeat everything using a severe truncation. (Above had no truncation.)
# Test constructor using half-light-radius:
test_gal = galsim.Moffat(flux = 1., beta=test_beta, half_light_radius = test_hlr,
trunc=2*test_hlr)
hlr_sum = radial_integrate(test_gal, 0., test_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in Moffat constructor with half-light radius")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Moffat initialized with half-light radius")
# test that getScaleRadius() method provides correct scale
got_scale = test_gal.getScaleRadius()
test_scale_ratio = (test_gal.xValue(galsim.PositionD(got_scale, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'scale ratio = ', test_scale_ratio
np.testing.assert_almost_equal(
test_scale_ratio, 2.**(-test_beta), decimal=4,
err_msg="Error in scale radius for Moffat initialized with half-light radius")
# Test constructor using scale radius:
test_gal = galsim.Moffat(flux=1., beta=test_beta, trunc=2*test_scale[0],
scale_radius=test_scale[0])
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_scale[0],0)) / center
print 'scale ratio = ', ratio
np.testing.assert_almost_equal(
ratio, pow(2,-test_beta), decimal=4,
err_msg="Error in Moffat constructor with scale")
# then test that image indeed has the matching properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (truncated profile initialized with scale_radius) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for truncated Moffat "+
"initialized with scale radius.")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for truncated Moffat initialized with scale radius")
# Test constructor using FWHM:
test_gal = galsim.Moffat(flux=1, beta=test_beta, trunc=2.*test_fwhm,
fwhm = test_fwhm)
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_fwhm/2.,0)) / center
print 'fwhm ratio = ', ratio
np.testing.assert_almost_equal(
ratio, 0.5, decimal=4,
err_msg="Error in Moffat constructor with fwhm")
# then test that image indeed has the matching properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (truncated profile initialized with FWHM) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=4,
err_msg="Error in half light radius for truncated Moffat initialized with FWHM.")
# test that getScaleRadius() method provides correct scale
got_scale = test_gal.getScaleRadius()
test_scale_ratio = (test_gal.xValue(galsim.PositionD(got_scale, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'scale ratio = ', test_scale_ratio
np.testing.assert_almost_equal(
test_scale_ratio, 2.**(-test_beta), decimal=4,
err_msg="Error in scale radius for truncated Moffat initialized with scale radius")
# Check that the getters don't work after modifying the original.
test_gal_shear = test_gal.copy()
print 'beta = ',test_gal_shear.getBeta()
print 'fwhm = ',test_gal_shear.getFWHM()
print 'hlr = ',test_gal_shear.getHalfLightRadius()
print 'scale = ',test_gal_shear.getScaleRadius()
test_gal_shear.applyShear(g1=0.3, g2=0.1)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getBeta");
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getFWHM");
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius")
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getScaleRadius");
except ImportError:
pass
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_moffat_flux_scaling():
"""Test flux scaling for Moffat.
"""
import time
t1 = time.time()
# decimal point to go to for parameter value comparisons
param_decimal = 12
for test_beta in [ 1.5, 2., 2.5, 3., 3.8 ]:
for test_trunc in [ 0., 8.5 ]:
# init with scale_radius only (should be ok given last tests)
obj = galsim.Moffat(scale_radius=test_scale[0], beta=test_beta, trunc=test_trunc,
flux=test_flux)
obj *= 2.
np.testing.assert_almost_equal(
obj.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __imul__.")
obj = galsim.Moffat(scale_radius=test_scale[0], beta=test_beta, trunc=test_trunc,
flux=test_flux)
obj /= 2.
np.testing.assert_almost_equal(
obj.getFlux(), test_flux / 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __idiv__.")
obj = galsim.Moffat(scale_radius=test_scale[0], beta=test_beta, trunc=test_trunc,
flux=test_flux)
obj2 = obj * 2.
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __rmul__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __rmul__ (result).")
obj = galsim.Moffat(scale_radius=test_scale[0], beta=test_beta, trunc=test_trunc,
flux=test_flux)
obj2 = 2. * obj
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __mul__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux * 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __mul__ (result).")
obj = galsim.Moffat(scale_radius=test_scale[0], beta=test_beta, trunc=test_trunc,
flux=test_flux)
obj2 = obj / 2.
# First test that original obj is unharmed... (also tests that .copy() is working)
np.testing.assert_almost_equal(
obj.getFlux(), test_flux, decimal=param_decimal,
err_msg="Flux param inconsistent after __div__ (original).")
# Then test new obj2 flux
np.testing.assert_almost_equal(
obj2.getFlux(), test_flux / 2., decimal=param_decimal,
err_msg="Flux param inconsistent after __div__ (result).")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_kolmogorov():
"""Test the generation of a specific Kolmogorov profile using SBProfile against a known result.
"""
import time
t1 = time.time()
mySBP = galsim.SBKolmogorov(lam_over_r0=1.5, flux=test_flux)
# This savedImg was created from the SBKolmogorov implementation in
# commit c8efd74d1930157b1b1ffc0bfcfb5e1bf6fe3201
# It would be nice to get an independent calculation here...
#savedImg = galsim.ImageF(128,128)
#mySBP.draw(image=savedImg, dx=0.2)
#savedImg.write(os.path.join(imgdir, "kolmogorov.fits"))
savedImg = galsim.fits.read(os.path.join(imgdir, "kolmogorov.fits"))
myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
mySBP.draw(myImg.view())
printval(myImg, savedImg)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Kolmogorov profile disagrees with expected result")
# Repeat with the GSObject version of this:
kolm = galsim.Kolmogorov(lam_over_r0=1.5, flux=test_flux)
kolm.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Kolmogorov disagrees with expected result")
# Check with default_params
kolm = galsim.Kolmogorov(lam_over_r0=1.5, flux=test_flux, gsparams=default_params)
kolm.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Kolmogorov with default_params disagrees with expected result")
kolm = galsim.Kolmogorov(lam_over_r0=1.5, flux=test_flux, gsparams=galsim.GSParams())
kolm.draw(myImg, normalization="surface brightness", use_true_center=False)
np.testing.assert_array_almost_equal(
myImg.array, savedImg.array, 5,
err_msg="Using GSObject Kolmogorov with GSParams() disagrees with expected result")
# Test photon shooting.
do_shoot(kolm,myImg,"Kolmogorov")
# Test kvalues
do_kvalue(kolm, "Kolmogorov")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_kolmogorov_properties():
"""Test some basic properties of the Kolmogorov profile.
"""
import time
t1 = time.time()
lor = 1.5
psf = galsim.Kolmogorov(lam_over_r0=lor, flux=test_flux)
# Check that we are centered on (0, 0)
cen = galsim.PositionD(0, 0)
np.testing.assert_equal(psf.centroid(), cen)
# Check Fourier properties
np.testing.assert_almost_equal(psf.maxK(), 8.6440505245909858, 9)
np.testing.assert_almost_equal(psf.stepK(), 0.36982048503361376, 9)
np.testing.assert_almost_equal(psf.kValue(cen), test_flux+0j)
np.testing.assert_almost_equal(psf.getLamOverR0(), lor)
np.testing.assert_almost_equal(psf.getHalfLightRadius(), lor * 0.554811)
np.testing.assert_almost_equal(psf.getFWHM(), lor * 0.975865)
np.testing.assert_almost_equal(psf.xValue(cen), 0.6283160485127478)
# Check input flux vs output flux
lors = [1, 0.5, 2, 5]
for lor in lors:
psf = galsim.Kolmogorov(lam_over_r0=lor, flux=test_flux)
out_flux = psf.getFlux()
np.testing.assert_almost_equal(out_flux, test_flux,
err_msg="Flux of Kolmogorov (getFlux) is incorrect.")
# Also check the realized flux in a drawn image
dx = lor / 10.
img = galsim.ImageF(256,256, scale=dx)
pix = galsim.Pixel(dx)
conv = galsim.Convolve([psf,pix])
conv.draw(image=img)
out_flux = img.array.sum()
np.testing.assert_almost_equal(out_flux, test_flux, 3,
err_msg="Flux of Kolmogorov (image array) is incorrect.")
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_kolmogorov_radii():
"""Test initialization of Kolmogorov with different types of radius specification.
"""
import time
t1 = time.time()
import math
# Test constructor using lambda/r0
lors = [1, 0.5, 2, 5]
for lor in lors:
print 'lor = ',lor
test_gal = galsim.Kolmogorov(flux=1., lam_over_r0=lor)
np.testing.assert_almost_equal(
lor, test_gal.getLamOverR0(), decimal=9,
err_msg="Error in Kolmogorov, lor != getLamOverR0")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
print 'got_fwhm = ',got_fwhm
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Kolmogorov initialized with half-light radius")
# then test that image indeed has the correct HLR properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
print 'got_hlr = ',got_hlr
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=3,
err_msg="Error in half light radius for Kolmogorov initialized with lam_over_r0.")
# Test constructor using half-light-radius:
test_gal = galsim.Kolmogorov(flux=1., half_light_radius = test_hlr)
hlr_sum = radial_integrate(test_gal, 0., test_hlr, 1.e-4)
print 'hlr_sum = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=3,
err_msg="Error in Kolmogorov constructor with half-light radius")
# test that getFWHM() method provides correct FWHM
got_fwhm = test_gal.getFWHM()
print 'got_fwhm = ',got_fwhm
test_fwhm_ratio = (test_gal.xValue(galsim.PositionD(.5 * got_fwhm, 0.)) /
test_gal.xValue(galsim.PositionD(0., 0.)))
print 'fwhm ratio = ', test_fwhm_ratio
np.testing.assert_almost_equal(
test_fwhm_ratio, 0.5, decimal=4,
err_msg="Error in FWHM for Kolmogorov initialized with half-light radius")
# Test constructor using FWHM:
test_gal = galsim.Kolmogorov(flux=1., fwhm = test_fwhm)
center = test_gal.xValue(galsim.PositionD(0,0))
ratio = test_gal.xValue(galsim.PositionD(test_fwhm/2.,0)) / center
print 'fwhm ratio = ',ratio
np.testing.assert_almost_equal(
ratio, 0.5, decimal=4,
err_msg="Error in Kolmogorov constructor with fwhm")
# then test that image indeed has the correct HLR properties when radially integrated
got_hlr = test_gal.getHalfLightRadius()
print 'got_hlr = ',got_hlr
hlr_sum = radial_integrate(test_gal, 0., got_hlr, 1.e-4)
print 'hlr_sum (profile initialized with fwhm) = ',hlr_sum
np.testing.assert_almost_equal(
hlr_sum, 0.5, decimal=3,
err_msg="Error in half light radius for Gaussian initialized with FWHM.")
# Check that the getters don't work after modifying the original.
test_gal_shear = test_gal.copy()
print 'fwhm = ',test_gal_shear.getFWHM()
print 'hlr = ',test_gal_shear.getHalfLightRadius()
print 'lor = ',test_gal_shear.getLamOverR0()
test_gal_shear.applyShear(g1=0.3, g2=0.1)
try:
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getFWHM");
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getHalfLightRadius");
np.testing.assert_raises(AttributeError, getattr, test_gal_shear, "getLamOverR0");
except ImportError:
pass
t2 = time.time()
print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_kolmogorov_flux_scaling():
    """Test flux scaling for Kolmogorov.

    Exercises the in-place (*=, /=) and binary (obj*2, 2*obj, obj/2) flux
    scaling operators, checking both that the result carries the scaled flux
    and that the original object is unharmed by the non-in-place operators.
    """
    import time
    t1 = time.time()
    # decimal point to go to for parameter value comparisons
    param_decimal = 12
    test_lor0 = 1.9
    # init with lam_over_r0 and flux only (should be ok given last tests)
    obj = galsim.Kolmogorov(lam_over_r0=test_lor0, flux=test_flux)
    obj *= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __imul__.")
    obj = galsim.Kolmogorov(lam_over_r0=test_lor0, flux=test_flux)
    obj /= 2.
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __idiv__.")
    obj = galsim.Kolmogorov(lam_over_r0=test_lor0, flux=test_flux)
    obj2 = obj * 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __rmul__ (result).")
    obj = galsim.Kolmogorov(lam_over_r0=test_lor0, flux=test_flux)
    obj2 = 2. * obj
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux * 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __mul__ (result).")
    obj = galsim.Kolmogorov(lam_over_r0=test_lor0, flux=test_flux)
    obj2 = obj / 2.
    # First test that original obj is unharmed... (also tests that .copy() is working)
    np.testing.assert_almost_equal(
        obj.getFlux(), test_flux, decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (original).")
    # Then test new obj2 flux
    np.testing.assert_almost_equal(
        obj2.getFlux(), test_flux / 2., decimal=param_decimal,
        err_msg="Flux param inconsistent after __div__ (result).")
    t2 = time.time()
    # NOTE: Python 2 print statement -- this test module predates Python 3.
    print 'time for %s = %.2f'%(funcname(),t2-t1)
if __name__ == "__main__":
    # Run the full base-profile test suite when invoked as a script,
    # in the order the profiles are defined in this module.
    test_gaussian()
    test_gaussian_properties()
    test_gaussian_radii()
    test_gaussian_flux_scaling()
    test_exponential()
    test_exponential_properties()
    test_exponential_radii()
    test_exponential_flux_scaling()
    test_sersic()
    test_sersic_radii()
    test_sersic_flux_scaling()
    test_sersic_05()
    test_sersic_1()
    test_airy()
    test_airy_radii()
    test_airy_flux_scaling()
    test_box()
    test_moffat()
    test_moffat_properties()
    test_moffat_radii()
    test_moffat_flux_scaling()
    test_kolmogorov()
    test_kolmogorov_properties()
    test_kolmogorov_radii()
    test_kolmogorov_flux_scaling()
| mardom/GalSim | tests/test_base.py | Python | gpl-3.0 | 79,805 | [
"Galaxy",
"Gaussian"
] | c25ac072e1d45c1f043a62f1ffd7d0ca5195590f2823be8c32628adbaa48eaf2 |
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# Functions / constants common to multiple modules
# Month names
# Table of (month number, full month name) pairs, January..December.
long_monthnames = list(enumerate([
  'January', 'February', 'March', 'April', 'May', 'June',
  'July', 'August', 'September', 'October', 'November', 'December',
], 1))
# Same table with names truncated to their 3-letter abbreviations.
# Built with an explicit for-loop rather than a list comprehension:
# comprehension variables no longer leak into the enclosing scope under
# Python 3, which would make the original 'del n,month' cleanup raise
# NameError there.  For-loop variables leak under both 2 and 3.
short_monthnames = []
for _num, _name in long_monthnames:
  short_monthnames.append((_num, _name[:3]))
del _num, _name
# Unit conversion
from units import define_conversion, define_unit, conversion_factor
# Define the molar masses (grams per mole) for the species handled by
# these diagnostics.
define_conversion ('mol(CO2)', '44.01 g(CO2)')
define_conversion ('mol(CH4)', '16.04 g(CH4)')
define_conversion ('mol(dry_air)', '28.97 g(dry_air)')
define_conversion ('mol(H2O)', '18.01528 g(H2O)')
define_conversion ('mol(CO)', '28.010 g(CO)')
define_conversion ('mol(OH)', '17.00 g(OH)') # 17.01 according to wikipedia
                                             # 17.00 according to GEM-MACH
# The following is a hack to get mass in terms of carbon atoms
# I.e. to allow converting mass to Pg(C)
define_conversion ('g(CO2)', repr(12.01/44.01) + ' g(C)')
define_conversion ('g(CH4)', repr(12.01/16.04) + ' g(C)')
define_conversion ('g(CO)', repr(12.01/28.010) + ' g(C)')
# For the purpose of these diagnostics, assume mole fractions are always with
# respect to air.
define_conversion ('molefraction', 'mol mol(dry_air)-1')
# Also, assume pressure is air pressure.
define_conversion ('Pa', 'kg(air) m-1 s-2')
# Helper method - get unit conversion context
def get_conversion_context (var, context=None):
  """Determine the context string used for unit conversions on *var*.

  Priority: an explicitly supplied *context*, then the variable's
  'specie' attribute, then the variable's name.
  """
  if context:
    return context
  return var.atts.get('specie') or var.name
# Convert a variable from one unit to another
def convert (var, units, context=None, table=None):
  """Return *var* expressed in the target *units*.

  Raises ValueError if the variable carries no 'units' attribute.  If the
  units already match, the variable is returned unchanged; otherwise a
  scaled copy is returned with its 'units' attribute (and the cache
  interface's 'low'/'high' attributes, if present) updated to match.
  """
  if 'units' not in var.atts:
    raise ValueError ("Variable '%s' has no units defined, can't do unit conversion!"%var.name)
  if var.atts['units'] == units: return var # No conversion necessary
  name = var.name
  context = get_conversion_context(var, context)
  scale = conversion_factor (var.atts['units'], units, context, table=table)
  # Multiplication produces a new variable, so the caller's copy is untouched.
  var = var * scale
  var.atts['units'] = units
  var.name = name
  # Extra parameters from the cache interface
  if 'low' in var.atts: var.atts['low'] *= scale
  if 'high' in var.atts: var.atts['high'] *= scale
  return var
# Helper methods to determine if something is of a particular kind of unit.
def can_convert (var, units, context=None, table=None):
  """Check whether *var* can be converted to *units* (True/False)."""
  try:
    convert (var, units, context=context, table=table)
  except ValueError:
    return False
  return True
# Helper method - make a copy of a variable.
def copy_var (var):
  """Return a shallow copy of *var* with its own independent .atts container."""
  from copy import copy
  clone = copy(var)
  # Copy the attribute container too, so edits don't leak back to the original.
  clone.atts = copy(clone.atts)
  return clone
# Helper method - for the given field and units, determine what other fields
# are needed to do the unit conversion.
# Output: list of extra variable names, and list of exponents (+/-1) to apply
# to the variables (+1 = multiply by that variable, -1 = divide by that variable).
def _what_extra_fields (data, fieldname, units, table):
  """Determine which auxiliary fields are needed to convert *fieldname* to *units*.

  Returns (fields, exponents): names of extra variables (e.g. 'dp',
  'cell_area') and, for each, +1 to multiply by it or -1 to divide by it.
  Raises ValueError if no combination of the available extra fields
  produces the requested units.
  """
  from itertools import product
  from units import simplify, inverse
  possible_extra_fields = []
  possible_extra_units = []
  # Collect whichever candidate helper fields the dataset actually provides.
  for f in ['dry_air', 'cell_area', 'dp', 'gravity', 'density']:
    try:
      v = data.find_best(f)
      possible_extra_fields.append(f)
      possible_extra_units.append(v.atts['units'])
    except KeyError: pass
  var = data.find_best(fieldname)
  errmsg = "Don't know how to convert %s %s from '%s' to '%s'. Extra fields tried: %s"%(getattr(data,'name',''), fieldname, var.atts['units'], units, possible_extra_fields)
  # Apply proper context to the target units
  context = get_conversion_context(var)
  units = simplify(units, global_context=context, table=table)
  # Try all combinations of extra fields, see what gives the expected units.
  # Each candidate field may be absent (0), multiplied (+1), or divided (-1).
  for exps in product(*[[-1,0,1]]*len(possible_extra_fields)):
    test = var.atts['units']
    for u,ex in zip(possible_extra_units, exps):
      if ex == 0: continue
      if ex == -1: u = inverse(u)
      test = test + ' ' + u
    # To check for a match, see if the only difference between the units is a
    # scale factor
    # First, reduce out all context-free units
    test = simplify(test + ' ' + inverse(units), table=table)
    # Then, apply the variable context to the remaining units, and see if
    # anything else cancels out.
    test = simplify(test, global_context=context, table=table)
    # See if this reduces to a scalar number (or nothing at all).
    if test == '': test = '1'
    try:
      float(test)
      out = zip(*[(f,ex) for f,ex in zip(possible_extra_fields,exps) if ex!=0])
      if len(out) > 0: return out
      return [], []
    except ValueError: pass
  raise ValueError (errmsg)
# Helper method - find the field in the dataset, and apply some unit conversion.
# Handle some extra logic, such as going between dry and moist air.
def find_and_convert (product, fieldnames, units, **conditions):
  """Find the given field(s) in *product* and convert to the given units.

  Handles extra logic such as going between dry and moist air, and
  automatically multiplies/divides by auxiliary fields (dp, cell_area,
  gravity, ...) when the target units require it.  *units* entries set to
  None skip conversion for the corresponding field.  Returns a single
  variable when *fieldnames* is a string, otherwise a list.
  """
  from pygeode.dataset import Dataset
  from pygeode.var import Var
  from eccas_diags.interfaces import DataInterface
  from units import copy_default_table, define_conversion, parse_units, simplify, inverse
  # Allow a list of variables to be passed in.
  if isinstance(product,list) and isinstance(product[0],Var):
    product = Dataset(product)
  # Allow a single Dataset to be passed in.
  if isinstance(product,Dataset):
    product = DataInterface([product])
  return_list = True
  if isinstance(fieldnames,str):
    fieldnames = [fieldnames]
    return_list = False
  if isinstance(units,str): units = [units]*len(fieldnames)
  # Create a separate unit table for each variable, to handle things like
  # semi-dry air uniquely.
  tables = [copy_default_table() for fieldname in fieldnames]
  # Test table, with no entry for dry air.
  # So we can partially reduce the units without going from moles to mass.
  test_table = copy_default_table()
  del test_table['mol'].conversions['dry_air']
  # Convert semi-dry air based on the type of output units
  for fieldname, out_units, table in zip(fieldnames, units, tables):
    in_units = product.find_best(fieldname).atts.get('units','')
    in_units = simplify(in_units,table=test_table)
    # Allow the user to skip unit conversion by setting output units to None
    if out_units is None: continue
    out_units = simplify(out_units,table=test_table)
    all_units = in_units.split() + out_units.split()
    # If looking at molefractions, treat as dry air.
    if 'mol(semidry_air)-1' in all_units and 'mol(dry_air)-1' in all_units:
      define_conversion ('mol(semidry_air)', 'mol(dry_air)', table=table)
    # If converting molefractions to mass, then treat as dry air for the
    # purpose of getting mass, then redefine it as moist air afterwards.
    elif 'mol(semidry_air)-1' in all_units and 'g(air)-1' in all_units:
      define_conversion ('mol(semidry_air)', 'mol(dry_air) g(dry_air)-1 g(air)', table=table)
    # If looking at mass, then treat as moist air.
    elif 'g(semidry_air)-1' in all_units and 'g(air)-1' in all_units:
      define_conversion ('g(semidry_air)', 'g(air)', table=table)
    # If converting mass to mixing ratio, then treat as dry air.
    elif 'g(semidry_air)-1' in all_units and 'mol(dry_air)-1' in all_units:
      define_conversion ('g(semidry_air)', 'g(dry_air)', table=table)
  # Find out what extra fields are needed for the conversions
  extra_fields = []
  exponents = [] # +1 = multiply, -1 = divide
  for fieldname, unit, table in zip(fieldnames,units,tables):
    # Allow the user to skip unit conversion by setting output units to None
    if unit is None: continue
    f, exp = _what_extra_fields(product, fieldname, unit, table=table)
    extra_fields.extend(f)
    exponents.extend(exp)
  # Reduce to a unique set of extra fields
  if len(extra_fields) > 0:
    extra_fields, exponents = zip(*set(zip(extra_fields,exponents)))
  # Get all fields (original and extra)
  vars = product.find_best(list(fieldnames)+list(extra_fields), **conditions)
  # Split into the two categories
  vars, extra_vars = vars[:len(fieldnames)], vars[len(fieldnames):]
  # Apply the extra fields
  for i,fieldname in enumerate(fieldnames):
    # Allow the user to skip unit conversion by setting output units to None
    if units[i] is None: continue
    F, exp = _what_extra_fields(product, fieldname, units[i], table=tables[i])
    extra = [extra_vars[extra_fields.index(f)] for f in F]
    for v, e in zip(extra,exp):
      unit = vars[i].atts['units']
      specie = vars[i].atts.get('specie',None)
      assert e in (1,-1), "Unhandled exponent %d"%e
      if e == 1:
        vars[i] *= v
        vars[i].atts['units'] = unit + ' ' + v.atts['units']
      elif e == -1:
        vars[i] /= v
        vars[i].atts['units'] = unit + ' ' + inverse(v.atts['units'])
      # Multiplying/dividing produced a new variable; restore the original
      # name and specie attribute on it.
      vars[i].name = fieldname
      if specie is not None:
        vars[i].atts['specie'] = specie
  # Do any remaining unit conversions.
  # Skip conversions when output unit set to None.
  vars = [convert(v, unit, table=table) if unit is not None else v for v,unit,table in zip(vars,units,tables)]
  if return_list:
    return vars
  else:
    return vars[0]
# Gravitational acceleration constant (9.80616).
grav = .980616e+1 # Taken from GEM-MACH file chm_consphychm_mod.ftn90

# Normalize the time axes to the same start date / units
def fix_timeaxis (data):
  """Put the time axis of *data* (Var or Dataset) on a common footing:
  units of days since 2009-01-01.  Returns the input unchanged if it has
  no time axis."""
  from pygeode.timeaxis import StandardTime
  from pygeode.dataset import Dataset
  if not hasattr(data,'time'): return data # No time axis found?
  startdate = dict(year=2009, month=1, day=1)
  time = data.time
  time = StandardTime(units='days', startdate=startdate, **time.auxarrays)
  if isinstance(data,Dataset):
    data = Dataset([v.replace_axes(time=time) if v.hasaxis('time') else v for v in data], atts=data.atts)
  elif data.hasaxis('time'):
    data = data.replace_axes(time=time)
  return data
# Calculate the number of days in a year, along a time axis.
def ndays_in_year (time):
  """Return a Var over *time* giving the number of days in each timestep's
  calendar month.

  NOTE(review): despite the name, monthrange() gives the length of the
  *month*, not the year -- confirm this matches the intended usage.
  """
  from calendar import monthrange
  from pygeode.var import Var
  year = time.year
  month = time.month
  ndays = [monthrange(y,m)[1] for y,m in zip(year,month)]
  ndays = Var(axes=[time], name='ndays', values=ndays)
  return ndays
# Convert a string to an int or float, if possible. (Otherwise, keep it as a string)
def best_type (x):
  """Coerce *x* to an int if possible, else a float, else return it unchanged."""
  for caster in (int, float):
    try:
      return caster(x)
    except ValueError:
      pass
  return x
# Find overlapping time axis between two variables
def same_times (*varlist):
  """Restrict all input variables to their common (overlapping) time values.

  Raises ValueError when there is no overlap.
  """
  # Use the same start date (so relative values are comparable)
  varlist = map(fix_timeaxis,varlist)
  # Get a common set of time values
  times = [set(var.time.values) for var in varlist]
  # Python 2 builtin reduce; intersect all the per-variable time sets.
  times = reduce(set.intersection,times,times[0])
  times = sorted(times)
  if len(times) == 0:
    raise ValueError ("No overlapping timesteps found for %s"%(",".join(v.name for v in varlist)))
  return [var(l_time=times) for var in varlist]
# Grab the first available timestep of a variable, and remove the time info.
def first_timestep (var):
  """Select the first available timestep of *var*, removing the time info
  (and forecast info, when present)."""
  for axisname in ('time', 'forecast'):
    if var.hasaxis(axisname):
      var = var(**{'i_' + axisname: 0}).squeeze(axisname)
  return var
# Convert a time axis to a list of datetime objects.
def to_datetimes(taxis):
  """Convert a time axis into a list of datetime objects."""
  from datetime import datetime, timedelta
  origin = datetime(**taxis.startdate)
  return [origin + timedelta(**{taxis.units: offset}) for offset in taxis.values]
# Detect regularly-spaced data, and "fill in" the gaps with NaN values.
# Note: loads ALL the data into memory, so use with caution.
def detect_gaps(var):
  """Detect regularly-spaced data and fill the gaps with NaN values.

  The nominal time step is taken as the most common spacing between
  consecutive times; highly irregular axes are returned unchanged.
  Note: loads ALL the data into memory, so use with caution.
  """
  import numpy as np
  from collections import Counter
  from pygeode.var import Var, copy_meta
  # If no time values, do nothing.
  if len(var.time) <= 1: return var
  # Most common spacing between consecutive timesteps = nominal step size.
  dt, count = Counter(np.diff(var.time.values)).most_common(1)[0]
  # If we have an extremely irregular time axis, then don't try to make it
  # regular (e.g. for flask data, which is taken whenever they remember to
  # do it?)
  if count < len(var.time)/10+2: return var
  start = var.time.values[0]
  stop = var.time.values[-1]
  n = int(round((stop-start)/dt)) + 1
  full_time = np.linspace(start, stop, n)
  # Start from an all-NaN array, then scatter the available data into it.
  full_values = np.empty((len(full_time),)+var.shape[1:],dtype=var.dtype)
  full_values[:] = float('nan')
  indices = np.asarray(np.round((var.time.values-start)/dt),dtype=int)
  full_values[indices,...] = var.get()
  taxis = type(var.time)(startdate=var.time.startdate, units=var.time.units, values=full_time)
  outvar = Var(axes=(taxis,)+var.axes[1:], values=full_values)
  copy_meta (var, outvar)
  return outvar
# Adjust a lat/lon grid from -180,180 to 0,360
def rotate_grid (data):
  """Adjust a lat/lon grid from the -180..180 to the 0..360 convention,
  re-sorting the data along longitude.  No-op without a lon axis."""
  from pygeode.axis import Lon
  import numpy as np
  if not data.hasaxis('lon'): return data
  lon = np.array(data.getaxis('lon').values)
  # Check if already rotated (second longitude positive => 0..360 convention)
  if lon[1] > 0: return data
  lon[lon<0] += 360.
  lon = Lon(lon)
  data = data.replace_axes(lon=lon)
  # Re-sort the data
  return data.sorted('lon')

def unrotate_grid (data):
  """Adjust a lat/lon grid from the 0..360 to the -180..180 convention,
  re-sorting the data along longitude.  No-op without a lon axis."""
  from pygeode.axis import Lon
  import numpy as np
  if not data.hasaxis('lon'): return data
  lon = np.array(data.getaxis('lon').values)
  # Check if already unrotated (second longitude negative => -180..180)
  if lon[1] < 0: return data
  lon[lon>=180] -= 360.
  lon = Lon(lon)
  data = data.replace_axes(lon=lon)
  # Re-sort the data
  return data.sorted('lon')

# Make sure the latitudes are monotonically increasing
def increasing_latitudes (data):
  """Flip the data along latitude if the latitude values are decreasing."""
  if not data.hasaxis('lat'): return data
  # Check if already increasing
  lat = data.getaxis('lat')
  if lat.values[1] > lat.values[0]: return data
  slices = [slice(None)] * data.naxes
  slices[data.whichaxis('lat')] = slice(None,None,-1)
  data = data.slice[slices]
  return data
# Check if we have a repeated longitude (wraps around)
def have_repeated_longitude (data):
  """Check if the longitude axis wraps around (first and last longitudes
  are a multiple of 360 degrees apart)."""
  import numpy as np
  if not data.hasaxis('lon'): return False
  first = data.lon.values[0]
  last = data.lon.values[-1]
  return bool(np.allclose((last - first) % 360, 0.))
# Remove repeated longitude from global data
def remove_repeated_longitude (data):
  """Drop the final (wrap-around) longitude from global data, if present."""
  if have_repeated_longitude(data):
    slices = [slice(None)]*data.naxes
    slices[data.whichaxis('lon')] = slice(0,len(data.lon)-1)
    data = data.slice[slices]
  return data

# Add an extra longitude for global data
def add_repeated_longitude (data):
  """Append a wrap-around longitude (first longitude + 360) to global data,
  unless one is already present."""
  from pygeode.axis import Lon
  import numpy as np
  import warnings
  if not data.hasaxis('lon'): return data
  # Check if we already have a repeated longitude
  if have_repeated_longitude(data): return data
  # Otherwise, add it in as an extra array index
  lon = np.array(data.getaxis('lon').values)
  # NOTE: Python 2 list concatenation (range() returns a list there).
  lon_indices = range(len(lon)) + [0]
  slices = [slice(None)]*data.naxes
  slices[data.whichaxis('lon')] = lon_indices
  # Temporarily disable warning about divide by zero, triggered because we
  # are repeated an axis value, which screws up the code for computing a
  # default relative tolerance
  #TODO: refactor this routine to avoid this trick.
  with warnings.catch_warnings():
    warnings.filterwarnings("ignore", "divide by zero")
    data = data.slice[slices]
  # Construct a new longitude axis with the repeated longitude
  lon = lon[lon_indices]
  lon[-1] += 360.
  lon = Lon(lon)
  data = data.replace_axes(lon=lon)
  return data
# Compute grid cell areas
# If flat is True, then use a 'flattened' surface for latitude weighting.
# E.g., use approximation cos(lat_center)*(lat_upper-lat_lower)
# The default is to use sin(lat_upper) - sin(lat_lower)
def get_area (latvar, lonvar, flat=False):
  """Compute grid cell areas (m2) for a regular lat/lon grid.

  If *flat* is True, use a 'flattened' surface for latitude weighting,
  i.e. the approximation cos(lat_center)*(lat_upper-lat_lower).
  The default is to use sin(lat_upper) - sin(lat_lower).
  """
  import numpy as np
  from pygeode.var import Var
  from math import pi
  # Earth radius used for the area calculation.
  r = .637122e7 # Taken from consphy.cdk
  lats = latvar.values * (pi / 180)
  # Get the boundaries of the latitudes
  lat_bounds = (lats[:-1] + lats[1:]) * 0.5
  # Including the poles
  lat_bounds = np.concatenate([[-pi/2], lat_bounds, [pi/2]])
  # Get the boundaries of the longitudes.
  # Assume the longitudes are equally spaced and monotonically increasing.
  lons = lonvar.values * (pi / 180)
  lon_bounds = np.empty([len(lons)+1], dtype=lons.dtype)
  lon_bounds[1:-1] = (lons[0:-1] + lons[1:]) / 2
  # Extrapolate the outermost boundaries from the neighbouring spacing.
  lon_bounds[0] = lon_bounds[1] - (lon_bounds[2] - lon_bounds[1])
  lon_bounds[-1] = lon_bounds[-2] + (lon_bounds[-2] - lon_bounds[-3])
  # Length in y direction
  dlat = abs(np.diff(lat_bounds))
  dlat = dlat.reshape([-1,1])
  # Length in x direction
  dlon = abs(np.diff(lon_bounds))
  dlon = dlon.reshape([1,-1])
  # Define some trig functions on latitude.
  clat = np.cos(lats).reshape([-1,1])
  dsinlat = abs(np.diff(np.sin(lat_bounds)))
  dsinlat = dsinlat.reshape([-1,1])
  if flat is True:
    dxdy = r*r * clat * dlat * dlon
  else:
    dxdy = r*r * dsinlat * dlon
  dxdy = Var([latvar, lonvar], values=dxdy)
  dxdy.atts['units'] = 'm2'
  return dxdy

# Compute an area with blending weight considered.
# For yin-yan grids, this factors in the relative contribution of each grid
# cell to the overlapping grids.
# For other grids, this is identical to cell_area.
def get_blended_area (varlist):
  """Return a 'blended_area' variable: cell_area scaled by subgrid_weight
  when both are present, or the plain lat/lon cell_area otherwise.
  Raises TypeError when neither form can be constructed."""
  cell_area = None
  subgrid_weight = None
  for var in varlist:
    # A cell_area already on a plain lat/lon grid needs no blending weight.
    if var.hasaxis('lat') and var.hasaxis('lon') and var.name == "cell_area":
      return var.rename("blended_area")
    elif var.name == "cell_area":
      cell_area = var
    elif var.name == "subgrid_weight":
      subgrid_weight = var
  if cell_area is not None and subgrid_weight is not None:
    blended = (cell_area * subgrid_weight).rename("blended_area")
    blended.atts['units'] = cell_area.atts['units']
    return blended
  raise TypeError ("Unable to construct blended area.")
# Helper method to compute the change in pressure within a vertical layer.
def compute_dp (zaxis, p0):
  """Compute the change in pressure (Pa) within each vertical layer.

  Only FSTD log-hybrid ('zeta') thermodynamic levels are supported: the
  layer interfaces are taken from the corresponding momentum levels.
  Hybrid ('eta') or other axis types raise TypeError / ValueError.
  """
  from pygeode_rpn import fstd
  from pygeode.var import Var
  from pygeode.ufunc import exp, log
  import math
  import numpy as np
  p0 = convert(p0, 'Pa')
  # eta coordinates?
  if isinstance(zaxis,fstd.Hybrid):
    raise TypeError("Not enough information to compute pressure interfaces on hybrid levels.")
  # zeta coordinates?
  elif isinstance(zaxis,fstd.LogHybrid):
    zeta = zaxis
    # Get the full set of coefficients
    a_m = zeta.atts['a_m']
    b_m = zeta.atts['b_m']
    a_t = zeta.atts['a_t']
    b_t = zeta.atts['b_t']
    # Figure out if we have thermodynamic or momentum levels, and use the
    # other set of levels as the interfaces
    if set(zeta.A) <= set(a_m) and set(zeta.B) <= set(b_m):
      #a_int = a_t
      #b_int = b_t
      raise ValueError ("Not computing dp on momentum levels.")
    elif set(zeta.A) <= set(a_t) and set(zeta.B) <= set(b_t):
      a_int = a_m
      b_int = b_m
    else:
      raise ValueError ("Vertical axis must be entirely on model thermodynamic or momentum levels.")
    # Find indices of interfaces
    a_upper = []
    a_lower = []
    b_upper = []
    b_lower = []
    for a in zeta.A:
      # Locate each thermodynamic level between its bracketing interfaces.
      j = np.searchsorted(a_int, a)
      if j == 0: # Beyond actual model lid?
        a_upper.append(a_int[j])
        b_upper.append(b_int[j])
      else:
        a_upper.append(a_int[j-1])
        b_upper.append(b_int[j-1])
      if j == len(a_int) or a_int[j] == a: # Beyond model surface?
        a_lower.append(a_int[j-1])
        b_lower.append(b_int[j-1])
      else:
        a_lower.append(a_int[j])
        b_lower.append(b_int[j])
    # Define a dp operator
    a_upper = Var([zeta], values=a_upper)
    a_lower = Var([zeta], values=a_lower)
    b_upper = Var([zeta], values=b_upper)
    b_lower = Var([zeta], values=b_lower)
    # Interface pressures from the log-hybrid formula, then difference them.
    p_upper = exp(a_upper + b_upper*log(p0/zeta.atts['pref']))
    p_lower = exp(a_lower + b_lower*log(p0/zeta.atts['pref']))
    dp = p_lower - p_upper
  else:
    raise TypeError("Can't handle '%s' axis."%zaxis.__class__.__name__)
  if dp.hasaxis('forecast'):
    dp = dp.transpose('time','forecast','zaxis')
  elif dp.hasaxis('time'):
    dp = dp.transpose('time','zaxis')
  dp.name = 'dp'
  dp.atts['units'] = 'Pa'
  return dp
# Helper method to compute pressure levels from the given z-axis and surface pressure
def compute_pressure (zaxis, p0):
  """Compute 3D pressure levels (Pa) from a vertical axis and surface
  pressure *p0*.

  Supports FSTD 'eta' (Hybrid) and 'zeta' (LogHybrid) coordinates; any
  other axis type raises TypeError.
  """
  from pygeode_rpn import fstd
  from pygeode.ufunc import exp, log
  p0 = convert(p0, 'Pa')
  # eta coordinates?
  if isinstance(zaxis, fstd.Hybrid):
    eta = zaxis
    A = eta.auxasvar('A')
    B = eta.auxasvar('B')
    p = A + B * p0
  # zeta coordinates?
  elif isinstance(zaxis, fstd.LogHybrid):
    zeta = zaxis
    A = zeta.auxasvar('A')
    B = zeta.auxasvar('B')
    # Reference pressure for the log-hybrid formula.
    # (Cleanup: 'pref' was previously assigned but unused -- the expression
    # re-read zeta.atts['pref'] -- and an unused 'ptop' local was removed.)
    pref = zeta.atts['pref']
    p = exp(A + B * log(p0/pref))
  else:
    raise TypeError("Can't handle '%s' axis in this interface."%zaxis.__class__.__name__)
  if p.hasaxis('forecast'):
    p = p.transpose('time','forecast','zaxis')
  elif p.hasaxis('time'):
    p = p.transpose('time','zaxis')
  p.name = 'air_pressure'
  p.atts['units'] = 'Pa'
  return p
# Flatten out a variable that has both a date-of-origin axis and a forecast
# axis. Combine into a date-of-validity axis.
from pygeode.var import Var
class SquashForecasts(Var):
  """Present a variable with (date-of-origin, forecast) axes as having a
  single date-of-validity time axis (origin time + forecast offset)."""
  def __init__ (self, var):
    from pygeode.var import Var, copy_meta
    from pygeode.timeutils import reltime
    # Validity time = origin time + forecast lead time, computed in hours.
    origin_hours = reltime(var.time, units='hours').reshape([-1,1])
    forecast_hours = var.forecast.values.reshape([1,-1])
    validity_hours = origin_hours + forecast_hours
    # Construct new time axis
    time = type(var.time)(validity_hours.flatten(), units='hours', startdate = var.time.startdate)
    # Re-construct in the original units
    time = type(var.time)(startdate=var.time.startdate, units=var.time.units, **time.auxarrays)
    axes = [time]+[a for a in var.axes if a is not var.time and a is not var.forecast]
    Var.__init__(self, axes, dtype=var.dtype)
    copy_meta(var, self)
    self._var = var
  def getview (self, view, pbar):
    """Read the requested validity times by decomposing each index into
    its (origin, forecast) index pair in the wrapped variable."""
    import numpy as np
    out = np.empty(view.shape, dtype=self.dtype)
    nt = len(self._var.time)
    nf = len(self._var.forecast)
    v = view.map_to(self._var, strict=False)
    # Flattened validity index -> (origin index, forecast index).
    tslice = list(view.integer_indices[0]//nf)
    fslice = list(view.integer_indices[0]%nf)
    # Collect forecasts from the same origin date together
    # (can read them all at once)
    tf_pairs = []
    for i,(t,f) in enumerate(zip(tslice,fslice)):
      if len(tf_pairs) > 0 and t == tf_pairs[-1][1]:
        tf_pairs[-1][0].append(i)
        tf_pairs[-1][2].append(f)
      else: tf_pairs.append(([i],t,[f]))
    for i,t,f in tf_pairs:
      data = v.modify_slice(0,[t]).modify_slice(1,f).get(self._var)
      out[i,...] = data.reshape(data.shape[1:])
    pbar.update(100)
    return out
del Var
def squash_forecasts(var):
  """Collapse any forecast axis in *var* (Var or Dataset) into a
  date-of-validity time axis.  No-op if there is no forecast axis."""
  from pygeode.dataset import Dataset
  from pygeode.var import Var
  if not var.hasaxis('forecast'): return var
  if isinstance(var,Dataset):
    # Recurse over each variable in the dataset (Python 2 map returns a list).
    return Dataset(map(squash_forecasts,var), atts=var.atts)
  assert isinstance(var,Var), "Unhandled case '%s'"%type(var)
  return SquashForecasts(var)
# Make a field positive (remove negative values)
from pygeode.var import Var
class Positive(Var):
  """Wrap a variable so that all negative values are clipped to zero on read."""
  def __init__ (self, var):
    from pygeode.var import Var, copy_meta
    Var.__init__(self, var.axes, dtype=var.dtype)
    copy_meta(var, self)
    self._var = var
  def getview (self, view, pbar):
    import numpy as np
    out = np.array(view.get(self._var))
    # Clip negatives in place on the freshly-read copy.
    out[out<0] = 0
    pbar.update(100)
    return out
del Var
def positive(var):
  """Make a field positive (remove negative values)."""
  return Positive(var)
# Get a keyword / value that can be used to select a surface level for the
# givem vertical axis.
from pygeode.axis import Pres, Hybrid
from pygeode_rpn.fstd import LogHybrid, Height_wrt_Ground
# Axis value that represents "the surface" for each supported axis type.
surface_values = {Pres:1000., Hybrid:1.0, LogHybrid:1.0, Height_wrt_Ground:0.0}
# Direction of increasing axis values toward the surface (+1 = downward).
surface_direction = {Pres:+1, Hybrid:+1, LogHybrid:+1, Height_wrt_Ground:-1}
# Rank the different axis types by preference (higher number is better)
surface_preference = {Pres:0, Hybrid:0, LogHybrid:0, Height_wrt_Ground:1}
del Pres, Hybrid, LogHybrid, Height_wrt_Ground

# Find a surface value (or the closest level to the surface)
def select_surface (var):
  """Select the surface level of *var* along its vertical axis (if any)."""
  from pygeode.axis import ZAxis
  if not var.hasaxis(ZAxis): return var
  zaxis = var.getaxis(ZAxis)
  zaxis_type = type(zaxis)
  if zaxis_type in surface_values:
    sfc_val = surface_values[zaxis_type]
  else:
    # Unknown axis type: fall back to the 'positive' attribute to guess
    # which end of the axis is the surface.
    from warnings import warn
    warn ("Incomplete information on z-axis. Hopefully, the surface is being selected!")
    sfc_val = zaxis.values[{'up':0, 'down':-1}[zaxis.atts['positive']]]
  selection = dict([(zaxis.name,sfc_val)])
  return var(**selection)
# Criteria for ranking how close a dataset is to the surface
# (higher value is closer)
# To be used in the find_best() method.
def closeness_to_surface (varlist):
  """Rank a dataset by how close its deepest level is to the surface
  (higher value is closer).  For use in the find_best() method."""
  from pygeode.axis import ZAxis
  for var in varlist:
    if var.hasaxis(ZAxis):
      zaxis = var.getaxis(ZAxis)
      if type(zaxis) in surface_direction:
        direction = surface_direction[type(zaxis)]
      elif 'positive' in zaxis.atts:
        direction = {'up':-1, 'down':+1}[zaxis.atts['positive']]
      else: raise Exception ("Don't know how to find orientation of '%s'"%zaxis)
      rank = surface_preference.get(type(zaxis),0)
      value = max(var.getaxis(ZAxis).values * direction)
      # Prefer higher-ranked axis types.
      # Collapse (rank,value) tuple to a single value, for compatibility with
      # data_scanner logic.
      return rank*1E6+value
# Rank a dataset based on the number of timesteps available.
# To be used in the find_best() method.
def number_of_timesteps (varlist):
  """Rank a dataset by the number of timesteps available.
  To be used in the find_best() method."""
  from pygeode.axis import TAxis
  for var in varlist:
    if var.hasaxis(TAxis):
      return len(var.getaxis(TAxis))
  # No time axis found.  Return 0 (instead of falling off the end and
  # returning None) so the ranking is always numeric, consistent with
  # number_of_levels() below.
  return 0
# Rank a dataset based on the time duration.
def length_of_time (varlist):
  """Rank a dataset by its time duration (in hours)."""
  from pygeode.axis import TAxis
  from pygeode.timeutils import reltime
  for var in varlist:
    if var.hasaxis(TAxis):
      taxis = var.getaxis(TAxis)
      tvals = reltime(taxis,units='hours')
      if len(tvals) == 0: return 0
      return tvals[-1] - tvals[0]
  # No time axis found.  Rank as zero duration (instead of implicitly
  # returning None) to keep the ranking numeric.
  return 0
# Rank a dataset based on the number of levels available.
# To be used in the find_best() method.
def number_of_levels (varlist):
  """Rank a dataset by the number of vertical levels available.
  To be used in the find_best() method.  Returns 0 when no variable
  has a vertical axis."""
  from pygeode.axis import ZAxis
  counts = (len(var.getaxis(ZAxis)) for var in varlist if var.hasaxis(ZAxis))
  return next(counts, 0)
# Check if we have data on a lat/lon grid.
# To be used in the find_best() method.
def have_gridded_data (varlist):
  """True iff some variable has both latitude and longitude axes."""
  from pygeode.axis import Lat, Lon
  return any(var.hasaxis(Lat) and var.hasaxis(Lon) for var in varlist)

# Check if we have vertical structure in the data.
def have_vertical_data (varlist):
  """True iff some variable has a vertical axis."""
  from pygeode.axis import ZAxis
  return any(var.hasaxis(ZAxis) for var in varlist)

# Check if we have 3D data available (lat/lon/zaxis)
# To be used in the find_best() method.
def have_gridded_3d_data (varlist):
  """True iff some variable has latitude, longitude, and vertical axes."""
  from pygeode.axis import Lat, Lon, ZAxis
  return any(var.hasaxis(Lat) and var.hasaxis(Lon) and var.hasaxis(ZAxis)
             for var in varlist)
# Check if we have a particular vertical level
def have_level (level):
  """Build a dataset filter that checks for a vertical level within 10%
  of the requested *level*."""
  lo = level*0.9
  hi = level*1.1
  def have_the_level (dataset):
    for var in dataset:
      if not var.hasaxis('zaxis'):
        continue
      for l in var.getaxis('zaxis').values:
        if lo <= l <= hi:
          return True
    return False
  return have_the_level
# Check if we have a particular diagnostic level
def have_height (height):
  """Build a dataset filter that checks for an exact diagnostic height."""
  def have_the_height (dataset):
    for var in dataset:
      if var.hasaxis('height') and height in var.getaxis('height').values:
        return True
    return False
  return have_the_height
# Check if we have station data.
def have_station_data (varlist):
  """True iff some variable has a station axis."""
  return any(var.hasaxis("station") for var in varlist)
# Similar to above, but must also have vertical structure.
def have_profile_data (varlist):
  """True iff some variable has both a station axis and a vertical axis."""
  from pygeode.axis import ZAxis
  return any(var.hasaxis("station") and var.hasaxis(ZAxis) for var in varlist)
| neishm/EC-CAS-diags | eccas_diags/common.py | Python | lgpl-3.0 | 28,889 | [
"CDK"
] | 8bd012a6e6b7f812157d540a9b2a8b0952f7b83b586bd7d12256d090e6e0e2ff |
# -*- coding: utf-8 -*-
# Copyright 2021 El Nogal - Pedro Gómez <pegomez@elnogal.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, exceptions, _
from openerp.osv import expression
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrule
class CommercialRoute(models.Model):
    _name = 'commercial.route'
    _description = 'Commercial route'
    _order = 'user_id, sequence, code'

    active = fields.Boolean('Active', default=True)
    code = fields.Char('Code', size=32)
    name = fields.Char('Name', size=255)
    user_id = fields.Many2one('res.users', 'Salesperson')
    sequence = fields.Integer('Sequence', default=1)
    # Partners assigned to this route (inverse of res.partner.commercial_route_id).
    partner_ids = fields.One2many(
        'res.partner', 'commercial_route_id', 'Partners',
        readonly=True)
    # A planned route recurs every `interval` weeks, anchored on the
    # initial from/to dates below.
    planned = fields.Boolean('Planned')
    interval = fields.Integer('Interval', default=1)
    initial_date_from = fields.Date('Initial date (from)')
    initial_date_to = fields.Date('Initial date (to)')
    next_date_from = fields.Date(
        string='Next visit date (from)',
        compute='_compute_next_date', store=True)
    next_date_to = fields.Date(
        string='Next visit date (to)',
        compute='_compute_next_date', store=True)
    duration = fields.Integer('Duration', # For gantt view
        compute='_compute_next_date', store=True)

    @api.multi
    @api.depends('initial_date_from', 'initial_date_to', 'planned', 'interval')
    def _compute_next_date(self):
        """Compute the next visit window and its duration for each route."""
        for route_id in self:
            next_date_from, next_date_to = route_id._get_interval_dates()
            duration = False
            if next_date_from and next_date_to:
                # 8 hours per day spanned (inclusive); used by the gantt view.
                duration = ((next_date_to - next_date_from).days + 1) * 8
            route_id.next_date_from = next_date_from
            route_id.next_date_to = next_date_to
            route_id.duration = duration

    @api.multi
    def _get_interval_dates(self):
        """Return the (from, to) dates of the next planned visit window.

        Returns (False, False) when the route is not planned or is missing
        either initial date; returns today's date twice when the interval
        is below one week.
        """
        self.ensure_one()
        today = datetime.strptime(
            fields.Date.context_today(self), "%Y-%m-%d")
        if not (self.planned and self.initial_date_from and self.initial_date_to):
            next_date_from = False
            next_date_to = False
        elif self.interval < 1:
            next_date_from = fields.Date.context_today(self)
            next_date_to = fields.Date.context_today(self)
        else:
            # Normalize so 'from' <= 'to' even if entered backwards.
            initial_date_from = datetime.strptime(
                min(self.initial_date_from, self.initial_date_to), "%Y-%m-%d")
            initial_date_to = datetime.strptime(
                max(self.initial_date_from, self.initial_date_to), "%Y-%m-%d")
            end_date = today + relativedelta(weeks=self.interval)
            # Weekly recurrence every `interval` weeks, anchored on the
            # initial 'to' date; keep the first occurrence from today on.
            valid_dates_to = (rrule(
                freq=2, # Weekly
                dtstart=initial_date_to,
                until=end_date,
                interval=self.interval or 1)
                .between(today, end_date, inc=True)
            )
            next_date_to = valid_dates_to and valid_dates_to[0] or today
            # Preserve the original window length (to - from).
            next_date_from = next_date_to - (initial_date_to - initial_date_from)
        return next_date_from, next_date_to
@api.multi
def name_get(self):
return [
(route.id, (route.code and
(route.code + ' - ') or '') + (route.name or ''))
for route in self
]
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
domain = [
'&' if operator in expression.NEGATIVE_TERM_OPERATORS else '|',
('code', operator, name),
('name', operator, name),
]
recs = self.search(domain + args, limit=limit)
return recs.name_get()
@api.multi
def unlink(self):
type_ids = self._ids
if type_ids:
sql = """
SELECT cl1.relname as table, att1.attname as column
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND cl2.relname = %s
AND att2.attname = 'id'
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND con.contype = 'f'
AND con.confdeltype <> 'c'
"""
self._cr.execute(sql, [self._table])
records = self._cr.fetchall()
for record in records:
table = record[0]
column = record[1]
sql = """SELECT EXISTS(SELECT 1 FROM "%s" WHERE %s in %%s LIMIT 1)""" % (table, column)
self._cr.execute(sql, [type_ids])
exist_record = self._cr.fetchall()
if exist_record[0][0]:
raise exceptions.Warning(
_("You cannot remove a commercial route that is referenced by: %s") % (table))
return super(CommercialRoute, self).unlink()
@api.multi
def update_commercial_route_dates(self):
#route_ids = self or self.search([])
route_ids = self.search([])
for route_id in route_ids:
route_id._compute_next_date()
return True
| ELNOGAL/CMNT_00040_2016_ELN_addons | commercial_route/models/commercial_route.py | Python | agpl-3.0 | 5,588 | [
"VisIt"
] | 4ca4dfb3534ab151ebe54066a02b872abbe0d06c04d24f032b794d213fbf78ea |
#!/bin/env python2.7
import sys
import os
import os.path
import glob
import copy
import traceback
import time
import re
import csv
import tempfile
import urllib.request, urllib.parse, urllib.error
import shutil
import atexit
import subprocess
import time
import math
from collections import defaultdict
from os.path import join, dirname, realpath
try:
sys.path.append(join(dirname(realpath(__file__)),
'..', '..', 'common', 'src'))
except NameError:
pass
from optparse_gui import OptionParser, OptionGroup, GUI, UserCancelledError, ProgressText
from util import *
from version import VERSION
VERSION = '%s' % (VERSION,)
def excepthook(etype, value, tb):
    """Top-level hook: print the traceback, then wait for <Enter> so a
    console window opened for this script stays visible until dismissed."""
    traceback.print_exception(etype, value, tb, file=sys.stderr)
    print("Type <Enter> to Exit...", end=' ', file=sys.stderr)
    sys.stderr.flush()
    input()
# Install as the interpreter-wide uncaught-exception handler.
sys.excepthook = excepthook
# Directories queued for removal at interpreter exit.
toremove = []
def cleanup():
    """Best-effort removal of every directory queued in `toremove`."""
    for tmpdir in toremove:
        shutil.rmtree(tmpdir, ignore_errors=True)
atexit.register(cleanup)
if not GUI() and len(sys.argv) == 2 and sys.argv[1] == '--GUI':
from optparse_gui.needswx import *
sys.exit(1)
if GUI() and len(sys.argv) == 1:
from optparse_gui import OptionParserGUI
parser = OptionParserGUI(version=VERSION)
error_kwargs = {'exit': False}
else:
parser = OptionParser(version=VERSION)
error_kwargs = {}
advanced = OptionGroup(parser, "Advanced")
parser.add_option("-s", "--snvs", type="files", dest="snvs", default=None,
help="Single-Nucleotide-Variant files. Required.", name="SNVs",
notNone=True, remember=True,
filetypes=[("SNVs", "*.vcf;*.csv;*.tsv;*.xls;*.xlsx;*.txt")])
parser.add_option("-r", "--readalignments", type="files", dest="alignments", default=None,
help="Read alignments in BAM/SAM format. Required.", name="Read Alignments",
notNone=True, remember=True,
filetypes=[("Read Alignments (BAM/SAM Format)", "*.bam;*.sam")])
advanced.add_option("-M", "--mincount", type="int", dest="mincount", default=3, remember=True,
help="Minimum number of reads for reference and variant allelels to apply LoH test. Default: 3.",
name="Min. Count")
advanced.add_option("-F", "--full", action="store_true", dest="full", default=False, remember=True,
help="Output extra diagnostic read count fields. Default=False.", name="All Fields")
advanced.add_option("-U", "--uniquereads", action="store_true", dest="unique", default=False, remember=True,
help="Consider only distinct reads.", name="Unique Reads")
advanced.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, remember=True,
help="Quiet.", name="Quiet")
parser.add_option("-o", "--output", type="savefile", dest="output", remember=True,
help="Output file. Leave empty for console ouptut.", default="",
name="Output File", filetypes=[("All output formats", "*.xlsx;*.xls;*.csv;*.tsv;*.txt"),
("Excel", "*.xlsx"), ("Excel2003", "*.xls"),
("CSV", "*.csv"), ("TSV", "*.tsv"), ("Text", "*.txt")])
parser.add_option_group(advanced)
opt = None
while True:
if 'exit' in error_kwargs:
try:
opt, args = parser.parse_args(opts=opt)
except UserCancelledError:
sys.exit(0)
else:
opt, args = parser.parse_args()
break
progress = None
if not opt.output:
opt.quiet = True
progress = ProgressText(quiet=opt.quiet)
from pysamimport import pysam
from dataset import XLSFileTable, CSVFileTable, TSVFileTable, XLSXFileTable, TXTFileTable, BEDFile, VCFFile
progress.stage("Read SNV data", len(opt.snvs))
snvheaders = [_f for _f in """
CHROM POS REF ALT
""".split() if _f]
snvdata = {}
extrasnvheaders = []
usedsnvheaders = set()
for filename in opt.snvs:
base, extn = filename.rsplit('.', 1)
extn = extn.lower()
if extn == 'csv':
snvs = CSVFileTable(filename=filename)
elif extn == 'vcf':
snvs = VCFFile(filename=filename)
elif extn == 'tsv':
snvs = TSVFileTable(filename=filename)
elif extn == 'xls':
snvs = XLSFileTable(filename=filename)
elif extn == 'xlsx':
snvs = XLSXFileTable(filename=filename)
elif extn == 'txt':
snvs = TXTFileTable(filename=filename, headers=snvheaders)
else:
raise RuntimeError("Unexpected SNV file extension: %s" % filename)
for h in snvheaders:
if h not in snvs.headers():
raise RuntimeError(
"Required header: %s missing from SNV file %s" % (h, filename))
for h in snvs.headers():
if h in snvheaders:
continue
if h not in extrasnvheaders:
extrasnvheaders.append(h)
for r in snvs:
chr = r[snvheaders[0]]
locus = int(r[snvheaders[1]])
ref = r[snvheaders[2]]
alt = r[snvheaders[3]]
if r.get('INFO:INDEL'):
continue
if len(ref) != 1:
continue
if not re.search(r'^[ACGT](,[ACGT])*$', alt):
continue
for h in r:
if r.get(h):
usedsnvheaders.add(h)
cannonr = (",".join(["%s:%s" % t for t in sorted(r.items())]))
snvkey = (chr, locus, ref, alt, cannonr)
if snvkey not in snvdata:
snvdata[snvkey] = (chr, locus, ref, alt, r)
progress.update()
progress.done()
snvdata = sorted(snvdata.values())
extrasnvheaders = [h for h in extrasnvheaders if h in usedsnvheaders]
progress.message("SNVs: %d" % len(snvdata))
samfiles = []
for al in opt.alignments:
if al.lower().endswith('.bam'):
samfile = pysam.Samfile(al, "rb")
elif al.lower().endswith('.sam'):
samfile = pysam.Samfile(al, "r")
else:
raise RuntimeError("Unexpected alignments file extension: %s." % al)
samfiles.append(samfile)
outheaders = snvheaders + [_f for _f in """
SNVCount
NoSNVCount
Prob
LogOdds
P-Value
Bonferroni
FDR
""".split() if _f]
debugging = [_f for _f in """
OtherCount
GoodReads
RemovedDuplicateReads
FilteredSNVLociReads
SNVLociReads
""".split() if _f]
outheaders.extend(debugging)
pos = outheaders.index("SNVCount")
for h in reversed(extrasnvheaders):
outheaders.insert(pos, h)
outheaders1 = copy.copy(outheaders)
if not opt.full:
for dh in debugging:
if dh in outheaders1:
outheaders1.remove(dh)
emptysym = None
if opt.output:
filename = opt.output
base, extn = filename.rsplit('.', 1)
extn = extn.lower()
if extn == 'csv':
output = CSVFileTable(filename=filename, headers=outheaders1)
elif extn == 'tsv':
output = TSVFileTable(filename=filename, headers=outheaders1)
elif extn == 'xls':
output = XLSFileTable(
filename=filename, headers=outheaders1, sheet='Results')
elif extn == 'xlsx':
output = XLSXFileTable(
filename=filename, headers=outheaders1, sheet='Results')
elif extn == 'txt':
output = TXTFileTable(filename=filename, headers=outheaders1)
else:
raise RuntimeError("Unexpected output file extension: %s" % filename)
else:
output = TXTFileTable(filename=sys.stdout, headers=outheaders1)
emptysym = "-"
outrows = []
from fisher import fisher_exact, bonferroni, fdr, lod, binom_test
pvalues = []
progress.stage("Count reads per SNV", len(snvdata))
filter = SNVPileupReadFilter()
for snvchr, snvpos, ref, alt, snvextra in snvdata:
reads = []
total = 0
snvpos1 = snvpos - 1
for i, samfile in enumerate(samfiles):
for pileupcolumn in samfile.pileup(snvchr, snvpos1, snvpos1 + 1, truncate=True):
total += pileupcolumn.n
for pileupread in pileupcolumn.pileups:
try:
al, pos, base = filter.test(pileupread)
except BadRead:
continue
reads.append((al, pos, base, i))
goodreads = defaultdict(list)
for al, pos, base, si in reads:
goodreads[base].append((si, al))
# Deduplicate the reads based on the read sequence or the
# start and end of the alignment or ???
duplicates_removed = 0
if opt.unique:
for base in goodreads:
seen = set()
retain = list()
for si, al in goodreads[base]:
if (si, al.seq) not in seen:
retain.append((si, al))
seen.add((si, al.seq))
else:
duplicates_removed += 1
goodreads[base] = retain
# goodreads now contains the relevant read alignments.
if len(goodreads[ref]) < opt.mincount or len(goodreads[alt]) < opt.mincount:
progress.update()
continue
counts = defaultdict(int)
for base in goodreads:
for si, al in goodreads[base]:
counts[base] += 1
nsnv = sum([counts[nuc] for nuc in alt.split(',')])
nref = counts[ref]
nother = sum(counts.values()) - nsnv - nref
p = emptysym
pval = emptysym
logprob = emptysym
psnv = nsnv / float(nsnv + nref)
pref = nref / float(nsnv + nref)
p = psnv / (psnv + pref)
logodds = math.log(float(nsnv) / float(nref), 2.0)
pval = binom_test(nsnv, nsnv + nref, 0.5)
pvalues.append(pval)
row = [ snvchr, snvpos, ref, alt ] + \
[ snvextra.get(k, emptysym) for k in extrasnvheaders ] + \
[nsnv,
nref,
p,
logodds,
pval,
nother,
sum(counts.values()),
duplicates_removed,
len(reads),
total]
outrows.append(row)
progress.update()
progress.done()
progress.stage('Multiple-test correction and FDR computation')
bonf = bonferroni(pvalues)
# NOTE(review): this rebinds the imported `fdr` function to its result list.
# Harmless because the function is only called once, but the shadowing makes
# the name confusing for readers.
fdr = fdr(pvalues)
i = 0
pvalpos = outheaders.index('P-Value')
for r in outrows:
    if len(r) > pvalpos:
        if r[pvalpos] != emptysym:
            # Insert in reverse so the final column order after 'P-Value'
            # is Bonferroni, then FDR — matching outheaders.
            r.insert(pvalpos + 1, fdr[i])
            r.insert(pvalpos + 1, bonf[i])
            i += 1
        else:
            r.insert(pvalpos + 1, emptysym)
            r.insert(pvalpos + 1, emptysym)
progress.done()
progress.stage('Output results')
output.from_rows(
[dict(list(zip(outheaders, r + [emptysym] * 50))) for r in outrows])
progress.done()
| HorvathLab/NGS | RNA2DNAlign/src/LoH.py | Python | mit | 10,447 | [
"pysam"
] | 9ea46797d2c4be7d21f252742e12328d975d574557869d938ee0a99a65ff74f1 |
#!/usr/bin/env python
# Extract fasta files by their descriptors stored in a separate file.
# Requires biopython
# TODO:
# - Create more sophisticated logic for matching IDs/Descriptions/Partial matches etc.
# - Create a mode variable to encapsulate invert/partial/description/id etc?
from Bio import SeqIO
import sys
import argparse
def get_keys(args):
    """Turn the key file into a list of header strings.

    Each line is stripped of its newline (including a stray literal "\r")
    and of any leading FASTA ">" characters. Blank lines are skipped: an
    empty key would otherwise substring-match EVERY record in partial-match
    mode. May be memory intensive for very large key files.
    """
    keys = []
    with open(args.keyfile, "r") as kfh:
        for line in kfh:
            key = line.rstrip("\r\n").lstrip(">")
            if key:
                keys.append(key)
    return keys
def get_args():
    """Build the argparse parser and return the parsed command line.

    With no arguments at all, prints usage to stderr and exits with status 1.
    """
    try:
        parser = argparse.ArgumentParser(
            description="Retrieve one or more fastas from a given multifasta."
        )
        parser.add_argument(
            "-f",
            "--fasta",
            action="store",
            required=True,
            help="The multifasta to search.",
        )
        # Key source: either a file of headers (-k) or an inline list (-s).
        parser.add_argument(
            "-k",
            "--keyfile",
            action="store",
            help="A file of header strings to search the multifasta for. Must be one per line.",
        )
        parser.add_argument(
            "-s",
            "--string",
            action="store",
            help="Provide a string to look for directly, instead of a file (can accept a comma separated list of strings).",
        )
        parser.add_argument(
            "-o",
            "--outfile",
            action="store",
            default=None,
            help="Output file to store the new fasta sequences in. Just prints to screen by default.",
        )
        parser.add_argument(
            "-v",
            "--verbose",
            action="store_true",
            help="Set whether to print the key list out before the fasta sequences. Useful for debugging.",
        )
        parser.add_argument(
            "-i",
            "--invert",
            action="store_true",
            help="Invert the search, and retrieve all sequences NOT specified in the keyfile.",
        )
        parser.add_argument(
            "-m",
            "--method",
            action="store",
            choices=["exact", "partial"],
            default="exact",
            help="Search the headers as exact matches, or as partial substring matches. "
            "The latter is dangerous, as headers may be matched twice, so be sure "
            "your headers/keys are unique to their respective sequences."
        )
        # Bare invocation: show help instead of failing on the missing -f.
        if len(sys.argv) == 1:
            parser.print_help(sys.stderr)
            sys.exit(1)
    except NameError:
        # NOTE(review): the argparse calls above do not normally raise
        # NameError; this handler looks vestigial — confirm before removing.
        sys.stderr.write(
            "An exception occured with argument parsing. Check your provided options."
        )
        sys.exit(1)
    return parser.parse_args()
def main():
    """Retrieve the records named by the key source from a multifasta.

    Matching records are always printed to stdout; when ``--outfile`` is
    given they are additionally written there as FASTA.
    """
    args = get_args()
    # Call getKeys() to create the list of keys from the provided file:
    if not (args.keyfile or args.string):
        sys.stderr.write("No key source provided. Exiting.")
        sys.exit(1)
    if args.keyfile:
        keys = get_keys(args)
    else:
        # Inline key source: comma-separated list on the command line.
        keys = args.string.split(",")
    if args.verbose:
        # Echo the key list to stderr so stdout stays pure FASTA.
        if args.invert is False:
            sys.stderr.write("Fetching the following keys:\n")
            for key in keys:
                sys.stderr.write(key + "\n")
        elif args.invert is True:
            sys.stderr.write(
                "Ignoring the following keys, and retreiving everything else from: {}\n".format(
                    args.fasta
                )
            )
            for key in keys:
                sys.stderr.write(key + "\n")
        sys.stderr.write(
            "-" * 80 + "\n"
        )
    # Parse in the multifasta and assign an iterable variable:
    # "exact" compares whole record ids; "partial" substring-matches the
    # full description line. Inverted partial keeps a record only when NO
    # key occurs in its description (all/not-in).
    to_write = []
    for rec in SeqIO.parse(args.fasta, "fasta"):
        if args.invert is False:
            if args.method == "exact":
                if rec.id in keys:
                    print(rec.format("fasta"))
                    to_write.append(rec)
            elif args.method == "partial":
                if any(key in rec.description for key in keys):
                    print(rec.format("fasta"))
                    to_write.append(rec)
        elif args.invert is True:
            if args.method == "exact":
                if rec.id not in keys:
                    print(rec.format("fasta"))
                    to_write.append(rec)
            elif args.method == "partial":
                if all(key not in rec.description for key in keys):
                    print(rec.format("fasta"))
                    to_write.append(rec)
    if args.outfile:
        SeqIO.write(to_write, args.outfile, "fasta")
if __name__ == "__main__":
main()
| jrjhealey/bioinfo-tools | fastafetcher.py | Python | gpl-3.0 | 4,793 | [
"Biopython"
] | bf34fbde69ffe09d20fdb8b4d845c00cba0ba4ee87fb85ae96a8ece485587181 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Simple volume rendering example exercising every cropping-region mode of
# vtkVolumeTextureMapper2D on a 2x4 grid of volumes.
reader = vtk.vtkSLCReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/sphere.slc")

# Create transfer functions for opacity and color.
opacityTransferFunction = vtk.vtkPiecewiseFunction()
opacityTransferFunction.AddPoint(0, 0.0)
opacityTransferFunction.AddPoint(30, 0.0)
opacityTransferFunction.AddPoint(80, 0.5)
opacityTransferFunction.AddPoint(255, 0.5)

colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 0.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(64.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(128.0, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(192.0, 0.0, 1.0, 0.0)
colorTransferFunction.AddRGBPoint(255.0, 0.0, 0.2, 0.0)

# Shared volume property (color, opacity, linear interpolation, shading).
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorTransferFunction)
volumeProperty.SetScalarOpacity(opacityTransferFunction)
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()

ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(600, 300)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(0.1, 0.2, 0.4)

# Build the 2x4 grid. The original code manufactured variable names such as
# volumeMapper_0_1 with exec()/eval(); plain dicts keyed by (row, col) do
# the same job without dynamic code execution.
volumeMapper = {}
volume = {}
userMatrix = {}
for i in range(2):
    for j in range(4):
        mapper = vtk.vtkVolumeTextureMapper2D()
        mapper.SetInputConnection(reader.GetOutputPort())
        mapper.CroppingOn()
        mapper.SetCroppingRegionPlanes(17, 33, 17, 33, 17, 33)
        vol = vtk.vtkVolume()
        vol.SetMapper(mapper)
        vol.SetProperty(volumeProperty)
        xform = vtk.vtkTransform()
        xform.PostMultiply()
        xform.Identity()
        xform.Translate(-25, -25, -25)
        # Row 0 varies the X rotation per column, row 1 the Y rotation.
        if i == 0:
            xform.RotateX(j * 90 + 20)
            xform.RotateY(20)
        else:
            xform.RotateX(20)
            xform.RotateY(j * 90 + 20)
        xform.Translate(j * 55 + 25, i * 55 + 25, 0)
        vol.SetUserTransform(xform)
        ren1.AddViewProp(vol)
        volumeMapper[i, j] = mapper
        volume[i, j] = vol
        userMatrix[i, j] = xform

# One cropping-region mode per volume.
volumeMapper[0, 0].SetCroppingRegionFlagsToSubVolume()
volumeMapper[0, 1].SetCroppingRegionFlagsToCross()
volumeMapper[0, 2].SetCroppingRegionFlagsToInvertedCross()
volumeMapper[0, 3].SetCroppingRegionFlags(24600)
volumeMapper[1, 0].SetCroppingRegionFlagsToFence()
volumeMapper[1, 1].SetCroppingRegionFlagsToInvertedFence()
volumeMapper[1, 2].SetCroppingRegionFlags(1)
volumeMapper[1, 3].SetCroppingRegionFlags(67117057)

ren1.GetCullers().InitTraversal()
culler = ren1.GetCullers().GetNextItem()
culler.SetSortingStyleToBackToFront()

ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(3.0)
renWin.Render()

def TkCheckAbort(object_binding, event_name):
    # Abort a long render as soon as window events are pending.
    if renWin.GetEventPending() != 0:
        renWin.SetAbortRender(1)
renWin.AddObserver("AbortCheckEvent", TkCheckAbort)

iren.Initialize()
#iren.Start()
| hlzz/dotfiles | graphics/VTK-7.0.0/Rendering/Volume/Testing/Python/volTM2DCropRegions.py | Python | bsd-3-clause | 3,520 | [
"VTK"
] | 4a00635b55ebb8e992e072d6855a6a9eafcd0c7a1ec99b5d33731c4676463c9f |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import ABCMeta
from pyspark import keyword_only, since
from pyspark.ml import Predictor, PredictionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.param.shared import HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol, \
Param, Params, TypeConverters, HasMaxIter, HasTol, HasFitIntercept, HasAggregationDepth, \
HasMaxBlockSizeInMB, HasRegParam, HasSolver, HasStepSize, HasSeed, HasElasticNetParam, \
HasStandardization, HasLoss, HasVarianceCol
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, _TreeRegressorParams
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, HasTrainingSummary, \
GeneralJavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel',
'FMRegressor', 'FMRegressionModel']
class Regressor(Predictor, _PredictorParams, metaclass=ABCMeta):
"""
Regressor for regression tasks.
.. versionadded:: 3.0.0
"""
pass
class RegressionModel(PredictionModel, _PredictorParams, metaclass=ABCMeta):
"""
Model produced by a ``Regressor``.
.. versionadded:: 3.0.0
"""
pass
class _JavaRegressor(Regressor, JavaPredictor, metaclass=ABCMeta):
"""
Java Regressor for regression tasks.
.. versionadded:: 3.0.0
"""
pass
class _JavaRegressionModel(RegressionModel, JavaPredictionModel, metaclass=ABCMeta):
"""
Java Model produced by a ``_JavaRegressor``.
To be mixed in with :class:`pyspark.ml.JavaModel`
.. versionadded:: 3.0.0
"""
pass
class _LinearRegressionParams(_PredictorParams, HasRegParam, HasElasticNetParam, HasMaxIter,
                              HasTol, HasFitIntercept, HasStandardization, HasWeightCol, HasSolver,
                              HasAggregationDepth, HasLoss, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`LinearRegression` and :py:class:`LinearRegressionModel`.
    .. versionadded:: 3.0.0
    """
    # Re-declared here (rather than inherited from HasSolver/HasLoss) so the
    # doc string lists exactly the options this estimator supports.
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: auto, normal, l-bfgs.", typeConverter=TypeConverters.toString)
    loss = Param(Params._dummy(), "loss", "The loss function to be optimized. Supported " +
                 "options: squaredError, huber.", typeConverter=TypeConverters.toString)
    # Only consulted when loss == "huber"; must be > 1.0.
    epsilon = Param(Params._dummy(), "epsilon", "The shape parameter to control the amount of " +
                    "robustness. Must be > 1.0. Only valid when loss is huber",
                    typeConverter=TypeConverters.toFloat)
    def __init__(self, *args):
        super(_LinearRegressionParams, self).__init__(*args)
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, loss="squaredError", epsilon=1.35,
                         maxBlockSizeInMB=0.0)
    @since("2.3.0")
    def getEpsilon(self):
        """
        Gets the value of epsilon or its default value.
        """
        return self.getOrDefault(self.epsilon)
@inherit_doc
class LinearRegression(_JavaRegressor, _LinearRegressionParams, JavaMLWritable, JavaMLReadable):
    """
    Linear regression.

    The learning objective is to minimize the specified loss function, with regularization.
    This supports two kinds of loss:

    * squaredError (a.k.a squared loss)
    * huber (a hybrid of squared error for relatively small errors and absolute error for \
    relatively large ones, and we estimate the scale parameter from training data)

    This supports multiple types of regularization:

    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)

    .. versionadded:: 1.4.0

    Notes
    -----
    Fitting with huber loss only supports none and L2 regularization.

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(regParam=0.0, solver="normal", weightCol="weight")
    >>> lr.setMaxIter(5)
    LinearRegression...
    >>> lr.getMaxIter()
    5
    >>> lr.setRegParam(0.1)
    LinearRegression...
    >>> lr.getRegParam()
    0.1
    >>> lr.setRegParam(0.0)
    LinearRegression...
    >>> model = lr.fit(df)
    >>> model.setFeaturesCol("features")
    LinearRegressionModel...
    >>> model.setPredictionCol("newPrediction")
    LinearRegressionModel...
    >>> model.getMaxIter()
    5
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.predict(test0.head().features) - (-1.0)) < 0.001
    True
    >>> abs(model.transform(test0).head().newPrediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().newPrediction - 1.0) < 0.001
    True
    >>> lr.setParams(featuresCol="vector")
    LinearRegression...
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.numFeatures
    1
    >>> model.write().format("pmml").save(model_path + "_2")
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                 loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
        """
        super(LinearRegression, self).__init__()
        # Wrap the JVM-side estimator; all fitting happens in Scala.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2,
                  loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2, \
                  loss="squaredError", epsilon=1.35, maxBlockSizeInMB=0.0)
        Sets params for linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        return LinearRegressionModel(java_model)

    @since("2.3.0")
    def setEpsilon(self, value):
        """
        Sets the value of :py:attr:`epsilon`.
        """
        return self._set(epsilon=value)

    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)

    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    def setElasticNetParam(self, value):
        """
        Sets the value of :py:attr:`elasticNetParam`.
        """
        return self._set(elasticNetParam=value)

    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    def setStandardization(self, value):
        """
        Sets the value of :py:attr:`standardization`.
        """
        return self._set(standardization=value)

    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)

    def setLoss(self, value):
        """
        Sets the value of :py:attr:`loss`.
        """
        # Fix: previously passed `lossType=value`, a param name this
        # estimator does not define, so the requested loss was never set.
        return self._set(loss=value)

    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class LinearRegressionModel(_JavaRegressionModel, _LinearRegressionParams, GeneralJavaMLWritable,
                            JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`LinearRegression`.
    .. versionadded:: 1.4.0
    """
    # All accessors below delegate to the wrapped JVM model via _call_java.
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.3.0")
    def scale(self):
        r"""
        The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0.
        """
        return self._call_java("scale")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (residuals, MSE, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.

        :raises RuntimeError: if no training summary was recorded for this model.
        """
        if self.hasSummary:
            return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        .. versionadded:: 2.0.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`

        Raises
        ------
        TypeError
            If ``dataset`` is not a :py:class:`pyspark.sql.DataFrame`.
        """
        if not isinstance(dataset, DataFrame):
            raise TypeError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
"""
Linear regression results evaluated on a dataset.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
Dataframe outputted by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def predictionCol(self):
"""
Field in "predictions" which gives the predicted value of
the label at each instance.
"""
return self._call_java("predictionCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@property
@since("2.0.0")
def explainedVariance(self):
r"""
Returns the explained variance regression score.
explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
Notes
-----
This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
For additional information see
`Explained variation on Wikipedia \
<http://en.wikipedia.org/wiki/Explained_variation>`_
"""
return self._call_java("explainedVariance")
@property
@since("2.0.0")
def meanAbsoluteError(self):
"""
Returns the mean absolute error, which is a risk function
corresponding to the expected value of the absolute error
loss or l1-norm loss.
Notes
-----
This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanAbsoluteError")
@property
@since("2.0.0")
def meanSquaredError(self):
"""
Returns the mean squared error, which is a risk function
corresponding to the expected value of the squared error
loss or quadratic loss.
Notes
-----
This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("meanSquaredError")
@property
@since("2.0.0")
def rootMeanSquaredError(self):
"""
Returns the root mean squared error, which is defined as the
square root of the mean squared error.
Notes
-----
This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("rootMeanSquaredError")
@property
@since("2.0.0")
def r2(self):
    """
    R^2, the coefficient of determination.
    Notes
    -----
    Instance weights from `LinearRegression.weightCol` are ignored
    (every weight is treated as 1.0). This will change in later
    Spark versions.
    See also `Wikipedia coefficient of determination \
    <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
    """
    score = self._call_java("r2")
    return score
@property
@since("2.4.0")
def r2adj(self):
    """
    Adjusted R^2, the adjusted coefficient of determination.
    Notes
    -----
    Instance weights from `LinearRegression.weightCol` are ignored
    (every weight is treated as 1.0). This will change in later
    Spark versions.
    `Wikipedia coefficient of determination, Adjusted R^2 \
    <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
    """
    score = self._call_java("r2adj")
    return score
@property
@since("2.0.0")
def residuals(self):
    """
    Residuals, i.e. label minus predicted value.
    """
    values = self._call_java("residuals")
    return values
@property
@since("2.0.0")
def numInstances(self):
    """
    Number of instances in the "predictions" DataFrame.
    """
    count = self._call_java("numInstances")
    return count
@property
@since("2.2.0")
def degreesOfFreedom(self):
    """
    The degrees of freedom.
    """
    dof = self._call_java("degreesOfFreedom")
    return dof
@property
@since("2.0.0")
def devianceResiduals(self):
    """
    Weighted residuals: the usual residuals rescaled by the square
    root of the instance weights.
    """
    values = self._call_java("devianceResiduals")
    return values
@property
def coefficientStandardErrors(self):
    """
    Standard errors of the estimated coefficients and intercept.
    Only available when the "normal" solver is used.
    If :py:attr:`LinearRegression.fitIntercept` is set to True, the
    last element returned corresponds to the intercept.
    .. versionadded:: 2.0.0
    See Also
    --------
    LinearRegression.solver
    """
    errors = self._call_java("coefficientStandardErrors")
    return errors
@property
def tValues(self):
    """
    T-statistics of the estimated coefficients and intercept.
    Only available when the "normal" solver is used.
    If :py:attr:`LinearRegression.fitIntercept` is set to True, the
    last element returned corresponds to the intercept.
    .. versionadded:: 2.0.0
    See Also
    --------
    LinearRegression.solver
    """
    stats = self._call_java("tValues")
    return stats
@property
def pValues(self):
    """
    Two-sided p-values of the estimated coefficients and intercept.
    Only available when the "normal" solver is used.
    If :py:attr:`LinearRegression.fitIntercept` is set to True, the
    last element returned corresponds to the intercept.
    .. versionadded:: 2.0.0
    See Also
    --------
    LinearRegression.solver
    """
    values = self._call_java("pValues")
    return values
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    Linear regression training results. The training summary currently
    ignores the training weights, except for the objective trace.
    .. versionadded:: 2.0.0
    """

    @property
    def objectiveHistory(self):
        """
        Value of the objective function (scaled loss + regularization)
        at each iteration.
        Only available when the "l-bfgs" solver is used.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        history = self._call_java("objectiveHistory")
        return history

    @property
    def totalIterations(self):
        """
        Number of training iterations performed until termination.
        Only available when the "l-bfgs" solver is used.
        .. versionadded:: 2.0.0
        See Also
        --------
        LinearRegression.solver
        """
        iterations = self._call_java("totalIterations")
        return iterations
class _IsotonicRegressionParams(HasFeaturesCol, HasLabelCol, HasPredictionCol, HasWeightCol):
    """
    Params for :py:class:`IsotonicRegression` and :py:class:`IsotonicRegressionModel`.
    .. versionadded:: 3.0.0
    """

    # Direction of the fitted sequence: non-decreasing (isotonic) when True,
    # non-increasing (antitonic) when False.
    # BUG FIX: the two doc-string fragments used to be concatenated without a
    # separating space, rendering as "...(true) orantitonic/decreasing..." in
    # explainParams() output; a space is now included after "or".
    isotonic = Param(
        Params._dummy(), "isotonic",
        "whether the output sequence should be isotonic/increasing (true) or " +
        "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)

    # Which element of a vector-valued featuresCol is used as the single
    # regression feature; ignored for scalar feature columns.
    featureIndex = Param(
        Params._dummy(), "featureIndex",
        "The index of the feature if featuresCol is a vector column, no effect otherwise.",
        typeConverter=TypeConverters.toInt)

    def __init__(self, *args):
        """Initialize the mixin and install the default param values."""
        super(_IsotonicRegressionParams, self).__init__(*args)
        self._setDefault(isotonic=True, featureIndex=0)

    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)

    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
@inherit_doc
class IsotonicRegression(JavaEstimator, _IsotonicRegressionParams, HasWeightCol,
                         JavaMLWritable, JavaMLReadable):
    """
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.
    .. versionadded:: 1.6.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> model.setFeaturesCol("features")
    IsotonicRegressionModel...
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.predict(test0.head().features[model.getFeatureIndex()])
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        super(IsotonicRegression, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments explicitly supplied by the caller.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0):
        Set the params for IsotonicRegression.
        """
        # Only the kwargs the caller actually passed are forwarded to _set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return IsotonicRegressionModel(java_model)

    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)

    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)

    @since("1.6.0")
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    @since("1.6.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    @since("1.6.0")
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)

    @since("1.6.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
class IsotonicRegressionModel(JavaModel, _IsotonicRegressionParams, JavaMLWritable,
                              JavaMLReadable):
    """
    Model produced by fitting :class:`IsotonicRegression`.
    .. versionadded:: 1.6.0
    """

    @since("3.0.0")
    def setFeaturesCol(self, value):
        """
        Set :py:attr:`featuresCol` to ``value``.
        """
        return self._set(featuresCol=value)

    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Set :py:attr:`predictionCol` to ``value``.
        """
        return self._set(predictionCol=value)

    def setFeatureIndex(self, value):
        """
        Set :py:attr:`featureIndex` to ``value``.
        """
        return self._set(featureIndex=value)

    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries, in increasing order, for which predictions are known.
        """
        bounds = self._call_java("boundaries")
        return bounds

    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index;
        monotone as a consequence of isotonic regression.
        """
        preds = self._call_java("predictions")
        return preds

    @property
    @since("3.0.0")
    def numFeatures(self):
        """
        Number of features the model was trained on (-1 when unknown).
        """
        count = self._call_java("numFeatures")
        return count

    @since("3.0.0")
    def predict(self, value):
        """
        Predict the label for the given feature value.
        """
        prediction = self._call_java("predict", value)
        return prediction
class _DecisionTreeRegressorParams(_DecisionTreeParams, _TreeRegressorParams, HasVarianceCol):
    """
    Shared params for :py:class:`DecisionTreeRegressor` and
    :py:class:`DecisionTreeRegressionModel`.
    .. versionadded:: 3.0.0
    """

    def __init__(self, *args):
        super(_DecisionTreeRegressorParams, self).__init__(*args)
        # Install the default values for every shared tree param.
        self._setDefault(
            maxDepth=5,
            maxBins=32,
            minInstancesPerNode=1,
            minInfoGain=0.0,
            maxMemoryInMB=256,
            cacheNodeIds=False,
            checkpointInterval=10,
            impurity="variance",
            leafCol="",
            minWeightFractionPerNode=0.0,
        )
@inherit_doc
class DecisionTreeRegressor(_JavaRegressor, _DecisionTreeRegressorParams, JavaMLWritable,
                            JavaMLReadable):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    .. versionadded:: 1.4.0
    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> dt = DecisionTreeRegressor(maxDepth=2)
    >>> dt.setVarianceCol("variance")
    DecisionTreeRegressor...
    >>> model = dt.fit(df)
    >>> model.getVarianceCol()
    'variance'
    >>> model.setLeafCol("leafId")
    DecisionTreeRegressionModel...
    >>> model.depth
    1
    >>> model.numNodes
    3
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> model.predictLeaf(test0.head().features)
    0.0
    >>> result.leafId
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtr_path = temp_path + "/dtr"
    >>> dt.save(dtr_path)
    >>> dt2 = DecisionTreeRegressor.load(dtr_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtr_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeRegressionModel.load(model_path)
    >>> model.numNodes == model2.numNodes
    True
    >>> model.depth == model2.depth
    True
    >>> model.transform(test1).head().variance
    0.0
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> df3 = spark.createDataFrame([
    ...     (1.0, 0.2, Vectors.dense(1.0)),
    ...     (1.0, 0.8, Vectors.dense(1.0)),
    ...     (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> dt3 = DecisionTreeRegressor(maxDepth=2, weightCol="weight", varianceCol="variance")
    >>> model3 = dt3.fit(df3)
    >>> print(model3.toDebugString)
    DecisionTreeRegressionModel...depth=1, numNodes=3...
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
                 seed=None, varianceCol=None, weightCol=None, leafCol="",
                 minWeightFractionPerNode=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", seed=None, varianceCol=None, weightCol=None, \
                 leafCol="", minWeightFractionPerNode=0.0)
        """
        super(DecisionTreeRegressor, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments explicitly supplied by the caller.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", seed=None, varianceCol=None, weightCol=None,
                  leafCol="", minWeightFractionPerNode=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", seed=None, varianceCol=None, weightCol=None, \
                  leafCol="", minWeightFractionPerNode=0.0)
        Sets params for the DecisionTreeRegressor.
        """
        # Only the kwargs the caller actually passed are forwarded to _set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return DecisionTreeRegressionModel(java_model)

    @since("1.4.0")
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    @since("1.4.0")
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    @since("1.4.0")
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)

    @since("1.4.0")
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    @since("1.4.0")
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    @since("1.4.0")
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.0.0")
    def setVarianceCol(self, value):
        """
        Sets the value of :py:attr:`varianceCol`.
        """
        return self._set(varianceCol=value)
@inherit_doc
class DecisionTreeRegressionModel(
    _JavaRegressionModel, _DecisionTreeModel, _DecisionTreeRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting :class:`DecisionTreeRegressor`.
    .. versionadded:: 1.4.0
    """

    @since("3.0.0")
    def setVarianceCol(self, value):
        """
        Set :py:attr:`varianceCol` to ``value``.
        """
        return self._set(varianceCol=value)

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests"
        documentation by Leo Breiman and Adele Cutler, and following the
        implementation from scikit-learn.
        The importance is calculated as follows:
        - importance(feature j) = sum (over nodes which split on feature j)
          of the gain, where gain is scaled by the number of instances
          passing through the node
        - the importances are then normalized so the tree sums to 1.
        .. versionadded:: 2.0.0
        Notes
        -----
        Feature importance for single decision trees can have high variance
        due to correlated predictor variables. Consider using a
        :py:class:`RandomForestRegressor` to determine feature importance
        instead.
        """
        importances = self._call_java("featureImportances")
        return importances
class _RandomForestRegressorParams(_RandomForestParams, _TreeRegressorParams):
    """
    Shared params for :py:class:`RandomForestRegressor` and
    :py:class:`RandomForestRegressionModel`.
    .. versionadded:: 3.0.0
    """

    def __init__(self, *args):
        super(_RandomForestRegressorParams, self).__init__(*args)
        # Install the default values for every shared forest param.
        self._setDefault(
            maxDepth=5,
            maxBins=32,
            minInstancesPerNode=1,
            minInfoGain=0.0,
            maxMemoryInMB=256,
            cacheNodeIds=False,
            checkpointInterval=10,
            impurity="variance",
            subsamplingRate=1.0,
            numTrees=20,
            featureSubsetStrategy="auto",
            leafCol="",
            minWeightFractionPerNode=0.0,
            bootstrap=True,
        )
@inherit_doc
class RandomForestRegressor(_JavaRegressor, _RandomForestRegressorParams, JavaMLWritable,
                            JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    .. versionadded:: 1.4.0
    Examples
    --------
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2)
    >>> rf.getMinWeightFractionPerNode()
    0.0
    >>> rf.setSeed(42)
    RandomForestRegressor...
    >>> model = rf.fit(df)
    >>> model.getBootstrap()
    True
    >>> model.getSeed()
    42
    >>> model.setLeafCol("leafId")
    RandomForestRegressionModel...
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0])
    >>> model.numFeatures
    1
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> model.getNumTrees
    2
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    0.5
    >>> rfr_path = temp_path + "/rfr"
    >>> rf.save(rfr_path)
    >>> rf2 = RandomForestRegressor.load(rfr_path)
    >>> rf2.getNumTrees()
    2
    >>> model_path = temp_path + "/rfr_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None, bootstrap=True):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                 featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None, bootstrap=True)
        """
        super(RandomForestRegressor, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments explicitly supplied by the caller.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None, bootstrap=True):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto", leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None, bootstrap=True)
        Sets params for the RandomForestRegressor.
        """
        # Only the kwargs the caller actually passed are forwarded to _set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return RandomForestRegressionModel(java_model)

    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)

    @since("3.0.0")
    def setBootstrap(self, value):
        """
        Sets the value of :py:attr:`bootstrap`.
        """
        return self._set(bootstrap=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class RandomForestRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _RandomForestRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting :class:`RandomForestRegressor`.
    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        java_trees = list(self._call_java("trees"))
        return [DecisionTreeRegressionModel(java_tree) for java_tree in java_trees]

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across
        all trees in the ensemble. The importance vector is normalized to
        sum to 1. This method is suggested by Hastie et al. (Hastie,
        Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd
        Edition." 2001.) and follows the implementation from scikit-learn.
        .. versionadded:: 2.0.0
        Examples
        --------
        DecisionTreeRegressionModel.featureImportances
        """
        importances = self._call_java("featureImportances")
        return importances
class _GBTRegressorParams(_GBTParams, _TreeRegressorParams):
    """
    Shared params for :py:class:`GBTRegressor` and :py:class:`GBTRegressorModel`.
    .. versionadded:: 3.0.0
    """

    # Loss functions accepted by :py:attr:`lossType` (case-insensitive).
    supportedLossTypes = ["squared", "absolute"]

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_GBTRegressorParams, self).__init__(*args)
        # Install the default values for every shared GBT param.
        self._setDefault(
            maxDepth=5,
            maxBins=32,
            minInstancesPerNode=1,
            minInfoGain=0.0,
            maxMemoryInMB=256,
            cacheNodeIds=False,
            subsamplingRate=1.0,
            checkpointInterval=10,
            lossType="squared",
            maxIter=20,
            stepSize=0.1,
            impurity="variance",
            featureSubsetStrategy="all",
            validationTol=0.01,
            leafCol="",
            minWeightFractionPerNode=0.0,
        )

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        loss = self.getOrDefault(self.lossType)
        return loss
@inherit_doc
class GBTRegressor(_JavaRegressor, _GBTRegressorParams, JavaMLWritable, JavaMLReadable):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    .. versionadded:: 1.4.0
    Examples
    --------
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxDepth=2, seed=42, leafCol="leafId")
    >>> gbt.setMaxIter(5)
    GBTRegressor...
    >>> gbt.setMinWeightFractionPerNode(0.049)
    GBTRegressor...
    >>> gbt.getMaxIter()
    5
    >>> print(gbt.getImpurity())
    variance
    >>> print(gbt.getFeatureSubsetStrategy())
    all
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictLeaf(test0.head().features)
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0))],
    ...     ["label", "features"])
    >>> model.evaluateEachIteration(validation, "squared")
    [0.0, 0.0, 0.0, 0.0, 0.0]
    >>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
    >>> gbt.getValidationIndicatorCol()
    'validationIndicator'
    >>> gbt.getValidationTol()
    0.01
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                 weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None)
        """
        super(GBTRegressor, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments explicitly supplied by the caller.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None)
        Sets params for Gradient Boosted Tree Regression.
        """
        # Only the kwargs the caller actually passed are forwarded to _set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return GBTRegressionModel(java_model)

    @since("1.4.0")
    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    @since("1.4.0")
    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    @since("1.4.0")
    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    @since("1.4.0")
    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    @since("1.4.0")
    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    @since("1.4.0")
    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    @since("3.0.0")
    def setValidationIndicatorCol(self, value):
        """
        Sets the value of :py:attr:`validationIndicatorCol`.
        """
        return self._set(validationIndicatorCol=value)

    @since("1.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    @since("1.4.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("1.4.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class GBTRegressionModel(
    _JavaRegressionModel, _TreeEnsembleModel, _GBTRegressorParams,
    JavaMLWritable, JavaMLReadable
):
    """
    Model produced by fitting :class:`GBTRegressor`.
    .. versionadded:: 1.4.0
    """

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        Each feature's importance is the average of its importance across
        all trees in the ensemble. The importance vector is normalized to
        sum to 1. This method is suggested by Hastie et al. (Hastie,
        Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd
        Edition." 2001.) and follows the implementation from scikit-learn.
        .. versionadded:: 2.0.0
        Examples
        --------
        DecisionTreeRegressionModel.featureImportances
        """
        importances = self._call_java("featureImportances")
        return importances

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        java_trees = list(self._call_java("trees"))
        return [DecisionTreeRegressionModel(java_tree) for java_tree in java_trees]

    def evaluateEachIteration(self, dataset, loss):
        """
        Compute the error or loss for every iteration of gradient boosting.
        .. versionadded:: 2.4.0
        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        loss : str
            The loss function used to compute error.
            Supported options: squared, absolute
        """
        metrics = self._call_java("evaluateEachIteration", dataset, loss)
        return metrics
class _AFTSurvivalRegressionParams(_PredictorParams, HasMaxIter, HasTol, HasFitIntercept,
                                   HasAggregationDepth, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`AFTSurvivalRegression` and :py:class:`AFTSurvivalRegressionModel`.

    .. versionadded:: 3.0.0
    """

    # Param declarations use Params._dummy() as the parent placeholder,
    # following the pattern of the other *Params mixins in this module.
    censorCol = Param(
        Params._dummy(), "censorCol",
        "censor column name. The value of this column could be 0 or 1. " +
        "If the value is 1, it means the event has occurred i.e. " +
        "uncensored; otherwise censored.", typeConverter=TypeConverters.toString)

    quantileProbabilities = Param(
        Params._dummy(), "quantileProbabilities",
        "quantile probabilities array. Values of the quantile probabilities array " +
        "should be in the range (0, 1) and the array should be non-empty.",
        typeConverter=TypeConverters.toListFloat)

    quantilesCol = Param(
        Params._dummy(), "quantilesCol",
        "quantiles column name. This column will output quantiles of " +
        "corresponding quantileProbabilities if it is set.",
        typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_AFTSurvivalRegressionParams, self).__init__(*args)
        # Defaults mirror the documented AFTSurvivalRegression signature below.
        self._setDefault(censorCol="censor",
                         quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                         maxIter=100, tol=1E-6, maxBlockSizeInMB=0.0)

    @since("1.6.0")
    def getCensorCol(self):
        """
        Gets the value of censorCol or its default value.
        """
        return self.getOrDefault(self.censorCol)

    @since("1.6.0")
    def getQuantileProbabilities(self):
        """
        Gets the value of quantileProbabilities or its default value.
        """
        return self.getOrDefault(self.quantileProbabilities)

    @since("1.6.0")
    def getQuantilesCol(self):
        """
        Gets the value of quantilesCol or its default value.
        """
        return self.getOrDefault(self.quantilesCol)
@inherit_doc
class AFTSurvivalRegression(_JavaRegressor, _AFTSurvivalRegressionParams,
                            JavaMLWritable, JavaMLReadable):
    """
    Accelerated Failure Time (AFT) Model Survival Regression

    Fit a parametric AFT survival regression model based on the Weibull distribution
    of the survival time.

    Notes
    -----
    For more information see Wikipedia page on
    `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0), 1.0),
    ...     (1e-40, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
    >>> aftsr = AFTSurvivalRegression()
    >>> aftsr.setMaxIter(10)
    AFTSurvivalRegression...
    >>> aftsr.getMaxIter()
    10
    >>> aftsr.clear(aftsr.maxIter)
    >>> model = aftsr.fit(df)
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> model.setFeaturesCol("features")
    AFTSurvivalRegressionModel...
    >>> model.predict(Vectors.dense(6.3))
    1.0
    >>> model.predictQuantiles(Vectors.dense(6.3))
    DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
    >>> model.transform(df).show()
    +-------+---------+------+----------+
    |  label| features|censor|prediction|
    +-------+---------+------+----------+
    |    1.0|    [1.0]|   1.0|       1.0|
    |1.0E-40|(1,[],[])|   0.0|       1.0|
    +-------+---------+------+----------+
    ...
    >>> aftsr_path = temp_path + "/aftsr"
    >>> aftsr.save(aftsr_path)
    >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
    >>> aftsr2.getMaxIter()
    100
    >>> model_path = temp_path + "/aftsr_model"
    >>> model.save(model_path)
    >>> model2 = AFTSurvivalRegressionModel.load(model_path)
    >>> model.coefficients == model2.coefficients
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.scale == model2.scale
    True
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True

    .. versionadded:: 1.6.0
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                 quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),  # noqa: B005
                 quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                 quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0)
        """
        super(AFTSurvivalRegression, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments the caller actually passed.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                  quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),  # noqa: B005
                  quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                  quantilesCol=None, aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Java model in the Python model class."""
        return AFTSurvivalRegressionModel(java_model)

    @since("1.6.0")
    def setCensorCol(self, value):
        """
        Sets the value of :py:attr:`censorCol`.
        """
        return self._set(censorCol=value)

    @since("1.6.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.
        """
        return self._set(quantileProbabilities=value)

    @since("1.6.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.
        """
        return self._set(quantilesCol=value)

    @since("1.6.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.6.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("1.6.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("2.1.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)

    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class AFTSurvivalRegressionModel(_JavaRegressionModel, _AFTSurvivalRegressionParams,
                                 JavaMLWritable, JavaMLReadable):
    """
    Model produced by fitting an :class:`AFTSurvivalRegression` estimator.

    .. versionadded:: 1.6.0
    """

    @since("3.0.0")
    def setQuantileProbabilities(self, value):
        """
        Replace the :py:attr:`quantileProbabilities` parameter with ``value``.
        """
        updated = self._set(quantileProbabilities=value)
        return updated

    @since("3.0.0")
    def setQuantilesCol(self, value):
        """
        Replace the :py:attr:`quantilesCol` parameter with ``value``.
        """
        updated = self._set(quantilesCol=value)
        return updated

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        java_value = self._call_java("coefficients")
        return java_value

    @property
    @since("1.6.0")
    def intercept(self):
        """
        Model intercept.
        """
        java_value = self._call_java("intercept")
        return java_value

    @property
    @since("1.6.0")
    def scale(self):
        """
        Model scale parameter.
        """
        java_value = self._call_java("scale")
        return java_value

    @since("2.0.0")
    def predictQuantiles(self, features):
        """
        Predicted Quantiles
        """
        quantiles = self._call_java("predictQuantiles", features)
        return quantiles
class _GeneralizedLinearRegressionParams(_PredictorParams, HasFitIntercept, HasMaxIter,
                                         HasTol, HasRegParam, HasWeightCol, HasSolver,
                                         HasAggregationDepth):
    """
    Params for :py:class:`GeneralizedLinearRegression` and
    :py:class:`GeneralizedLinearRegressionModel`.

    .. versionadded:: 3.0.0
    """

    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson, gamma and tweedie.",
                   typeConverter=TypeConverters.toString)

    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)

    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)

    variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
                          "of the Tweedie distribution which characterizes the relationship " +
                          "between the variance and mean of the distribution. Only applicable " +
                          "for the Tweedie family. Supported values: 0 and [1, Inf).",
                          typeConverter=TypeConverters.toFloat)

    linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
                      "Only applicable to the Tweedie family.",
                      typeConverter=TypeConverters.toFloat)

    # Shadows the inherited HasSolver param to narrow the supported options.
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: irls.", typeConverter=TypeConverters.toString)

    offsetCol = Param(Params._dummy(), "offsetCol", "The offset column name. If this is not set " +
                      "or empty, we treat all instance offsets as 0.0",
                      typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_GeneralizedLinearRegressionParams, self).__init__(*args)
        self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
                         variancePower=0.0, aggregationDepth=2)

    @since("2.0.0")
    def getFamily(self):
        """
        Gets the value of family or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.0.0")
    def getLinkPredictionCol(self):
        """
        Gets the value of linkPredictionCol or its default value.
        """
        return self.getOrDefault(self.linkPredictionCol)

    @since("2.0.0")
    def getLink(self):
        """
        Gets the value of link or its default value.
        """
        return self.getOrDefault(self.link)

    @since("2.2.0")
    def getVariancePower(self):
        """
        Gets the value of variancePower or its default value.
        """
        return self.getOrDefault(self.variancePower)

    @since("2.2.0")
    def getLinkPower(self):
        """
        Gets the value of linkPower or its default value.
        """
        return self.getOrDefault(self.linkPower)

    @since("2.3.0")
    def getOffsetCol(self):
        """
        Gets the value of offsetCol or its default value.
        """
        return self.getOrDefault(self.offsetCol)
@inherit_doc
class GeneralizedLinearRegression(_JavaRegressor, _GeneralizedLinearRegressionParams,
                                  JavaMLWritable, JavaMLReadable):
    """
    Generalized Linear Regression.

    Fit a Generalized Linear Model specified by giving a symbolic description of the linear
    predictor (link function) and a description of the error distribution (family). It supports
    "gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
    each family is listed below. The first link function of each family is the default one.

    * "gaussian" -> "identity", "log", "inverse"

    * "binomial" -> "logit", "probit", "cloglog"

    * "poisson"  -> "log", "identity", "sqrt"

    * "gamma"    -> "inverse", "identity", "log"

    * "tweedie"  -> power link function specified through "linkPower". \
                    The default link power in the tweedie family is 1 - variancePower.

    .. versionadded:: 2.0.0

    Notes
    -----
    For more information see Wikipedia page on
    `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(0.0, 0.0)),
    ...     (1.0, Vectors.dense(1.0, 2.0)),
    ...     (2.0, Vectors.dense(0.0, 0.0)),
    ...     (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
    >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
    >>> glr.setRegParam(0.1)
    GeneralizedLinearRegression...
    >>> glr.getRegParam()
    0.1
    >>> glr.clear(glr.regParam)
    >>> glr.setMaxIter(10)
    GeneralizedLinearRegression...
    >>> glr.getMaxIter()
    10
    >>> glr.clear(glr.maxIter)
    >>> model = glr.fit(df)
    >>> model.setFeaturesCol("features")
    GeneralizedLinearRegressionModel...
    >>> model.getMaxIter()
    25
    >>> model.getAggregationDepth()
    2
    >>> transformed = model.transform(df)
    >>> abs(transformed.head().prediction - 1.5) < 0.001
    True
    >>> abs(transformed.head().p - 1.5) < 0.001
    True
    >>> model.coefficients
    DenseVector([1.5..., -1.0...])
    >>> model.numFeatures
    2
    >>> abs(model.intercept - 1.5) < 0.001
    True
    >>> glr_path = temp_path + "/glr"
    >>> glr.save(glr_path)
    >>> glr2 = GeneralizedLinearRegression.load(glr_path)
    >>> glr.getFamily() == glr2.getFamily()
    True
    >>> model_path = temp_path + "/glr_model"
    >>> model.save(model_path)
    >>> model2 = GeneralizedLinearRegressionModel.load(model_path)
    >>> model.intercept == model2.intercept
    True
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.transform(df).take(1) == model2.transform(df).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        __init__(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                 variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        """
        super(GeneralizedLinearRegression, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments the caller actually passed.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, *, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2):
        """
        setParams(self, \\*, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                  variancePower=0.0, linkPower=None, offsetCol=None, aggregationDepth=2)
        Sets params for generalized linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Java model in the Python model class."""
        return GeneralizedLinearRegressionModel(java_model)

    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)

    @since("2.2.0")
    def setVariancePower(self, value):
        """
        Sets the value of :py:attr:`variancePower`.
        """
        return self._set(variancePower=value)

    @since("2.2.0")
    def setLinkPower(self, value):
        """
        Sets the value of :py:attr:`linkPower`.
        """
        return self._set(linkPower=value)

    @since("2.3.0")
    def setOffsetCol(self, value):
        """
        Sets the value of :py:attr:`offsetCol`.
        """
        return self._set(offsetCol=value)

    @since("2.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)

    @since("2.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("2.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("2.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("3.0.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)
class GeneralizedLinearRegressionModel(_JavaRegressionModel, _GeneralizedLinearRegressionParams,
                                       JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model produced by fitting a :class:`GeneralizedLinearRegression` estimator.

    .. versionadded:: 2.0.0
    """

    @since("3.0.0")
    def setLinkPredictionCol(self, value):
        """
        Replace the :py:attr:`linkPredictionCol` parameter with ``value``.
        """
        updated = self._set(linkPredictionCol=value)
        return updated

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        java_value = self._call_java("coefficients")
        return java_value

    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        java_value = self._call_java("intercept")
        return java_value

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (residuals, deviance, p-values) of model on
        training set. An exception is thrown if no summary was
        recorded during training.
        """
        # Guard clause: fail fast when there is no training summary.
        if not self.hasSummary:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)
        java_summary = super(GeneralizedLinearRegressionModel, self).summary
        return GeneralizedLinearRegressionTrainingSummary(java_summary)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on, where dataset is an
            instance of :py:class:`pyspark.sql.DataFrame`
        """
        is_frame = isinstance(dataset, DataFrame)
        if not is_frame:
            raise TypeError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    Generalized linear regression results evaluated on a dataset.

    All members delegate to the wrapped Java summary object via _call_java.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.2.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions.
        """
        return self._call_java("numInstances")

    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")

    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")

    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        residualsType : str, optional
            The type of residuals which should be returned.
            Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)

    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")

    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")

    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")

    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    Generalized linear regression training results.

    Extends :class:`GeneralizedLinearRegressionSummary` with statistics that
    are only available for the training run (iterations, standard errors,
    t- and p-values).

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")

    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")

    def __repr__(self):
        """Delegate to the Java-side ``toString`` for a readable summary."""
        return self._call_java("toString")
class _FactorizationMachinesParams(_PredictorParams, HasMaxIter, HasStepSize, HasTol,
                                   HasSolver, HasSeed, HasFitIntercept, HasRegParam, HasWeightCol):
    """
    Params for :py:class:`FMRegressor`, :py:class:`FMRegressionModel`, :py:class:`FMClassifier`
    and :py:class:`FMClassifierModel`.

    .. versionadded:: 3.0.0
    """

    factorSize = Param(Params._dummy(), "factorSize", "Dimensionality of the factor vectors, " +
                       "which are used to get pairwise interactions between variables",
                       typeConverter=TypeConverters.toInt)

    fitLinear = Param(Params._dummy(), "fitLinear", "whether to fit linear term (aka 1-way term)",
                      typeConverter=TypeConverters.toBoolean)

    miniBatchFraction = Param(Params._dummy(), "miniBatchFraction", "fraction of the input data " +
                              "set that should be used for one iteration of gradient descent",
                              typeConverter=TypeConverters.toFloat)

    initStd = Param(Params._dummy(), "initStd", "standard deviation of initial coefficients",
                    typeConverter=TypeConverters.toFloat)

    # Shadows the inherited HasSolver param to narrow the supported options.
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: gd, adamW. (Default adamW)", typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_FactorizationMachinesParams, self).__init__(*args)
        # Defaults mirror the documented FMRegressor signature below.
        self._setDefault(factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                         miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                         tol=1e-6, solver="adamW")

    @since("3.0.0")
    def getFactorSize(self):
        """
        Gets the value of factorSize or its default value.
        """
        return self.getOrDefault(self.factorSize)

    @since("3.0.0")
    def getFitLinear(self):
        """
        Gets the value of fitLinear or its default value.
        """
        return self.getOrDefault(self.fitLinear)

    @since("3.0.0")
    def getMiniBatchFraction(self):
        """
        Gets the value of miniBatchFraction or its default value.
        """
        return self.getOrDefault(self.miniBatchFraction)

    @since("3.0.0")
    def getInitStd(self):
        """
        Gets the value of initStd or its default value.
        """
        return self.getOrDefault(self.initStd)
@inherit_doc
class FMRegressor(_JavaRegressor, _FactorizationMachinesParams, JavaMLWritable, JavaMLReadable):
    """
    Factorization Machines learning algorithm for regression.

    solver Supports:

    * gd (normal mini-batch gradient descent)
    * adamW (default)

    .. versionadded:: 3.0.0

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.regression import FMRegressor
    >>> df = spark.createDataFrame([
    ...     (2.0, Vectors.dense(2.0)),
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>>
    >>> fm = FMRegressor(factorSize=2)
    >>> fm.setSeed(16)
    FMRegressor...
    >>> model = fm.fit(df)
    >>> model.getMaxIter()
    100
    >>> test0 = spark.createDataFrame([
    ...     (Vectors.dense(-2.0),),
    ...     (Vectors.dense(0.5),),
    ...     (Vectors.dense(1.0),),
    ...     (Vectors.dense(4.0),)], ["features"])
    >>> model.transform(test0).show(10, False)
    +--------+-------------------+
    |features|prediction         |
    +--------+-------------------+
    |[-2.0]  |-1.9989237712341565|
    |[0.5]   |0.4956682219523814 |
    |[1.0]   |0.994586620589689  |
    |[4.0]   |3.9880970124135344 |
    +--------+-------------------+
    ...
    >>> model.intercept
    -0.0032501766849261557
    >>> model.linear
    DenseVector([0.9978])
    >>> model.factors
    DenseMatrix(1, 2, [0.0173, 0.0021], 1)
    >>> model_path = temp_path + "/fm_model"
    >>> model.save(model_path)
    >>> model2 = FMRegressionModel.load(model_path)
    >>> model2.intercept
    -0.0032501766849261557
    >>> model2.linear
    DenseVector([0.9978])
    >>> model2.factors
    DenseMatrix(1, 2, [0.0173, 0.0021], 1)
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                 tol=1e-6, solver="adamW", seed=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                 tol=1e-6, solver="adamW", seed=None)
        """
        super(FMRegressor, self).__init__()
        # Create the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.FMRegressor", self.uid)
        # _input_kwargs is populated by the @keyword_only decorator with the
        # keyword arguments the caller actually passed.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("3.0.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                  tol=1e-6, solver="adamW", seed=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                  tol=1e-6, solver="adamW", seed=None)
        Sets Params for FMRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted Java model in the Python model class."""
        return FMRegressionModel(java_model)

    @since("3.0.0")
    def setFactorSize(self, value):
        """
        Sets the value of :py:attr:`factorSize`.
        """
        return self._set(factorSize=value)

    @since("3.0.0")
    def setFitLinear(self, value):
        """
        Sets the value of :py:attr:`fitLinear`.
        """
        return self._set(fitLinear=value)

    @since("3.0.0")
    def setMiniBatchFraction(self, value):
        """
        Sets the value of :py:attr:`miniBatchFraction`.
        """
        return self._set(miniBatchFraction=value)

    @since("3.0.0")
    def setInitStd(self, value):
        """
        Sets the value of :py:attr:`initStd`.
        """
        return self._set(initStd=value)

    @since("3.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("3.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("3.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
class FMRegressionModel(_JavaRegressionModel, _FactorizationMachinesParams, JavaMLWritable,
                        JavaMLReadable):
    """
    Model produced by fitting an :class:`FMRegressor`.

    .. versionadded:: 3.0.0
    """

    @property
    @since("3.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        java_value = self._call_java("intercept")
        return java_value

    @property
    @since("3.0.0")
    def linear(self):
        """
        Model linear term.
        """
        java_value = self._call_java("linear")
        return java_value

    @property
    @since("3.0.0")
    def factors(self):
        """
        Model factor term.
        """
        java_value = self._call_java("factors")
        return java_value
if __name__ == "__main__":
    # Run the doctests in this module against a local SparkSession.
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    # Scratch directory used by save/load doctests (exposed as temp_path).
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # Robustness fix: stop the SparkSession and remove the scratch
        # directory even when doctest.testmod itself raises; previously
        # spark.stop() only ran on the success path.
        spark.stop()
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
| maropu/spark | python/pyspark/ml/regression.py | Python | apache-2.0 | 91,559 | [
"Gaussian"
] | ef98bce5ef3ecb36b4b49069e5aef7d15f1fd879e41a512a0e1b55659c7282ae |
# Compute a particle concentration
# and save as netCDF
#
# Reads a ladim/postladim particle file, bins particle positions onto the
# model grid for a range of output records, averages over time, and writes
# the result to a NetCDF file.
import datetime
import numpy as np
from netCDF4 import Dataset
from postladim import ParticleFile
# import gridmap
# ----------------
# User settings
# ----------------
particle_file = "/hexagon/vol1/bjorn_rhea/out.nc"
grid_file = "/data/model_data006/anneds/Lusedata/Gridfiler/norkyst_800m_grid_full.nc"
output_file = "c2.nc"
# Min, max day-degrees to consider
ddmin, ddmax = 50, 150
# First/last day to consider
date0 = datetime.datetime(2017, 3, 1)
date1 = datetime.datetime(2017, 3, 11)
# date1 = datetime.datetime(2017, 3, 1)
# ----------------
# Read grid file
# ----------------
H = f.variables["h"][:, :]          # bathymetry (not used below)
M = f.variables["mask_rho"][:, :]   # land mask (not used below)
lon = f.variables["lon_rho"][:, :]
lat = f.variables["lat_rho"][:, :]
f.close()
# Grid dimensions (eta, xi) taken from the bathymetry array
jmax, imax = H.shape
# ---------------------
# Read particle file
# ---------------------
pf = ParticleFile(particle_file)
# Find record numbers
# n0 = first record at or after date0; n1 = last record before date1
n0 = -99   # sentinel meaning "not found yet"
# n1 = -99
for n in range(pf.num_times):
    if pf.time(n) < date0:
        continue
    if n0 < 0:  # First time
        n0 = n
        n1 = n
    if pf.time(n) < date1:
        n1 = n
print("start: ", n0, pf.time(n0))
print("stop : ", n1, pf.time(n1))
# Accumulate weighted particle counts per grid cell over records n0..n1
C = np.zeros((jmax, imax))
for n in range(n0, n1 + 1):
    print(n)
    X0, Y0 = pf.position(n)
    S0 = pf["super", n]   # particle weights ("super" variable in the file)
    A = pf["age", n]      # age, filtered against the day-degree window below
    I = (ddmin <= A) & (A < ddmax)
    # Bin positions onto grid cells; the +-0.5 range offsets make each
    # histogram bin coincide with exactly one grid cell.
    C0, Yb, Xb = np.histogram2d(
        Y0[I],
        X0[I],
        weights=S0[I],
        bins=(jmax, imax),
        range=[[-0.5, jmax - 0.5], [-0.5, imax - 0.5]],
    )
    C += C0
# pf.close()
# Average the accumulated counts over the number of records processed
C /= n1 + 1 - n0
# --------------------------
# Define output NetCDF file
# --------------------------
nc = Dataset(output_file, mode="w", format="NETCDF3_CLASSIC")
# Dimensions
nc.createDimension("xi_rho", imax)
nc.createDimension("eta_rho", jmax)
# nc.createDimension('release_locations', N_release)
# Variables
v = nc.createVariable("conc", "f", ("eta_rho", "xi_rho"))
v.long_name = "Particle concentration"
v.units = "number of particles in grid cell"
# NOTE(review): the stored field is a time-averaged, weight-summed count,
# so the units string above is only approximately right — confirm intent.
# Global variables
nc.institution = "Institute of Marine Research"
nc.grid_file = grid_file
nc.particle_file = particle_file
nc.history = "Created %s by spreading2nc.py" % datetime.date.today()
# ------------------
# Save variables
# ------------------
nc.variables["conc"][:, :] = C
# -------------
# Clean up
# -------------
nc.close()
| bjornaa/ladim | models/zladim/agg2nc.py | Python | mit | 2,415 | [
"NetCDF"
] | 0ae3533efc2af9bdc014394a3ecbdf2109ad4396c7a762e1960a0586ffa86e18 |
import sys
import logging
import ast
import constants as c
import copy
import numpy
import os
import qcck
import qcgf
import qcio
import qcrp
import qcts
import qcutils
import time
import xlrd
import meteorologicalfunctions as mf
# Module-level logger; a child of the application's 'qc' logger.
log = logging.getLogger('qc.ls')
def l1qc(cf):
    """
    Read the raw (L1) data named in the control file and return a data structure.

    Reads an Excel or CSV input file (chosen from the file extension), attaches
    netCDF attributes from the control file, fixes the time step, and derives
    standard-deviation/variance and user-defined series.

    Args:
        cf: control file object (dict-like).
    Returns:
        An L1 data structure; ds.returncodes["value"] is non-zero on failure
        (e.g. the input file was not found).
    """
    # get the data series from the Excel file
    in_filename = qcio.get_infilenamefromcf(cf)
    if not qcutils.file_exists(in_filename,mode="quiet"):
        msg = " Input file "+in_filename+" not found ..."
        log.error(msg)
        ds1 = qcio.DataStructure()
        ds1.returncodes = {"value":1,"message":msg}
        return ds1
    file_name,file_extension = os.path.splitext(in_filename)
    if "csv" in file_extension.lower():
        ds1 = qcio.csv_read_series(cf)
        if ds1.returncodes["value"] != 0:
            return ds1
        # get a series of Excel datetime from the Python datetime objects
        qcutils.get_xldatefromdatetime(ds1)
    else:
        ds1 = qcio.xl_read_series(cf)
        if ds1.returncodes["value"] != 0:
            return ds1
        # get a series of Python datetime objects from the Excel datetime
        qcutils.get_datetimefromxldate(ds1)
    # get the netCDF attributes from the control file
    qcts.do_attributes(cf,ds1)
    # round the Python datetime to the nearest second
    qcutils.round_datetime(ds1,mode="nearest_second")
    #check for gaps in the Python datetime series and fix if present
    fixtimestepmethod = qcutils.get_keyvaluefromcf(cf,["options"],"FixTimeStepMethod",default="round")
    if qcutils.CheckTimeStep(ds1): qcutils.FixTimeStep(ds1,fixtimestepmethod=fixtimestepmethod)
    # recalculate the Excel datetime
    qcutils.get_xldatefromdatetime(ds1)
    # get the Year, Month, Day etc from the Python datetime
    qcutils.get_ymdhmsfromdatetime(ds1)
    # write the processing level to a global attribute
    ds1.globalattributes['nc_level'] = str("L1")
    # get the start and end date from the datetime series unless they were
    # given in the control file
    if 'start_date' not in ds1.globalattributes.keys():
        ds1.globalattributes['start_date'] = str(ds1.series['DateTime']['Data'][0])
    if 'end_date' not in ds1.globalattributes.keys():
        ds1.globalattributes['end_date'] = str(ds1.series['DateTime']['Data'][-1])
    # calculate variances from standard deviations and vice versa
    qcts.CalculateStandardDeviations(cf,ds1)
    # create new variables using user defined functions
    qcts.DoFunctions(cf,ds1)
    # create a series of synthetic downwelling shortwave radiation
    qcts.get_synthetic_fsd(ds1)
    return ds1
def l2qc(cf,ds1):
    """
    Perform initial QA/QC on flux data
    Generates L2 from L1 data
    * check parameters specified in control file

    Functions performed:
        qcck.do_rangecheck*
        qcck.do_CSATcheck
        qcck.do_7500check
        qcck.do_diurnalcheck*
        qcck.do_excludedates*
        qcck.do_excludehours*
        qcts.albedo

    Args:
        cf: control file object.
        ds1: L1 data structure (left unmodified; a deep copy is taken).
    Returns:
        The L2 data structure.
    """
    # make a copy of the L1 data
    ds2 = copy.deepcopy(ds1)
    # set some attributes for this level
    qcutils.UpdateGlobalAttributes(cf,ds2,"L2")
    ds2.globalattributes['Functions'] = ''
    # put the control file name into the global attributes
    ds2.globalattributes['controlfile_name'] = cf['controlfile_name']
    # apply the quality control checks (range, diurnal, exclude dates and exclude hours
    qcck.do_qcchecks(cf,ds2)
    # do the CSAT diagnostic check
    qcck.do_CSATcheck(cf,ds2)
    # do the IRGA diagnostic check
    qcck.do_IRGAcheck(cf,ds2)
    # constrain albedo estimates to full sun angles
    #qcts.albedo(cf,ds2)
    #log.info(' Finished the albedo constraints')    # apply linear corrections to the data
    #log.info(' Applying linear corrections ...')
    qcck.do_linear(cf,ds2)
    # write series statistics to file
    qcio.get_seriesstats(cf,ds2)
    # write the percentage of good data as a variable attribute
    qcutils.get_coverage_individual(ds2)
    return ds2
def l3qc(cf,ds2):
    """
    Corrections
    Generates L3 from L2 data

    Args:
        cf: control file object.
        ds2: L2 data structure (left unmodified; a deep copy is taken).
    Returns:
        The L3 data structure.

    Functions performed:
        qcts.AddMetVars (optional)
        qcts.CorrectSWC (optional*)
        qcck.do_linear (all sites)
        qcutils.GetMergeList + qcts.MergeSeries Ah_EC (optional)x
        qcts.TaFromTv (optional)
        qcutils.GetMergeList + qcts.MergeSeries Ta_EC (optional)x
        qcts.CoordRotation2D (all sites)
        qcts.MassmanApprox (optional*)y
        qcts.Massman (optional*)y
        qcts.CalculateFluxes (used if Massman not optioned)x
        qcts.CalculateFluxesRM (used if Massman optioned)y
        qcts.FhvtoFh (all sites)
        qcts.Fe_WPL (WPL computed on fluxes, as with Campbell algorithm)+x
        qcts.Fc_WPL (WPL computed on fluxes, as with Campbell algorithm)+x
        qcts.Fe_WPLcov (WPL computed on kinematic fluxes (ie, covariances), as with WPL80)+y
        qcts.Fc_WPLcov (WPL computed on kinematic fluxes (ie, covariances), as with WPL80)+y
        qcts.CalculateNetRadiation (optional)
        qcutils.GetMergeList + qcts.MergeSeries Fsd (optional)
        qcutils.GetMergeList + qcts.MergeSeries Fn (optional*)
        qcts.InterpolateOverMissing (optional)
        AverageSeriesByElements (optional)
        qcts.CorrectFgForStorage (all sites)
        qcts.Average3SeriesByElements (optional)
        qcts.CalculateAvailableEnergy (optional)
        qcck.do_qcchecks (all sites)
        qcck.gaps (optional)

    *:  requires ancillary measurements for parameterisation
    +:  each site requires one pair, either Fe_WPL & Fc_WPL (default) or Fe_WPLCov & FcWPLCov
    x:  required together in option set
    y:  required together in option set
    """
    # make a copy of the L2 data
    ds3 = copy.deepcopy(ds2)
    # set some attributes for this level
    qcutils.UpdateGlobalAttributes(cf,ds3,"L3")
    # initialise the global attribute to document the functions used
    ds3.globalattributes['Functions'] = ''
    # put the control file name into the global attributes
    ds3.globalattributes['controlfile_name'] = cf['controlfile_name']
    # check to see if we have any imports
    qcgf.ImportSeries(cf,ds3)
    # correct measured soil water content using empirical relationship to collected samples
    qcts.CorrectSWC(cf,ds3)
    # apply linear corrections to the data
    qcck.do_linear(cf,ds3)
    # merge whatever humidities are available
    qcts.MergeHumidities(cf,ds3,convert_units=True)
    # get the air temperature from the CSAT virtual temperature
    qcts.TaFromTv(cf,ds3)
    # merge the HMP and corrected CSAT data
    qcts.MergeSeries(cf,ds3,'Ta',[0,10],convert_units=True)
    qcutils.CheckUnits(ds3,"Ta","C",convert_units=True)
    # calculate humidities (absolute, specific and relative) from whatever is available
    qcts.CalculateHumidities(ds3)
    # merge the 7500 CO2 concentration
    qcts.MergeSeries(cf,ds3,'Cc',[0,10],convert_units=True)
    # PRI - disable CO2 units conversion from whatever to mg/m3
    #     - this step is, as far as I can see, redundant, see qcts.Fc_WPL()
    #qcutils.CheckUnits(ds3,"Cc","mg/m3",convert_units=True)
    # add relevant meteorological values to L3 data
    qcts.CalculateMeteorologicalVariables(ds3)
    # check to see if the user wants to use the fluxes in the L2 file
    if not qcutils.cfoptionskeylogical(cf,Key="UseL2Fluxes",default=False):
        # check the covariancve units and change if necessary
        qcts.CheckCovarianceUnits(ds3)
        # do the 2D coordinate rotation
        qcts.CoordRotation2D(cf,ds3)
        # do the Massman frequency attenuation correction
        qcts.MassmanStandard(cf,ds3)
        # calculate the fluxes
        qcts.CalculateFluxes(cf,ds3)
        # approximate wT from virtual wT using wA (ref: Campbell OPECSystem manual)
        qcts.FhvtoFh(cf,ds3)
        # correct the H2O & CO2 flux due to effects of flux on density measurements
        qcts.Fe_WPL(cf,ds3)
        qcts.Fc_WPL(cf,ds3)
    # convert CO2 units if required
    qcutils.ConvertCO2Units(cf,ds3,Cc='Cc')
    # calculate Fc storage term - single height only at present
    qcts.CalculateFcStorage(cf,ds3)
    # convert Fc and Fc_storage units if required
    qcutils.ConvertFcUnits(cf,ds3,Fc='Fc',Fc_storage='Fc_storage')
    # correct Fc for storage term - only recommended if storage calculated from profile available
    qcts.CorrectFcForStorage(cf,ds3)
    # merge the incoming shortwave radiation
    qcts.MergeSeries(cf,ds3,'Fsd',[0,10])
    # calculate the net radiation from the Kipp and Zonen CNR1
    qcts.CalculateNetRadiation(cf,ds3,Fn_out='Fn_KZ',Fsd_in='Fsd',Fsu_in='Fsu',Fld_in='Fld',Flu_in='Flu')
    qcts.MergeSeries(cf,ds3,'Fn',[0,10])
    # combine wind speed from the Wind Sentry and the CSAT
    qcts.MergeSeries(cf,ds3,'Ws',[0,10])
    # combine wind direction from the Wind Sentry and the CSAT
    qcts.MergeSeries(cf,ds3,'Wd',[0,10])
    # correct soil heat flux for storage
    #    ... either average the raw ground heat flux, soil temperature and moisture
    #        and then do the correction (OzFlux "standard")
    qcts.AverageSeriesByElements(cf,ds3,'Ts')
    qcts.AverageSeriesByElements(cf,ds3,'Sws')
    if qcutils.cfoptionskeylogical(cf,Key='CorrectIndividualFg'):
        # ... or correct the individual ground heat flux measurements (James' method)
        qcts.CorrectIndividualFgForStorage(cf,ds3)
        qcts.AverageSeriesByElements(cf,ds3,'Fg')
    else:
        qcts.AverageSeriesByElements(cf,ds3,'Fg')
        qcts.CorrectFgForStorage(cf,ds3,Fg_out='Fg',Fg_in='Fg',Ts_in='Ts',Sws_in='Sws')
    # calculate the available energy
    qcts.CalculateAvailableEnergy(ds3,Fa_out='Fa',Fn_in='Fn',Fg_in='Fg')
    # create new series using MergeSeries or AverageSeries
    qcck.CreateNewSeries(cf,ds3)
    # create a series of daily averaged soil moisture interpolated back to the time step
    #qcts.DailyAverageSws_Interpolated(cf,ds3,Sws_out='Sws_daily',Sws_in='Sws')
    # re-apply the quality control checks (range, diurnal and rules)
    qcck.do_qcchecks(cf,ds3)
    # coordinate gaps in the three main fluxes
    qcck.CoordinateFluxGaps(cf,ds3)
    # coordinate gaps in Ah_7500_Av with Fc
    qcck.CoordinateAh7500AndFcGaps(cf,ds3)
    # get the statistics for the QC flags and write these to an Excel spreadsheet
    qcio.get_seriesstats(cf,ds3)
    # write the percentage of good data as a variable attribute
    qcutils.get_coverage_individual(ds3)
    # write the percentage of good data for groups
    qcutils.get_coverage_groups(ds3)
    return ds3
def l4qc(cf,ds3):
    """
    Gap fill the meteorological drivers.

    Generates L4 from L3 data by filling the driver series listed in the
    control file using interpolation, climatology, alternate data sources
    and SOLO, then merging the gap filled versions into single series.

    Args:
        cf: control file object.
        ds3: L3 data structure.
    Returns:
        The L4 data structure; empty (logical false) if copying the L3 data
        failed, or partially processed if the user quit a gap filling GUI
        (signalled via ds4.returncodes).
    """
    # !!! code here to use existing L4 file
    # logic
    # if the L4 doesn't exist
    #  - create ds4 by using copy.deepcopy(ds3)
    # if the L4 does exist and the "UseExistingL4File" option is False
    #  - create ds4 by using copy.deepcopy(ds3)
    # if the L4 does exist and the "UseExistingL4File" option is True
    #  - read the contents of the L4 netCDF file
    #  - check the start and end dates of the L3 and L4 data
    #     - if these are the same then tell the user there is nothing to do
    #  - copy the L3 data to the L4 data structure
    #  - replace the L3 data with the L4 data
    #ds4 = copy.deepcopy(ds3)
    ds4 = qcio.copy_datastructure(cf,ds3)
    # ds4 will be empty (logical false) if an error occurs in copy_datastructure
    # return from this routine if this is the case
    if not ds4: return ds4
    # set some attributes for this level
    qcutils.UpdateGlobalAttributes(cf,ds4,"L4")
    ds4.cf = cf
    # calculate the available energy
    if "Fa" not in ds4.series.keys():
        qcts.CalculateAvailableEnergy(ds4,Fa_out='Fa',Fn_in='Fn',Fg_in='Fg')
    # create a dictionary to hold the gap filling data
    ds_alt = {}
    # check to see if we have any imports
    qcgf.ImportSeries(cf,ds4)
    # re-apply the quality control checks (range, diurnal and rules)
    qcck.do_qcchecks(cf,ds4)
    # now do the meteorological driver gap filling
    for ThisOne in cf["Drivers"].keys():
        if ThisOne not in ds4.series.keys(): log.error("Series "+ThisOne+" not in data structure"); continue
        # parse the control file for information on how the user wants to do the gap filling
        qcgf.GapFillParseControlFile(cf,ds4,ThisOne,ds_alt)
    # *** start of the section that does the gap filling of the drivers ***
    # fill short gaps using interpolation
    qcgf.GapFillUsingInterpolation(cf,ds4)
    # gap fill using climatology
    qcgf.GapFillFromClimatology(ds4)
    # do the gap filling using the ACCESS output
    qcgf.GapFillFromAlternate(cf,ds4,ds_alt)
    if ds4.returncodes["alternate"]=="quit": return ds4
    # gap fill using SOLO
    qcgf.GapFillUsingSOLO(cf,ds3,ds4)
    if ds4.returncodes["solo"]=="quit": return ds4
    # merge the first group of gap filled drivers into a single series
    qcts.MergeSeriesUsingDict(ds4,merge_order="prerequisite")
    # re-calculate the ground heat flux but only if requested in control file
    opt = qcutils.get_keyvaluefromcf(cf,["Options"],"CorrectFgForStorage",default="No",mode="quiet")
    if opt.lower()!="no":
        qcts.CorrectFgForStorage(cf,ds4,Fg_out='Fg',Fg_in='Fg_Av',Ts_in='Ts',Sws_in='Sws')
    # re-calculate the net radiation
    qcts.CalculateNetRadiation(cf,ds4,Fn_out='Fn',Fsd_in='Fsd',Fsu_in='Fsu',Fld_in='Fld',Flu_in='Flu')
    # re-calculate the available energy
    qcts.CalculateAvailableEnergy(ds4,Fa_out='Fa',Fn_in='Fn',Fg_in='Fg')
    # merge the second group of gap filled drivers into a single series
    qcts.MergeSeriesUsingDict(ds4,merge_order="standard")
    # re-calculate the water vapour concentrations
    qcts.CalculateHumiditiesAfterGapFill(ds4)
    # re-calculate the meteorological variables
    qcts.CalculateMeteorologicalVariables(ds4)
    # the Tumba rhumba
    qcts.CalculateComponentsFromWsWd(ds4)
    # check for any missing data
    qcutils.get_missingingapfilledseries(ds4)
    # write the percentage of good data as a variable attribute
    qcutils.get_coverage_individual(ds4)
    # write the percentage of good data for groups
    qcutils.get_coverage_groups(ds4)
    return ds4
def l5qc(cf,ds4):
    """
    Gap fill the fluxes.

    Generates L5 from L4 data by applying the turbulence filter and filling
    the flux series listed in the control file using SOLO and climatology,
    then merging the gap filled versions into single series.

    Args:
        cf: control file object.
        ds4: L4 data structure.
    Returns:
        The L5 data structure; empty (logical false) if copying the L4 data
        failed, or partially processed if the user quit the SOLO GUI
        (signalled via ds5.returncodes).
    """
    ds5 = qcio.copy_datastructure(cf,ds4)
    # ds4 will be empty (logical false) if an error occurs in copy_datastructure
    # return from this routine if this is the case
    if not ds5: return ds5
    # set some attributes for this level
    qcutils.UpdateGlobalAttributes(cf,ds5,"L5")
    ds5.cf = cf
    # create a dictionary to hold the gap filling data
    ds_alt = {}
    # check to see if we have any imports
    qcgf.ImportSeries(cf,ds5)
    # re-apply the quality control checks (range, diurnal and rules)
    qcck.do_qcchecks(cf,ds5)
    # now do the flux gap filling methods
    label_list = qcutils.get_label_list_from_cf(cf)
    for ThisOne in label_list:
        # parse the control file for information on how the user wants to do the gap filling
        qcgf.GapFillParseControlFile(cf,ds5,ThisOne,ds_alt)
    # *** start of the section that does the gap filling of the fluxes ***
    # apply the turbulence filter (if requested)
    qcck.ApplyTurbulenceFilter(cf,ds5)
    # fill short gaps using interpolation
    #qcgf.GapFillUsingInterpolation(cf,ds5)
    # do the gap filling using SOLO
    qcgf.GapFillUsingSOLO(cf,ds4,ds5)
    if ds5.returncodes["solo"]=="quit": return ds5
    ## gap fill using marginal distribution sampling
    #qcgf.GapFillFluxUsingMDS(cf,ds5)
    ## gap fill using ratios
    #qcgf.GapFillFluxFromDayRatio(cf,ds5)
    # gap fill using climatology
    qcgf.GapFillFromClimatology(ds5)
    # merge the gap filled drivers into a single series
    qcts.MergeSeriesUsingDict(ds5,merge_order="standard")
    # write the percentage of good data as a variable attribute
    qcutils.get_coverage_individual(ds5)
    # write the percentage of good data for groups
    qcutils.get_coverage_groups(ds5)
    return ds5
def l6qc(cf,ds5):
    """
    Partition the net ecosystem exchange.

    Generates L6 from L5 data: estimates ecosystem respiration (ER) using
    SOLO, FFNET, Lloyd-Taylor and Lasslop methods, merges the estimates with
    the observations, then calculates NEE, NEP, ET and partitions NEE into
    GPP and ER. Finishes by writing the L6 summary.

    Args:
        cf: control file object.
        ds5: L5 data structure.
    Returns:
        The L6 data structure; empty (logical false) if copying the L5 data
        failed.
    """
    ds6 = qcio.copy_datastructure(cf,ds5)
    # ds6 will be empty (logical false) if an error occurs in copy_datastructure
    # return from this routine if this is the case
    if not ds6: return ds6
    # set some attributes for this level
    qcutils.UpdateGlobalAttributes(cf,ds6,"L6")
    # parse the control file
    qcrp.ParseL6ControlFile(cf,ds6)
    # check to see if we have any imports
    qcgf.ImportSeries(cf,ds6)
    # check units
    qcutils.CheckUnits(ds6,"Fc","umol/m2/s",convert_units=True)
    ## filter Fc for night time and ustar threshold, write to ds as "ER"
    #result = qcrp.GetERFromFc(cf,ds6)
    #if result==0: return
    # apply the turbulence filter (if requested)
    qcck.ApplyTurbulenceFilter(cf,ds6)
    qcrp.GetERFromFc2(cf,ds6)
    # estimate ER using SOLO
    qcrp.ERUsingSOLO(cf,ds6)
    # estimate ER using FFNET
    qcrp.ERUsingFFNET(cf,ds6)
    # estimate ER using Lloyd-Taylor
    qcrp.ERUsingLloydTaylor(cf,ds6)
    # estimate ER using Lasslop et al
    qcrp.ERUsingLasslop(cf,ds6)
    # merge the estimates of ER with the observations
    qcts.MergeSeriesUsingDict(ds6,merge_order="standard")
    # calculate NEE from Fc and ER
    qcrp.CalculateNEE(cf,ds6)
    # calculate NEP from NEE
    qcrp.CalculateNEP(cf,ds6)
    # calculate ET from Fe
    qcrp.CalculateET(ds6)
    # partition NEE into GPP and ER
    qcrp.PartitionNEE(cf,ds6)
    # write the percentage of good data as a variable attribute
    qcutils.get_coverage_individual(ds6)
    # write the percentage of good data for groups
    qcutils.get_coverage_groups(ds6)
    # do the L6 summary
    qcrp.L6_summary(cf,ds6)
    return ds6
| OzFlux/OzFluxQC | scripts/qcls.py | Python | gpl-3.0 | 17,765 | [
"NetCDF"
] | f6c50d3f23fd390d0d8ab6b29bcc9cf2c616ac2a4198e774346d94aec3d1a563 |
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and
        access other resources the user has permissions for.

        Users are redirected to this endpoint after logging in.

        You can use the **course_enrollments** value in
        the response to get a list of courses the user is enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        * id: The ID of the user.

        * username: The username of the currently logged in user.

        * email: The email address of the currently logged in user.

        * name: The full name of the currently logged in user.

        * course_enrollments: The URI to list the courses the currently logged
          in user is enrolled in.
    """
    # NOTE(review): 'course_enrollments' is a to-many (reverse) relation;
    # select_related() normally only follows forward FK/OneToOne fields --
    # verify whether prefetch_related was intended here.
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    # Look users up by username (from the URL) rather than by primary key.
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Case**

        Get or update the ID of the module that the specified user last visited in the specified course.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
            body:
                last_visited_module_id={module_id}
                modification_date={date}

            The modification_date is optional. If it is present, the update will only take effect
            if the modification_date is later than the modification_date saved on the server.

    **Response Values**

        * last_visited_module_id: The ID of the last module visited by the user in the course.

        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)

        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )

        # Walk course -> chapter -> section, stopping wherever the tree ends.
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)

        # Deepest module first, course module last.
        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [unicode(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )

        if modification_date:
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)

        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")

        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Reject unparseable or timezone-naive dates.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course: honor or
          certified.
        * is_active: Whether the course is currently active; true or false.
        * certificate: Information about the user's earned certificate in the course.
          * url: URL to the downloadable version of the certificate, if exists.
        * course: A collection of data about the course:

          * course_updates: The URI to get data for course updates.
          * number: The course number.
          * org: The organization that created the course.
          * video_outline: The URI to get the list of all vides the user can
            access in the course.
          * id: The unique ID of the course.
          * subscription_id: A unique "clean" (alphanumeric with '_') ID of the course.
          * latest_updates: Reserved for future use.
          * end: The end date of the course.
          * name: The name of the course.
          * course_handouts: The URI to get data for course handouts.
          * start: The data and time the course starts.
          * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        # Active enrollments for the requested user, most recent first.
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # Drop enrollments whose course is missing or not available on mobile
        # for the requesting user.
        return [
            enrollment for enrollment in enrollments
            if enrollment.course and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
"""
Redirect to the currently-logged-in user's info page
"""
return redirect("user-detail", username=request.user.username)
| kamalx/edx-platform | lms/djangoapps/mobile_api/users/views.py | Python | agpl-3.0 | 9,333 | [
"VisIt"
] | 47eeebff832d135724609e26afe59291ba189d35b557c55bda112496470aa6a9 |
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weigthts
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create gaussian data
# Create weigts with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
pl.figure(figsize=(6, 5))
pl.title("Weights of the model")
pl.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
pl.plot(w, 'g-', label="Ground truth")
pl.plot(ols.coef_, 'r--', label="OLS estimate")
pl.xlabel("Features")
pl.ylabel("Values of the weights")
pl.legend(loc="best", prop=dict(size=12))
pl.figure(figsize=(6, 5))
pl.title("Histogram of the weights")
pl.hist(clf.coef_, bins=n_features, log=True)
pl.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
pl.ylabel("Features")
pl.xlabel("Values of the weights")
pl.legend(loc="lower left")
pl.figure(figsize=(6, 5))
pl.title("Marginal log-likelihood")
pl.plot(clf.scores_)
pl.ylabel("Score")
pl.xlabel("Iterations")
pl.show()
| JT5D/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | Python | bsd-3-clause | 2,553 | [
"Gaussian"
] | e0bda6d2f77ae647e003c518a1535d011b7927ca06e9d12579da3f83ab55a5bc |
from __future__ import division, absolute_import, print_function
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark
try:
from scipy.stats import spearmanr
except ImportError:
pass
try:
import scipy.interpolate as interpolate
except ImportError:
pass
class Leaks(Benchmark):
    # Unit label reported by asv for the tracked value below.
    unit = "relative increase with repeats"

    def track_leaks(self):
        """
        Track peak memory growth of repeated griddata() workloads.

        Runs the same griddata workload via run_monitored() with increasing
        repeat counts and records peak memory. A significant positive Spearman
        correlation (p < 0.05) between repeat count and peak memory suggests a
        leak. Returns the ratio of largest to smallest observed peak.
        """
        set_mem_rlimit()

        # Setup temp file, make it fit in memory
        repeats = [2, 5, 10, 50, 200]
        peak_mems = []

        for repeat in repeats:
            code = """
            import numpy as np
            from scipy.interpolate import griddata

            def func(x, y):
                return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2

            grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
            points = np.random.rand(1000, 2)
            values = func(points[:,0], points[:,1])

            for t in range(%(repeat)d):
                for method in ['nearest', 'linear', 'cubic']:
                    griddata(points, values, (grid_x, grid_y), method=method)
            """ % dict(repeat=repeat)

            _, peak_mem = run_monitored(code)
            peak_mems.append(peak_mem)

        corr, p = spearmanr(repeats, peak_mems)
        if p < 0.05:
            print("*"*79)
            print("PROBABLE MEMORY LEAK")
            print("*"*79)
        else:
            print("PROBABLY NO MEMORY LEAK")

        return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
    """Benchmark evaluation of a piecewise cubic polynomial (PPoly)."""

    def setup(self):
        # Fixed seed so every run times the same polynomial.
        np.random.seed(1234)
        n_intervals = 55
        breakpoints = np.sort(np.random.random(n_intervals + 1))
        coefficients = np.random.random((3, n_intervals))
        self.pp = interpolate.PPoly(coefficients, breakpoints)
        # 100 evaluation points spanning [0, 1].
        self.xp = np.linspace(0, 1, 100)

    def time_evaluation(self):
        self.pp(self.xp)
class GridData(Benchmark):
    """Benchmark scattered-data interpolation onto grids of varying size."""

    param_names = ['n_grids', 'method']
    params = [
        [10j, 100j, 1000j],
        ['nearest', 'linear', 'cubic']
    ]

    def setup(self, n_grids, method):
        def test_function(x, y):
            return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2

        self.func = test_function
        # Target grid (imaginary step => number of points, as in np.mgrid).
        self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
        # 1000 random sample locations in the unit square.
        self.points = np.random.rand(1000, 2)
        self.values = test_function(self.points[:, 0], self.points[:, 1])

    def time_evaluation(self, n_grids, method):
        interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
    """Benchmark construction of 1-D interpolators of various kinds."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
    ]

    def setup(self, n_samples, method):
        # Exponentially decaying samples on the integers 0..n_samples-1.
        sample_points = np.arange(n_samples)
        self.x = sample_points
        self.y = np.exp(-sample_points / 3.0)

    def time_interpolate(self, n_samples, method):
        interpolate.interp1d(self.x, self.y, kind=method)
class Interpolate2d(Benchmark):
    """Benchmark construction of 2-D interpolators of various kinds."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'cubic', 'quintic'],
    ]

    def setup(self, n_samples, method):
        # Symmetric grid of coordinates with 0.25 spacing.
        half_span = n_samples / 2.
        self.x = np.arange(-half_span, half_span, 0.25)
        self.y = np.arange(-half_span, half_span, 0.25)
        self.xx, self.yy = np.meshgrid(self.x, self.y)
        # Radially oscillating test surface.
        self.z = np.sin(self.xx**2 + self.yy**2)

    def time_interpolate(self, n_samples, method):
        interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
    """Benchmark radial basis function interpolator construction (1-D and 2-D)."""

    param_names = ['n_samples', 'function']
    params = [
        [10, 50, 100],
        ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
    ]

    def setup(self, n_samples, function):
        # 1-D data: integer sample points with a sine response.
        self.x = np.arange(n_samples)
        self.y = np.sin(self.x)
        # 2-D data: symmetric coordinate arrays with a Gaussian-shaped response.
        half_span = n_samples / 2.
        self.X = np.arange(-half_span, half_span, 0.25)
        self.Y = np.arange(-half_span, half_span, 0.25)
        self.z = np.exp(-self.X**2 - self.Y**2)

    def time_rbf_1d(self, n_samples, function):
        interpolate.Rbf(self.x, self.y, function=function)

    def time_rbf_2d(self, n_samples, function):
        interpolate.Rbf(self.X, self.Y, self.z, function=function)
class UnivariateSpline(Benchmark):
    """Benchmark smoothing univariate spline fits of varying degree."""

    param_names = ['n_samples', 'degree']
    params = [
        [10, 50, 100],
        [3, 4, 5]
    ]

    def setup(self, n_samples, degree):
        # Noisy Gaussian bump sampled on a symmetric 0.25-spaced axis.
        half_span = n_samples / 2.
        self.x = np.arange(-half_span, half_span, 0.25)
        noise = 0.1 * np.random.randn(*self.x.shape)
        self.y = np.exp(-self.x**2) + noise

    def time_univariate_spline(self, n_samples, degree):
        interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
    """
    Benchmark smooth and least-squares bivariate spline fits.

    Author: josef-pktd and scipy mailinglist example
    'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
-and-my-crashing-python-td14801.html'
    """
    param_names = ['n_samples']
    params = [
        [10, 20, 30]
    ]

    def setup(self, n_samples):
        # Flattened regular grid of sample coordinates.
        axis = np.arange(0, n_samples, 0.5)
        grid_x, grid_y = np.meshgrid(axis, axis)
        grid_x = grid_x.ravel()
        grid_y = grid_y.ravel()
        # Interior knots, inset by s from the (padded) data extent.
        s = 1.1
        xmin = grid_x.min() - 1
        xmax = grid_x.max() + 1
        ymin = grid_y.min() - 1
        ymax = grid_y.max() + 1
        self.yknots = np.linspace(ymin + s, ymax - s, 10)
        self.xknots = np.linspace(xmin + s, xmax - s, 10)
        # Noisy sine surface values at the sample points.
        self.z = np.sin(grid_x) + 0.1 * np.random.normal(size=grid_x.shape)
        self.x = grid_x
        self.y = grid_y

    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)

    def time_lsq_bivariate_spline(self, n_samples):
        interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
    """
    Linear Interpolate in scipy and numpy
    """
    param_names = ['n_samples', 'module']
    params = [
        [10, 50, 100],
        ['numpy', 'scipy']
    ]

    def setup(self, n_samples, module):
        # Exponentially decaying samples plus random query points.
        sample_points = np.arange(n_samples)
        self.x = sample_points
        self.y = np.exp(-sample_points / 3.0)
        self.z = np.random.normal(size=sample_points.shape)

    def time_interpolate(self, n_samples, module):
        if module == 'numpy':
            np.interp(self.z, self.x, self.y)
        else:
            interpolate.interp1d(self.x, self.y, kind="linear")
| pnedunuri/scipy | benchmarks/benchmarks/interpolate.py | Python | bsd-3-clause | 6,314 | [
"Gaussian"
] | db6d2dec72ba08c7c3cb2a39f3eeff5f6b45f3f615ad0880f08c48006588c406 |
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing CP2K, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Luca Marsella (CSCS)
@author: Damian Alvarez (Forschungszentrum Juelich GmbH)
@author: Alan O'Cais (Forschungszentrum Juelich GmbH)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import fileinput
import glob
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import write_file
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_avail_core_count
from easybuild.tools.toolchain.compiler import OPTARCH_GENERIC
# CP2K needs this version of libxc
LIBXC_MIN_VERSION = '2.0.1'
class EB_CP2K(EasyBlock):
"""
Support for building CP2K
- prepare module include files if required
- generate custom config file in 'arch' directory
- build CP2K
- run regression test if desired
- install by copying binary executables
"""
def __init__(self, *args, **kwargs):
    """Initialise CP2K-specific build state on top of the generic EasyBlock."""
    super(EB_CP2K, self).__init__(*args, **kwargs)

    # architecture string used to name the arch file; set in configure_step
    self.typearch = None

    # should be set to False for old GCC releases (e.g. v4.1) that lack ISO_C_BINDING
    self.compilerISO_C_BINDING = True

    # compiler options that need to be set in the generated Makefile
    self.debug = ''
    self.fpic = ''
    self.openmp = ''

    # linker flags for both libsmm and libxsmm
    self.libsmm = ''
    # path with compiled module include files (IMKL), if any
    self.modincpath = ''
    # extra Makefile rules appended to the arch file
    self.make_instructions = ''
@staticmethod
def extra_options():
    """Define custom easyconfig parameters specific to CP2K."""
    extra_vars = {
        'type': ['popt', "Type of build ('popt' or 'psmp')", CUSTOM],
        'typeopt': [True, "Enable optimization", CUSTOM],
        'modincprefix': ['', "IMKL prefix for modinc include dir", CUSTOM],
        # fixed typo: help text used a mismatched bracket "(*.f90]"
        'modinc': [[], ("List of modinc's to use (*.f90), or 'True' to use "
                        "all found at given prefix"), CUSTOM],
        'extracflags': ['', "Extra CFLAGS to be added", CUSTOM],
        'extradflags': ['', "Extra DFLAGS to be added", CUSTOM],
        'ignore_regtest_fails': [False, ("Ignore failures in regression test "
                                         "(should be used with care)"), CUSTOM],
        'maxtasks': [4, ("Maximum number of CP2K instances run at "
                         "the same time during testing"), CUSTOM],
        'runtest': [True, "Build and run CP2K tests", CUSTOM],
        'plumed': [None, "Enable PLUMED support", CUSTOM],
    }
    return EasyBlock.extra_options(extra_vars)
def _generate_makefile(self, options):
    """Render an arch-file (Makefile fragment) from an options dict.

    Variables are emitted in sorted order, followed by any extra make
    rules accumulated in self.make_instructions.
    """
    lines = ["# Makefile generated by CP2K easyblock in EasyBuild"]
    for key in sorted(options):
        lines.append("%s = %s" % (key, options[key]))
    return "\n".join(lines) + "\n" + self.make_instructions
def configure_step(self):
    """Configure the CP2K build.

    - validate the requested build type and correct the start dir if needed
    - collect compiler/library options via the toolchain-specific helpers
    - write the resulting arch file that drives the CP2K Makefile
    """
    known_types = ['popt', 'psmp']
    if self.cfg['type'] not in known_types:
        raise EasyBuildError("Unknown build type specified: '%s', known types are %s",
                             self.cfg['type'], known_types)

    # correct start dir, if needed
    # recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir
    cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k')
    if os.path.exists(cp2k_path):
        self.cfg['start_dir'] = cp2k_path
        self.log.info("Corrected start_dir to %s" % self.cfg['start_dir'])

    # set compilers options according to toolchain config
    # full debug: -g -traceback -check all -fp-stack-check
    # -g links to mpi debug libs
    if self.toolchain.options['debug']:
        self.debug = '-g'
        self.log.info("Debug build")
    if self.toolchain.options['pic']:
        self.fpic = "-fPIC"
        self.log.info("Using fPIC")

    # report on extra flags being used
    if self.cfg['extracflags']:
        self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags'])
    if self.cfg['extradflags']:
        # bug fix: this log line previously said "extra CFLAGS" for the extradflags value
        self.log.info("Using extra DFLAGS: %s" % self.cfg['extradflags'])

    # lib(x)smm support
    libsmm = get_software_root('libsmm')
    libxsmm = get_software_root('libxsmm')
    if libxsmm:
        self.cfg.update('extradflags', '-D__LIBXSMM')
        self.libsmm = '-lxsmm -lxsmmf'
        self.log.debug('Using libxsmm %s' % libxsmm)
    elif libsmm:
        # one '-D__HAS_smm_<prec>nn' define per available libsmm variant
        libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a'))
        dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms]
        moredflags = ' ' + ' '.join(dfs)
        self.cfg.update('extradflags', moredflags)
        self.libsmm = ' '.join(libsmms)
        self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags))

    # obtain list of modinc's to use
    if self.cfg["modinc"]:
        self.modincpath = self.prepmodinc()

    # set typearch
    self.typearch = "Linux-x86-64-%s" % self.toolchain.name

    # extra make instructions
    self.make_instructions = ''  # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n"

    # compiler toolchain specific configuration
    comp_fam = self.toolchain.comp_family()
    if comp_fam == toolchain.INTELCOMP:
        options = self.configure_intel_based()
    elif comp_fam == toolchain.GCC:
        options = self.configure_GCC_based()
    else:
        raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam)

    # BLAS/FFTW
    if get_software_root('IMKL'):
        options = self.configure_MKL(options)
    else:
        # BLAS
        if get_software_root('ACML'):
            options = self.configure_ACML(options)
        else:
            options = self.configure_BLAS_lib(options)

        # FFTW (no MKL involved)
        if 'fftw3' in os.getenv('LIBFFT', ''):
            options = self.configure_FFTW3(options)

        # LAPACK
        if os.getenv('LIBLAPACK_MT', None) is not None:
            options = self.configure_LAPACK(options)

        if os.getenv('LIBSCALAPACK', None) is not None:
            options = self.configure_ScaLAPACK(options)

    # PLUMED
    plumed = get_software_root('PLUMED')
    if self.cfg['plumed'] and not plumed:
        raise EasyBuildError("The PLUMED module needs to be loaded to build CP2K with PLUMED support")

    # enable PLUMED support if PLUMED is listed as a dependency
    # and PLUMED support is either explicitly enabled (plumed = True) or unspecified ('plumed' not defined)
    if plumed and (self.cfg['plumed'] or self.cfg['plumed'] is None):
        options['LIBS'] += ' -lplumed'
        options['DFLAGS'] += ' -D__PLUMED2'

    # ELPA
    elpa = get_software_root('ELPA')
    if elpa:
        options['LIBS'] += ' -lelpa'
        options['DFLAGS'] += ' -D__ELPA3'
        elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'modules')
        options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir

    # CUDA
    cuda = get_software_root('CUDA')
    if cuda:
        options['DFLAGS'] += ' -D__ACC -D__DBCSR_ACC'
        options['LIBS'] += ' -lcudart -lcublas -lcufft -lrt'
        options['NVCC'] = ' nvcc'

    # avoid group nesting
    options['LIBS'] = options['LIBS'].replace('-Wl,--start-group', '').replace('-Wl,--end-group', '')
    options['LIBS'] = "-Wl,--start-group %s -Wl,--end-group" % options['LIBS']

    # create arch file using options set
    archfile = os.path.join(self.cfg['start_dir'], 'arch', '%s.%s' % (self.typearch, self.cfg['type']))
    txt = self._generate_makefile(options)
    write_file(archfile, txt)
    self.log.info("Content of makefile (%s):\n%s" % (archfile, txt))
def prepmodinc(self):
"""Prepare list of module files"""
self.log.debug("Preparing module files")
imkl = get_software_root('IMKL')
if imkl:
# prepare modinc target path
modincpath = os.path.join(os.path.dirname(os.path.normpath(self.cfg['start_dir'])), 'modinc')
self.log.debug("Preparing module files in %s" % modincpath)
try:
os.mkdir(modincpath)
except OSError, err:
raise EasyBuildError("Failed to create directory for module include files: %s", err)
# get list of modinc source files
modincdir = os.path.join(imkl, self.cfg["modincprefix"], 'include')
if type(self.cfg["modinc"]) == list:
modfiles = [os.path.join(modincdir, x) for x in self.cfg["modinc"]]
elif type(self.cfg["modinc"]) == bool and type(self.cfg["modinc"]):
modfiles = glob.glob(os.path.join(modincdir, '*.f90'))
else:
raise EasyBuildError("prepmodinc: Please specify either a boolean value or a list of files in modinc "
"(found: %s).", self.cfg["modinc"])
f77 = os.getenv('F77')
if not f77:
raise EasyBuildError("F77 environment variable not set, can't continue.")
# create modinc files
for f in modfiles:
if f77.endswith('ifort'):
cmd = "%s -module %s -c %s" % (f77, modincpath, f)
elif f77 in ['gfortran', 'mpif77']:
cmd = "%s -J%s -c %s" % (f77, modincpath, f)
else:
raise EasyBuildError("prepmodinc: Unknown value specified for F77 (%s)", f77)
run_cmd(cmd, log_all=True, simple=True)
return modincpath
else:
raise EasyBuildError("Don't know how to prepare modinc, IMKL not found")
def configure_common(self):
    """Common configuration for all toolchains.

    Returns the base options dict (compilers, flags, libraries) that the
    toolchain-specific configure_* methods extend.
    """
    # openmp introduces 2 major differences
    # -automatic is default: -noautomatic -auto-scalar
    # some mem-bandwidth optimisation
    if self.cfg['type'] == 'psmp':
        self.openmp = self.toolchain.get_flag('openmp')

    # determine which opt flags to use
    if self.cfg['typeopt']:
        optflags = 'OPT'
        regflags = 'OPT2'
    else:
        optflags = 'NOOPT'
        regflags = 'NOOPT'

    # make sure a MPI-2 able MPI lib is used
    mpi2 = False
    if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None:
        known_mpi2_fams = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2, toolchain.OPENMPI,
                           toolchain.INTELMPI]
        mpi_fam = self.toolchain.mpi_family()
        if mpi_fam in known_mpi2_fams:
            mpi2 = True
            self.log.debug("Determined MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
        else:
            self.log.debug("Cannot determine MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam)
    else:
        # can't use toolchain.mpi_family, because of dummy toolchain
        mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI', 'MPICH2', 'MPICH']
        for mpi2lib in mpi2libs:
            if get_software_root(mpi2lib):
                mpi2 = True
                # bug fix: both debug messages below lacked their '%' argument,
                # so a literal '%s' was logged instead of the library name
                self.log.debug("Determined MPI2 compatibility based on loaded MPI module: %s" % mpi2lib)
            else:
                self.log.debug("MPI-2 supporting MPI library %s not loaded." % mpi2lib)

    if not mpi2:
        raise EasyBuildError("CP2K needs MPI-2, no known MPI-2 supporting library loaded?")

    # pick up optarch value from toolchain, when optarch toolchain option is enabled or --optarch=GENERIC is used
    optarch = ''
    if self.toolchain.options.get('optarch', False) or build_option('optarch') == OPTARCH_GENERIC:
        # take into account that a '-' is missing for the first compiler flag, but also that optarch may be empty
        if self.toolchain.options.option('optarch'):
            optarch = '-%s' % self.toolchain.options.option('optarch')

    options = {
        'CC': os.getenv('MPICC'),
        'CPP': '',
        'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp),
        'AR': 'ar -r',
        'CPPFLAGS': '',

        'FPIC': self.fpic,
        'DEBUG': self.debug,

        'FCFLAGS': '$(FCFLAGS%s)' % optflags,
        'FCFLAGS2': '$(FCFLAGS%s)' % regflags,

        'CFLAGS': ' %s %s $(FPIC) $(DEBUG) %s ' % (os.getenv('CPPFLAGS'), os.getenv('LDFLAGS'),
                                                   self.cfg['extracflags']),
        'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'],

        'LIBS': os.getenv('LIBS', ''),

        'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)',
        'FCFLAGSOPT': '-O2 $(FREE) $(SAFE) $(FPIC) $(DEBUG) %s' % optarch,
        'FCFLAGSOPT2': '-O1 $(FREE) $(SAFE) $(FPIC) $(DEBUG) %s' % optarch,
    }

    libint = get_software_root('LibInt')
    if libint:
        options['DFLAGS'] += ' -D__LIBINT'

        libintcompiler = "%s %s" % (os.getenv('CC'), os.getenv('CFLAGS'))

        # Build libint-wrapper, if required
        libint_wrapper = ''

        # required for old versions of GCC
        if not self.compilerISO_C_BINDING:
            options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING'

            # determine path for libint_tools dir
            libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools']
            libinttools_path = None
            for path in libinttools_paths:
                path = os.path.join(self.cfg['start_dir'], path)
                if os.path.isdir(path):
                    libinttools_path = path
                    os.chdir(libinttools_path)
            if not libinttools_path:
                raise EasyBuildError("No libinttools dir found")

            # build libint wrapper
            cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint)
            if not run_cmd(cmd, log_all=True, simple=True):
                raise EasyBuildError("Building the libint wrapper failed")
            libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path

        # determine LibInt libraries based on major version number
        libint_maj_ver = get_software_version('LibInt').split('.')[0]
        if libint_maj_ver == '1':
            libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a"
        elif libint_maj_ver == '2':
            libint_libs = "$(LIBINTLIB)/libint2.a"
        else:
            raise EasyBuildError("Don't know how to handle libint version %s", libint_maj_ver)
        self.log.info("Using LibInt version %s" % (libint_maj_ver))

        options['LIBINTLIB'] = '%s/lib' % libint
        options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper)
    else:
        # throw a warning, since CP2K without LibInt doesn't make much sense
        self.log.warning("LibInt module not loaded, so building without LibInt support")

    libxc = get_software_root('libxc')
    if libxc:
        cur_libxc_version = get_software_version('libxc')
        if LooseVersion(cur_libxc_version) < LooseVersion(LIBXC_MIN_VERSION):
            raise EasyBuildError("CP2K only works with libxc v%s (or later)", LIBXC_MIN_VERSION)

        options['DFLAGS'] += ' -D__LIBXC2'
        # libxc >= 2.2 splits the Fortran 90 interface into its own library
        if LooseVersion(cur_libxc_version) >= LooseVersion('2.2'):
            options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc
        else:
            options['LIBS'] += ' -L%s/lib -lxc' % libxc
        self.log.info("Using Libxc-%s" % cur_libxc_version)
    else:
        self.log.info("libxc module not loaded, so building without libxc support")

    return options
def configure_intel_based(self):
    """Configure build options for Intel-based toolchains."""
    # based on guidelines available at
    # http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/
    intelurl = ''.join(["http://software.intel.com/en-us/articles/",
                        "build-cp2k-using-intel-fortran-compiler-professional-edition/"])

    options = self.configure_common()

    extrainc = ''
    if self.modincpath:
        extrainc = '-I%s' % self.modincpath

    options.update({
        # -Vaxlib : older options
        'FREE': '-fpp -free',

        # SAFE = -assume protect_parens -fp-model precise -ftz # causes problems, so don't use this
        'SAFE': '-assume protect_parens -no-unroll-aggressive',

        'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc,

        'LDFLAGS': '$(INCFLAGS) ',
        'OBJECTS_ARCHITECTURE': 'machine_intel.o',
    })

    options['DFLAGS'] += ' -D__INTEL'

    options['FCFLAGSOPT'] += ' $(INCFLAGS) -heap-arrays 64'
    options['FCFLAGSOPT2'] += ' $(INCFLAGS) -heap-arrays 64'

    ifortver = LooseVersion(get_software_version('ifort'))

    # -i-static has been deprecated prior to 2013, but was still usable. From 2015 it is not.
    if ifortver < LooseVersion("2013"):
        options['LDFLAGS'] += ' -i-static '
    else:
        options['LDFLAGS'] += ' -static-intel '

    # Otherwise it fails on linking, since there are 2 definitions of main
    if LooseVersion(self.version) >= LooseVersion('4.1'):
        options['LDFLAGS'] += ' -nofor-main '

    failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl
    if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"):

        # don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL)
        if ifortver >= LooseVersion("2011.8"):
            # add additional make instructions to Makefile
            self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"

        else:
            raise EasyBuildError(failmsg, "v12", "v2011.8")

    elif ifortver >= LooseVersion("11"):
        # consistency fix: reuse the already-parsed 'ifortver' instead of
        # re-querying and re-parsing the ifort version a second time
        if ifortver >= LooseVersion("11.1.072"):
            self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n"

        else:
            raise EasyBuildError(failmsg, "v11", "v11.1.072")

    else:
        raise EasyBuildError("Intel compilers version %s not supported yet.", ifortver)

    return options
def configure_GCC_based(self):
    """Configure build options for GCC-based toolchains."""
    options = self.configure_common()

    gcc_settings = {
        # free-form flags prevent "Unterminated character constant beginning" errors
        'FREE': '-ffree-form -ffree-line-length-none',
        'LDFLAGS': '$(FCFLAGS)',
        'OBJECTS_ARCHITECTURE': 'machine_gfortran.o',
    }
    options.update(gcc_settings)

    options['DFLAGS'] += ' -D__GFORTRAN'

    options['FCFLAGSOPT'] += ' $(DFLAGS) $(CFLAGS) -fmax-stack-var-size=32768'
    options['FCFLAGSOPT2'] += ' $(DFLAGS) $(CFLAGS)'
    return options
def configure_ACML(self, options):
    """Configure linking against the AMD Core Math Library (ACML)."""
    # multi-threaded ACML builds live in a 'gfortran64_mp' subdirectory
    suffix = '_mp' if self.openmp else ''

    options['ACML_INC'] = '%s/gfortran64%s/include' % (get_software_root('ACML'), suffix)
    options['CFLAGS'] += ' -I$(ACML_INC) -I$(FFTW_INC)'
    options['DFLAGS'] += ' -D__FFTACML'

    # point the BLAS libs at the matching (threaded or not) ACML variant
    blas_libs = os.getenv('LIBBLAS', '').replace('gfortran64', 'gfortran64%s' % suffix)
    options['LIBS'] += ' %s %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''), blas_libs)
    return options
def configure_BLAS_lib(self, options):
    """Append libsmm and the toolchain BLAS library to the link options."""
    blas_libs = os.getenv('LIBBLAS', '')
    options['LIBS'] = '%s %s %s' % (options['LIBS'], self.libsmm, blas_libs)
    return options
def configure_MKL(self, options):
    """Configure building against Intel MKL, including its FFTW wrappers
    when no standalone FFTW module is loaded."""
    options['INTEL_INC'] = '$(MKLROOT)/include'
    options['DFLAGS'] += ' -D__FFTW3'

    modinc_flag = ''
    if self.modincpath:
        modinc_flag = '-I%s' % self.modincpath
    options['CFLAGS'] += ' -I$(INTEL_INC) %s $(FPIC) $(DEBUG)' % modinc_flag

    options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''))

    # only fall back to MKL's FFTW interface when no real FFTW is loaded
    if not get_software_root('FFTW'):
        options['INTEL_INCF'] = '$(INTEL_INC)/fftw'
        options['DFLAGS'] += ' -D__FFTMKL'
        options['CFLAGS'] += ' -I$(INTEL_INCF)'
        options['LIBS'] = '%s %s' % (os.getenv('LIBFFT', ''), options['LIBS'])

    return options
def configure_FFTW3(self, options):
    """Configure linking against FFTW3."""
    fft_inc = os.getenv('FFT_INC_DIR', '')
    options['FFTW_INC'] = fft_inc                         # GCC arch files
    options['FFTW3INC'] = fft_inc                         # Intel arch files
    options['FFTW3LIB'] = os.getenv('FFT_LIB_DIR', '')    # Intel arch files

    options['DFLAGS'] += ' -D__FFTW3'

    # OpenMP-enabled (psmp) builds need the multithreaded FFTW variant
    if self.cfg['type'] == 'psmp':
        libfft = os.getenv('LIBFFT_MT', '')
    else:
        libfft = os.getenv('LIBFFT', '')
    options['LIBS'] += ' -L%s %s' % (os.getenv('FFT_LIB_DIR', '.'), libfft)

    return options
def configure_LAPACK(self, options):
    """Append the multithreaded LAPACK library to the link options."""
    options['LIBS'] = '%s %s' % (options['LIBS'], os.getenv('LIBLAPACK_MT', ''))
    return options
def configure_ScaLAPACK(self, options):
    """Append the ScaLAPACK library to the link options."""
    options['LIBS'] = '%s %s' % (options['LIBS'], os.getenv('LIBSCALAPACK', ''))
    return options
def build_step(self):
    """Build CP2K.

    - change into the 'makefiles' dir
    - patch the Makefile so parallel (sub)make is used
    - run 'make clean' followed by the actual build
    """
    makefiles = os.path.join(self.cfg['start_dir'], 'makefiles')
    try:
        os.chdir(makefiles)
    except OSError, err:
        raise EasyBuildError("Can't change to makefiles dir %s: %s", makefiles, err)

    # modify makefile for parallel build
    parallel = self.cfg['parallel']
    if parallel:
        try:
            # rewrite the PMAKE line in-place so sub-make jobs run with -j;
            # fileinput redirects stdout into the file being edited
            for line in fileinput.input('Makefile', inplace=1, backup='.orig.patchictce'):
                line = re.sub(r"^PMAKE\s*=.*$", "PMAKE\t= $(SMAKE) -j %s" % parallel, line)
                sys.stdout.write(line)
        except IOError, err:
            raise EasyBuildError("Can't modify/write Makefile in %s: %s", makefiles, err)

    # update make options with MAKE
    self.cfg.update('buildopts', 'MAKE="make -j %s" all' % self.cfg['parallel'])

    # update make options with ARCH and VERSION (selects the generated arch file)
    self.cfg.update('buildopts', 'ARCH=%s VERSION=%s' % (self.typearch, self.cfg['type']))

    cmd = "make %s" % self.cfg['buildopts']

    # clean first
    run_cmd(cmd + " clean", log_all=True, simple=True, log_output=True)

    # build_and_install
    run_cmd(cmd, log_all=True, simple=True, log_output=True)
def test_step(self):
"""Run regression test."""
if self.cfg['runtest']:
if not build_option('mpi_tests'):
self.log.info("Skipping testing of CP2K since MPI testing is disabled")
return
# change to root of build dir
try:
os.chdir(self.builddir)
except OSError, err:
raise EasyBuildError("Failed to change to %s: %s", self.builddir, err)
# use regression test reference output if available
# try and find an unpacked directory that starts with 'LAST-'
regtest_refdir = None
for d in os.listdir(self.builddir):
if d.startswith("LAST-"):
regtest_refdir = d
break
# location of do_regtest script
cfg_fn = "cp2k_regtest.cfg"
regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'regtesting', 'do_regtest')
regtest_cmd = "%s -nosvn -nobuild -config %s" % (regtest_script, cfg_fn)
# older version of CP2K
if not os.path.exists(regtest_script):
regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'do_regtest')
regtest_cmd = "%s -nocvs -quick -nocompile -config %s" % (regtest_script, cfg_fn)
# patch do_regtest so that reference output is used
if regtest_refdir:
self.log.info("Using reference output available in %s" % regtest_refdir)
try:
for line in fileinput.input(regtest_script, inplace=1, backup='.orig.refout'):
line = re.sub(r"^(dir_last\s*=\${dir_base})/.*$", r"\1/%s" % regtest_refdir, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to modify '%s': %s", regtest_script, err)
else:
self.log.info("No reference output found for regression test, just continuing without it...")
test_core_cnt = min(self.cfg.get('parallel', sys.maxint), 2)
if get_avail_core_count() < test_core_cnt:
raise EasyBuildError("Cannot run MPI tests as not enough cores (< %s) are available", test_core_cnt)
else:
self.log.info("Using %s cores for the MPI tests" % test_core_cnt)
# configure regression test
cfg_txt = '\n'.join([
'FORT_C_NAME="%(f90)s"',
'dir_base=%(base)s',
'cp2k_version=%(cp2k_version)s',
'dir_triplet=%(triplet)s',
'export ARCH=${dir_triplet}',
'cp2k_dir=%(cp2k_dir)s',
'leakcheck="YES"',
'maxtasks=%(maxtasks)s',
'cp2k_run_prefix="%(mpicmd_prefix)s"',
]) % {
'f90': os.getenv('F90'),
'base': os.path.dirname(os.path.normpath(self.cfg['start_dir'])),
'cp2k_version': self.cfg['type'],
'triplet': self.typearch,
'cp2k_dir': os.path.basename(os.path.normpath(self.cfg['start_dir'])),
'maxtasks': self.cfg['maxtasks'],
'mpicmd_prefix': self.toolchain.mpi_cmd_for('', test_core_cnt),
}
write_file(cfg_fn, cfg_txt)
self.log.debug("Contents of %s: %s" % (cfg_fn, cfg_txt))
# run regression test
(regtest_output, ec) = run_cmd(regtest_cmd, log_all=True, simple=False, log_output=True)
if ec == 0:
self.log.info("Regression test output:\n%s" % regtest_output)
else:
raise EasyBuildError("Regression test failed (non-zero exit code): %s", regtest_output)
# pattern to search for regression test summary
re_pattern = "number\s+of\s+%s\s+tests\s+(?P<cnt>[0-9]+)"
# find total number of tests
regexp = re.compile(re_pattern % "", re.M | re.I)
res = regexp.search(regtest_output)
tot_cnt = None
if res:
tot_cnt = int(res.group('cnt'))
else:
raise EasyBuildError("Finding total number of tests in regression test summary failed")
# function to report on regtest results
def test_report(test_result):
"""Report on tests with given result."""
postmsg = ''
test_result = test_result.upper()
regexp = re.compile(re_pattern % test_result, re.M | re.I)
cnt = None
res = regexp.search(regtest_output)
if not res:
raise EasyBuildError("Finding number of %s tests in regression test summary failed",
test_result.lower())
else:
cnt = int(res.group('cnt'))
logmsg = "Regression test reported %s / %s %s tests"
logmsg_values = (cnt, tot_cnt, test_result.lower())
# failed tests indicate problem with installation
# wrong tests are only an issue when there are excessively many
if (test_result == "FAILED" and cnt > 0) or (test_result == "WRONG" and (cnt / tot_cnt) > 0.1):
if self.cfg['ignore_regtest_fails']:
self.log.warning(logmsg, *logmsg_values)
self.log.info("Ignoring failures in regression test, as requested.")
else:
raise EasyBuildError(logmsg, *logmsg_values)
elif test_result == "CORRECT" or cnt == 0:
self.log.info(logmsg, *logmsg_values)
else:
self.log.warning(logmsg, *logmsg_values)
return postmsg
# number of failed/wrong tests, will report error if count is positive
self.postmsg += test_report("FAILED")
self.postmsg += test_report("WRONG")
# number of new tests, will be high if a non-suitable regtest reference was used
# will report error if count is positive (is that what we want?)
self.postmsg += test_report("NEW")
# number of correct tests: just report
test_report("CORRECT")
def install_step(self):
"""Install built CP2K
- copy from exe to bin
- copy data dir (if exists)
- copy tests
"""
# copy executables
targetdir = os.path.join(self.installdir, 'bin')
exedir = os.path.join(self.cfg['start_dir'], 'exe/%s' % self.typearch)
try:
if not os.path.exists(targetdir):
os.makedirs(targetdir)
os.chdir(exedir)
for exefile in os.listdir(exedir):
if os.path.isfile(exefile):
shutil.copy2(exefile, targetdir)
except OSError, err:
raise EasyBuildError("Copying executables from %s to bin dir %s failed: %s", exedir, targetdir, err)
# copy data dir
datadir = os.path.join(self.cfg['start_dir'], 'data')
targetdir = os.path.join(self.installdir, 'data')
if os.path.exists(targetdir):
self.log.info("Won't copy data dir. Destination directory %s already exists" % targetdir)
elif os.path.exists(datadir):
try:
shutil.copytree(datadir, targetdir)
except:
raise EasyBuildError("Copying data dir from %s to %s failed", datadir, targetdir)
else:
self.log.info("Won't copy data dir. Source directory %s does not exist" % datadir)
# copy tests
srctests = os.path.join(self.cfg['start_dir'], 'tests')
targetdir = os.path.join(self.installdir, 'tests')
if os.path.exists(targetdir):
self.log.info("Won't copy tests. Destination directory %s already exists" % targetdir)
else:
try:
shutil.copytree(srctests, targetdir)
except:
raise EasyBuildError("Copying tests from %s to %s failed", srctests, targetdir)
# copy regression test results
if self.cfg['runtest']:
try:
testdir = os.path.dirname(os.path.normpath(self.cfg['start_dir']))
for d in os.listdir(testdir):
if d.startswith('TEST-%s-%s' % (self.typearch, self.cfg['type'])):
path = os.path.join(testdir, d)
target = os.path.join(self.installdir, d)
shutil.copytree(path, target)
self.log.info("Regression test results dir %s copied to %s" % (d, self.installdir))
break
except (OSError, IOError), err:
raise EasyBuildError("Failed to copy regression test results dir: %s", err)
def sanity_check_step(self):
    """Custom sanity check for CP2K: binaries for the built type plus tests dir."""
    cp2k_type = self.cfg['type']
    binaries = ["cp2k", "cp2k_shell"]
    custom_paths = {
        'files': ["bin/%s.%s" % (binary, cp2k_type) for binary in binaries],
        'dirs': ["tests"],
    }
    super(EB_CP2K, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
    """Extend the module file with $CP2K_DATA_DIR (basis sets shipped with CP2K)."""
    extra_txt = super(EB_CP2K, self).make_module_extra()
    data_path = os.path.join(self.installdir, 'data')
    # only point at the data dir when it was actually installed
    if os.path.exists(data_path):
        extra_txt += self.module_generator.set_environment('CP2K_DATA_DIR', data_path)
    return extra_txt
| ULHPC/easybuild-easyblocks | easybuild/easyblocks/c/cp2k.py | Python | gpl-2.0 | 35,231 | [
"CP2K"
] | ea20fbf891e152848d3c52eefee03765ecf5ffe24f9650bfbde72d955f58972a |
# Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
The Rekall Entity Layer.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
import logging
from rekall.entities import entity as entity_module
from rekall.entities import identity as entity_id
from efilter import engine
from efilter import expression
from efilter import query as entity_query
from rekall.entities.query import matcher
class EntityQuerySearch(engine.VisitorEngine):
"""Tries to solve the query using available indexing."""
def search(self, entities, lookup_tables):
self.entities = entities
self.lookup_tables = lookup_tables
return list(self.run())
def visit_ComponentLiteral(self, expr):
return self._as_entities(
self.lookup_tables["components"].table.get(expr.value, []))
def visit_Intersection(self, expr):
results = set(self.visit(expr.children[0]))
for child in expr.children[1:]:
results.intersection_update(self.visit(child))
return results
def visit_Union(self, expr):
results = set()
for child in expr.children:
results.update(self.visit(child))
return results
def _subquery(self, expr):
return entity_query.Query(root=expr,
source=self.query.source)
def _slow_solve(self, expr, seed):
slow_matcher = matcher.ObjectMatcher(self._subquery(expr))
entities = set()
for entity in seed:
if slow_matcher.run(entity):
entities.add(entity)
return entities
def _as_entities(self, identities):
entities = set()
for identity in identities:
# identity.indices is a set, hence the loop.
for index in identity.indices:
entities.add(self.entities[index])
break
return entities
def _solve_equivalence(self, expr, binding, literal):
literal_value = literal.value
if isinstance(literal_value, entity_id.Identity):
results = set()
for index in literal_value.indices:
results |= self._solve_equivalence(
expr, binding, expression.Literal(index))
return results
table = self.lookup_tables.get(binding.value, None)
if table:
# Sweet, we have exact index for this.
return self._as_entities(table.table.get(literal_value, set()))
# Don't have an exact index, but can prefilter by component index.
component, _ = binding.value.split("/", 1)
slow_matcher = matcher.ObjectMatcher(self._subquery(expr))
entities = set()
candidates = self.lookup_tables["components"].table.get(component, [])
for identity in candidates:
entity = self.entities[identity.first_index]
if slow_matcher.run(entity):
entities.add(entity)
return entities
def visit_Equivalence(self, expr):
if len(expr.children) != 2:
return self._slow_solve(expr, self.entities.itervalues())
x, y = expr.children
if (isinstance(x, expression.Binding) and
isinstance(y, expression.Literal)):
return self._solve_equivalence(expr, x, y)
elif (isinstance(x, expression.Literal) and
isinstance(y, expression.Binding)):
return self._solve_equivalence(expr, y, x)
return self._slow_solve(expr, self.entities.itervalues())
def visit_Membership(self, expr):
collection = self.visit(expr.set)
return self._slow_solve(expr, collection)
def visit_Expression(self, expr):
logging.debug("Fallthrough to filter-based search (%s).", expr)
return self._slow_solve(expr, self.entities.itervalues())
def _slow_Let(self, expr):
logging.debug("Fallthrough to filter-based search (%s).", expr)
# Prefiltering the slow solve to just entities that actually have the
# relevant attribute usually shaves off about 200 ms.
seed = self.visit(expr.context)
return self._slow_solve(expr, seed)
    def visit_LetEach(self, expr):
        # Universal quantifier (each) - no index support, use the slow path.
        return self._slow_Let(expr)
    def visit_LetAny(self, expr):
        # Existential quantifier (any) - no index support, use the slow path.
        return self._slow_Let(expr)
    def visit_Let(self, expr):
        """Solve a Let (scoped subexpression) via the context's lookup table.

        Runs the subquery first, then intersects the identities it returns
        with the index for the context attribute.
        """
        # Do we have an index for the context attribute?
        table = self.lookup_tables.get(expr.context.value)
        if not table:
            return self._slow_Let(expr)
        # We have an index - this means we can run the subquery, get the
        # identities that match and then get their intersection with the
        # index we just found.
        results = set()
        subquery_hits = self.visit(expr.expression)
        for subquery_result in subquery_hits:
            for index in subquery_result.indices:
                # Need to check every index in case the lookup table is
                # stale.
                matching_entities = table.table.get(index)
                if not matching_entities:
                    continue
                for matching_entity in matching_entities:
                    results.add(matching_entity)
        return self._as_entities(results)
# Expose this solver to the engine framework under the name "indexed_search".
engine.Engine.register_engine(EntityQuerySearch, "indexed_search")
class EntityLookupTable(object):
    """Lookup table for entities.

    Maps keys produced by 'key_func' to sets of entity identities; lookups
    resolve identities back to entities through the entity manager. The
    'searches' and 'updates' counters track how much the index is used
    versus how much it costs to maintain.
    """
    @property
    def cost_per_search(self):
        """Average number of index updates paid per search.

        Returns 0.0 before the first search instead of raising
        ZeroDivisionError (the original divided unconditionally).
        """
        if not self.searches:
            return 0.0
        return self.updates / self.searches
    def __init__(self, key_name, key_func, entity_manager):
        # Counters are floats so the division above is true division on py2.
        self.searches = 0.0
        self.updates = 0.0
        self.key_name = key_name
        self.key_func = key_func
        self.manager = entity_manager
        self.table = {}
    def update_index(self, entities):
        """Insert each entity into the table under every key it yields."""
        for entity in entities:
            for key in self.key_func(entity):
                self.updates += 1
                # Identities need to be stored at each of their indices instead
                # of by just one hash.
                if isinstance(key, entity_id.Identity):
                    for index in key.indices:
                        self.table.setdefault(
                            index, set()).add(entity.identity)
                else:
                    self.table.setdefault(key, set()).add(entity.identity)
    def lookup(self, *keys):
        """Return the set of entities stored under any of 'keys'.

        Counts as a single search regardless of how many keys are passed.
        """
        unique_results = set()
        self.searches += 1
        for key in keys:
            for identity in self.table.get(key, []):
                for entity in self.manager.find_by_identity(identity):
                    unique_results.add(entity)
        return unique_results
class AttributeLookupTable(EntityLookupTable):
    """Lookup table keyed on the coerced value of a single attribute."""
    def __init__(self, attribute, entity_manager):
        # Resolve the attribute's type descriptor once, up front; the key
        # function then coerces the raw attribute value of each entity.
        typedesc = entity_module.Entity.reflect_attribute(attribute).typedesc
        def key_func(entity):
            # One-element tuple: exactly one key per entity.
            return (typedesc.coerce(entity.get_raw(attribute)),)
        super(AttributeLookupTable, self).__init__(
            attribute, key_func, entity_manager)
| chen0031/rekall | rekall-core/rekall/entities/lookup_table.py | Python | gpl-2.0 | 7,824 | [
"VisIt"
] | 38faceb8e86d244bd87b6f6b27a39003f7025a62ff8f9177324eebd3be461140 |
"""
Import functions for EPW data files.
"""
import io
from urllib.request import urlopen, Request
import pandas as pd
def read_epw(filename, coerce_year=None):
    r'''
    Read an EPW file in to a pandas dataframe.
    Note that values contained in the metadata dictionary are unchanged
    from the EPW file.
    EPW files are commonly used by building simulation professionals
    and are widely available on the web. For example via:
    https://energyplus.net/weather , http://climate.onebuilding.org or
    http://www.ladybug.tools/epwmap/
    Parameters
    ----------
    filename : String
        Can be a relative file path, absolute file path, or url.
    coerce_year : None or int, default None
        If supplied, the year of the data will be set to this value. This can
        be a useful feature because EPW data is composed of data from
        different years.
        Warning: EPW files always have 365*24 = 8760 data rows;
        be careful with the use of leap years.
    Returns
    -------
    data : DataFrame
        A pandas dataframe with the columns described in the table
        below. For more detailed descriptions of each component, please
        consult the EnergyPlus Auxiliary Programs documentation [1]_
    metadata : dict
        The site metadata available in the file.
    See Also
    --------
    pvlib.iotools.parse_epw
    Notes
    -----
    The returned structures have the following fields.
    =============== ====== =========================================
    key format description
    =============== ====== =========================================
    loc String default identifier, not used
    city String site location
    state-prov String state, province or region (if available)
    country String site country code
    data_type String type of original data source
    WMO_code String WMO identifier
    latitude Float site latitude
    longitude Float site longitude
    TZ Float UTC offset
    altitude Float site elevation
    =============== ====== =========================================
    +-------------------------------+-----------------------------------------+
    | EPWData field | description |
    +===============================+=========================================+
    | index | A pandas datetime index. NOTE, times are|
    | | set to local standard time (daylight |
    | | savings is not included). Days run from |
    | | 0-23h to comply with PVLIB's convention.|
    +-------------------------------+-----------------------------------------+
    | year | Year, from original EPW file. Can be |
    | | overwritten using coerce function. |
    +-------------------------------+-----------------------------------------+
    | month | Month, from original EPW file. |
    +-------------------------------+-----------------------------------------+
    | day | Day of the month, from original EPW |
    | | file. |
    +-------------------------------+-----------------------------------------+
    | hour | Hour of the day from original EPW file. |
    | | Note that EPW's convention of 1-24h is |
    | | not taken over in the index dataframe |
    | | used in PVLIB. |
    +-------------------------------+-----------------------------------------+
    | minute | Minute, from original EPW file. Not |
    | | used. |
    +-------------------------------+-----------------------------------------+
    | data_source_unct | Data source and uncertainty flags. See |
    | | [1]_, chapter 2.13 |
    +-------------------------------+-----------------------------------------+
    | temp_air | Dry bulb temperature at the time |
    | | indicated, deg C |
    +-------------------------------+-----------------------------------------+
    | temp_dew | Dew-point temperature at the time |
    | | indicated, deg C |
    +-------------------------------+-----------------------------------------+
    | relative_humidity | Relative humidity at the time indicated,|
    | | percent |
    +-------------------------------+-----------------------------------------+
    | atmospheric_pressure | Station pressure at the time indicated, |
    | | Pa |
    +-------------------------------+-----------------------------------------+
    | etr | Extraterrestrial horizontal radiation |
    | | recv'd during 60 minutes prior to |
    | | timestamp, Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | etrn | Extraterrestrial normal radiation recv'd|
    | | during 60 minutes prior to timestamp, |
    | | Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | ghi_infrared | Horizontal infrared radiation recv'd |
    | | during 60 minutes prior to timestamp, |
    | | Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | ghi | Direct and diffuse horizontal radiation |
    | | recv'd during 60 minutes prior to |
    | | timestamp, Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | dni | Amount of direct normal radiation |
    | | (modeled) recv'd during 60 minutes prior|
    | | to timestamp, Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | dhi | Amount of diffuse horizontal radiation |
    | | recv'd during 60 minutes prior to |
    | | timestamp, Wh/m^2 |
    +-------------------------------+-----------------------------------------+
    | global_hor_illum | Avg. total horizontal illuminance recv'd|
    | | during the 60 minutes prior to |
    | | timestamp, lx |
    +-------------------------------+-----------------------------------------+
    | direct_normal_illum | Avg. direct normal illuminance recv'd |
    | | during the 60 minutes prior to |
    | | timestamp, lx |
    +-------------------------------+-----------------------------------------+
    | diffuse_horizontal_illum | Avg. horizontal diffuse illuminance |
    | | recv'd during the 60 minutes prior to |
    | | timestamp, lx |
    +-------------------------------+-----------------------------------------+
    | zenith_luminance | Avg. luminance at the sky's zenith |
    | | during the 60 minutes prior to |
    | | timestamp, cd/m^2 |
    +-------------------------------+-----------------------------------------+
    | wind_direction | Wind direction at time indicated, |
    | | degrees from north (360 = north; 0 = |
    | | undefined,calm) |
    +-------------------------------+-----------------------------------------+
    | wind_speed | Wind speed at the time indicated, m/s |
    +-------------------------------+-----------------------------------------+
    | total_sky_cover | Amount of sky dome covered by clouds or |
    | | obscuring phenomena at time stamp, |
    | | tenths of sky |
    +-------------------------------+-----------------------------------------+
    | opaque_sky_cover | Amount of sky dome covered by clouds or |
    | | obscuring phenomena that prevent |
    | | observing the sky at time stamp, tenths |
    | | of sky |
    +-------------------------------+-----------------------------------------+
    | visibility | Horizontal visibility at the time |
    | | indicated, km |
    +-------------------------------+-----------------------------------------+
    | ceiling_height | Height of cloud base above local terrain|
    | | (7777=unlimited), meter |
    +-------------------------------+-----------------------------------------+
    | present_weather_observation | Indicator for remaining fields: If 0, |
    | | then the observed weather codes are |
    | | taken from the following field. If 9, |
    | | then missing weather is assumed. |
    +-------------------------------+-----------------------------------------+
    | present_weather_codes | Present weather code, see [1]_, chapter |
    | | 2.9.1.28 |
    +-------------------------------+-----------------------------------------+
    | precipitable_water | Total precipitable water contained in a |
    | | column of unit cross section from earth |
    | | to top of atmosphere, cm. Note that some|
    | | old \*_TMY3.epw files may have incorrect|
    | | unit if it was retrieved from |
    | | www.energyplus.net. |
    +-------------------------------+-----------------------------------------+
    | aerosol_optical_depth | The broadband aerosol optical depth per |
    | | unit of air mass due to extinction by |
    | | aerosol component of atmosphere, |
    | | unitless |
    +-------------------------------+-----------------------------------------+
    | snow_depth | Snow depth in centimeters on the day |
    | | indicated, (999 = missing data) |
    +-------------------------------+-----------------------------------------+
    | days_since_last_snowfall | Number of days since last snowfall |
    | | (maximum value of 88, where 88 = 88 or |
    | | greater days; 99 = missing data) |
    +-------------------------------+-----------------------------------------+
    | albedo | The ratio of reflected solar irradiance |
    | | to global horizontal irradiance, |
    | | unitless |
    +-------------------------------+-----------------------------------------+
    | liquid_precipitation_depth | The amount of liquid precipitation |
    | | observed at indicated time for the |
    | | period indicated in the liquid |
    | | precipitation quantity field, |
    | | millimeter |
    +-------------------------------+-----------------------------------------+
    | liquid_precipitation_quantity | The period of accumulation for the |
    | | liquid precipitation depth field, hour |
    +-------------------------------+-----------------------------------------+
    References
    ----------
    .. [1] `EnergyPlus documentation, Auxiliary Programs
       <https://energyplus.net/documentation>`_
    '''
    if str(filename).startswith('http'):
        # Attempts to download online EPW file
        # See comments above for possible online sources
        request = Request(filename, headers={'User-Agent': (
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '
            'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '
            'Safari/537.36')})
        response = urlopen(request)
        csvdata = io.StringIO(response.read().decode(errors='ignore'))
    else:
        # Assume it's accessible via the file system
        csvdata = open(str(filename), 'r')
    # Parse and always close the buffer, even if parsing raises.
    try:
        data, meta = parse_epw(csvdata, coerce_year)
    finally:
        csvdata.close()
    return data, meta
def parse_epw(csvdata, coerce_year=None):
    """
    Given a file-like buffer with data in Energy Plus Weather (EPW) format,
    parse the data into a dataframe.
    Parameters
    ----------
    csvdata : file-like buffer
        a file-like buffer containing data in the EPW format
    coerce_year : None or int, default None
        If supplied, the year of the data will be set to this value. This can
        be a useful feature because EPW data is composed of data from
        different years.
        Warning: EPW files always have 365*24 = 8760 data rows;
        be careful with the use of leap years.
    Returns
    -------
    data : DataFrame
        A pandas dataframe with the columns described in the table
        below. For more detailed descriptions of each component, please
        consult the EnergyPlus Auxiliary Programs documentation
        available at: https://energyplus.net/documentation.
    metadata : dict
        The site metadata available in the file.
    See Also
    --------
    pvlib.iotools.read_epw
    """
    # Read line with metadata (the first line of an EPW file is the
    # comma-separated LOCATION record).
    firstline = csvdata.readline()
    head = ['loc', 'city', 'state-prov', 'country', 'data_type', 'WMO_code',
            'latitude', 'longitude', 'TZ', 'altitude']
    meta = dict(zip(head, firstline.rstrip('\n').split(",")))
    # Numeric metadata fields are converted; the rest stay as strings.
    meta['altitude'] = float(meta['altitude'])
    meta['latitude'] = float(meta['latitude'])
    meta['longitude'] = float(meta['longitude'])
    meta['TZ'] = float(meta['TZ'])
    colnames = ['year', 'month', 'day', 'hour', 'minute', 'data_source_unct',
                'temp_air', 'temp_dew', 'relative_humidity',
                'atmospheric_pressure', 'etr', 'etrn', 'ghi_infrared', 'ghi',
                'dni', 'dhi', 'global_hor_illum', 'direct_normal_illum',
                'diffuse_horizontal_illum', 'zenith_luminance',
                'wind_direction', 'wind_speed', 'total_sky_cover',
                'opaque_sky_cover', 'visibility', 'ceiling_height',
                'present_weather_observation', 'present_weather_codes',
                'precipitable_water', 'aerosol_optical_depth', 'snow_depth',
                'days_since_last_snowfall', 'albedo',
                'liquid_precipitation_depth', 'liquid_precipitation_quantity']
    # We only have to skip 6 rows instead of 7 because we have already used
    # the readline call above.
    data = pd.read_csv(csvdata, skiprows=6, header=0, names=colnames)
    # Change to single year if requested
    if coerce_year is not None:
        data["year"] = coerce_year
    # create index that supplies correct date and time zone information
    # EPW hours run 1-24; subtracting 1 shifts them to the 0-23 convention.
    dts = data[['month', 'day']].astype(str).apply(lambda x: x.str.zfill(2))
    hrs = (data['hour'] - 1).astype(str).str.zfill(2)
    dtscat = data['year'].astype(str) + dts['month'] + dts['day'] + hrs
    idx = pd.to_datetime(dtscat, format='%Y%m%d%H')
    # Localize to the file's fixed UTC offset, expressed in seconds.
    idx = idx.dt.tz_localize(int(meta['TZ'] * 3600))
    data.index = idx
    return data, meta
| mikofski/pvlib-python | pvlib/iotools/epw.py | Python | bsd-3-clause | 17,255 | [
"EPW"
] | 3029268c3287ef3031b54801c8b56ea3e9979787ed8fc06dadfc01157970b7d4 |
# -*- coding: utf-8 -*-
"""
=====================================================================
Extracting artifact and evoked response atoms from the sample dataset
=====================================================================
This example illustrates how to learn rank-1 [1]_ atoms on the multivariate
sample dataset from :code:`mne`. We display a selection of atoms, featuring
heartbeat and eyeblink artifacts, two atoms of evoked responses, and a
non-sinusoidal oscillation.
.. [1] Dupré La Tour, T., Moreau, T., Jas, M., & Gramfort, A. (2018).
`Multivariate Convolutional Sparse Coding for Electromagnetic Brain Signals
<https://arxiv.org/abs/1805.09654v2>`_. Advances in Neural Information
Processing Systems (NIPS).
"""
# Authors: Thomas Moreau <thomas.moreau@inria.fr>
# Mainak Jas <mainak.jas@telecom-paristech.fr>
# Tom Dupre La Tour <tom.duprelatour@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
###############################################################################
# Let us first define the parameters of our model.
# sampling frequency. The signal will be resampled to match this.
sfreq = 150.
# Define the shape of the dictionary
n_atoms = 40
n_times_atom = int(round(sfreq * 1.0))  # 1000. ms
# Regularization parameter which controls sparsity
reg = 0.1
# number of processors for parallel computing
n_jobs = 5
# To accelerate the run time of this example, we split the signal in n_splits.
# The number of splits should actually be the smallest possible to avoid
# introducing border artifacts in the learned atoms and it should be not much
# larger than n_jobs.
n_splits = 10
###############################################################################
# Next, we define the parameters for multivariate CSC
from alphacsc import GreedyCDL
cdl = GreedyCDL(
    # Shape of the dictionary
    n_atoms=n_atoms,
    n_times_atom=n_times_atom,
    # Request a rank1 dictionary with unit norm temporal and spatial maps
    rank1=True,
    uv_constraint='separate',
    # apply a temporal window reparametrization
    window=True,
    # at the end, refit the activations with fixed support and no reg to unbias
    unbiased_z_hat=True,
    # Initialize the dictionary with random chunk from the data
    D_init='chunk',
    # rescale the regularization parameter to be a percentage of lambda_max
    lmbd_max="scaled",
    reg=reg,
    # Number of iterations for the alternate minimization and cvg threshold
    n_iter=100,
    eps=1e-4,
    # solver for the z-step
    solver_z="lgcd",
    solver_z_kwargs={'tol': 1e-3,
                     'max_iter': 100000},
    # solver for the d-step
    solver_d='alternate_adaptive',
    solver_d_kwargs={'max_iter': 300},
    # sort atoms by explained variances
    sort_atoms=True,
    # Technical parameters
    verbose=1,
    random_state=0,
    n_jobs=n_jobs)
###############################################################################
# Load the sample data from MNE-python and select the gradiometer channels.
# The MNE sample data contains MEG recordings of a subject with visual and
# auditory stimuli. We load the data using utilities from MNE-python as a Raw
# object and select the gradiometers from the signal.
import os
import mne
import numpy as np
print("Loading the data...", end='', flush=True)
data_path = mne.datasets.sample.data_path()
subjects_dir = os.path.join(data_path, "subjects")
data_dir = os.path.join(data_path, 'MEG', 'sample')
file_name = os.path.join(data_dir, 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(file_name, preload=True, verbose=False)
# Keep the stim channel as well: it is needed later to extract events.
raw.pick_types(meg='grad', eeg=False, eog=False, stim=True)
print('done')
###############################################################################
# Then, we remove the powerline artifacts and high-pass filter to remove the
# drift which can impact the CSC technique. The signal is also resampled to
# 150 Hz to reduce the computational burden.
print("Preprocessing the data...", end='', flush=True)
raw.notch_filter(np.arange(60, 181, 60), n_jobs=n_jobs, verbose=False)
raw.filter(2, None, n_jobs=n_jobs, verbose=False)
raw = raw.resample(sfreq, npad='auto', n_jobs=n_jobs, verbose=False)
print('done')
###############################################################################
# Load the data as an array and split it in chunks to allow parallel processing
# during the model fit. Each split is considered as independent.
# To reduce the impact of border artifacts, we use `apply_window=True`
# which scales down the border of each split with a Tukey window.
from alphacsc.utils import split_signal
X = raw.get_data(picks=['meg'])
info = raw.copy().pick_types(meg=True).info  # info of the loaded channels
X_split = split_signal(X, n_splits=n_splits, apply_window=True)
###############################################################################
# Fit the model and learn rank1 atoms
cdl.fit(X_split)
###############################################################################
# Then we call the `transform` method, which returns the sparse codes
# associated with X, without changing the dictionary learned during the `fit`.
# Note that we transform on the *unsplit* data so that the sparse codes
# reflect the original data and not the windowed data.
z_hat = cdl.transform(X[None, :])
###############################################################################
# Display a selection of atoms
# ----------------------------
#
# We recognize a heartbeat artifact, an eyeblink artifact, two atoms of evoked
# responses, and a non-sinusoidal oscillation.
import matplotlib.pyplot as plt
# preselected atoms of interest
plotted_atoms = [0, 1, 2, 6, 4]
n_plots = 3  # number of plots by atom (topomap, temporal pattern, PSD)
n_columns = min(6, len(plotted_atoms))
split = int(np.ceil(len(plotted_atoms) / n_columns))
figsize = (4 * n_columns, 3 * n_plots * split)
fig, axes = plt.subplots(n_plots * split, n_columns, figsize=figsize)
for ii, kk in enumerate(plotted_atoms):
    # Select the axes to display the current atom
    print("\rDisplaying {}-th atom".format(kk), end='', flush=True)
    i_row, i_col = ii // n_columns, ii % n_columns
    it_axes = iter(axes[i_row * n_plots:(i_row + 1) * n_plots, i_col])
    # Select the current atom (u = spatial map, v = temporal pattern)
    u_k = cdl.u_hat_[kk]
    v_k = cdl.v_hat_[kk]
    # Plot the spatial map of the atom using mne topomap
    ax = next(it_axes)
    mne.viz.plot_topomap(u_k, info, axes=ax, show=False)
    ax.set(title="Spatial pattern %d" % (kk, ))
    # Plot the temporal pattern of the atom
    ax = next(it_axes)
    t = np.arange(n_times_atom) / sfreq
    ax.plot(t, v_k)
    ax.set_xlim(0, n_times_atom / sfreq)
    ax.set(xlabel='Time (sec)', title="Temporal pattern %d" % kk)
    # Plot the power spectral density (PSD)
    ax = next(it_axes)
    psd = np.abs(np.fft.rfft(v_k, n=256)) ** 2
    frequencies = np.linspace(0, sfreq / 2.0, len(psd))
    ax.semilogy(frequencies, psd, label='PSD', color='k')
    ax.set(xlabel='Frequencies (Hz)', title="Power spectral density %d" % kk)
    ax.grid(True)
    ax.set_xlim(0, 30)
    ax.set_ylim(1e-4, 1e2)
    ax.legend()
print("\rDisplayed {} atoms".format(len(plotted_atoms)).rjust(40))
fig.tight_layout()
###############################################################################
# Display the evoked reconstructed envelope
# -----------------------------------------
#
# The MNE sample data contains data for auditory (event_id=1 and 2) and
# visual stimuli (event_id=3 and 4). We extract the events now so that we can
# later identify the atoms related to different events. Note that the
# convolutional sparse coding method does not need to know the events for
# learning atoms.
event_id = [1, 2, 3, 4]
events = mne.find_events(raw, stim_channel='STI 014')
events = mne.pick_events(events, include=event_id)
events[:, 0] -= raw.first_samp
###############################################################################
# For each atom (columns), and for each event (rows), we compute the envelope
# of the reconstructed signal, align it with respect to the event onsets, and
# take the average. For some atoms, the activations are correlated with the
# events, leading to a large evoked envelope. The gray area corresponds to
# values that are not statistically significant, computed with sampling.
from alphacsc.utils.signal import fast_hilbert
from alphacsc.viz.epoch import plot_evoked_surrogates
from alphacsc.utils.convolution import construct_X_multi
# time window around the events. Note that for the sample datasets, the time
# inter-event is around 0.5s
t_lim = (-0.1, 0.5)
n_plots = len(event_id)
n_columns = min(6, len(plotted_atoms))
split = int(np.ceil(len(plotted_atoms) / n_columns))
figsize = (4 * n_columns, 3 * n_plots * split)
fig, axes = plt.subplots(n_plots * split, n_columns, figsize=figsize)
for ii, kk in enumerate(plotted_atoms):
    # Select the axes to display the current atom
    print("\rDisplaying {}-th atom envelope".format(kk), end='', flush=True)
    i_row, i_col = ii // n_columns, ii % n_columns
    it_axes = iter(axes[i_row * n_plots:(i_row + 1) * n_plots, i_col])
    # Select the current atom and reconstruct its single-channel signal
    v_k = cdl.v_hat_[kk]
    v_k_1 = np.r_[[1], v_k][None]
    z_k = z_hat[:, kk:kk + 1]
    X_k = construct_X_multi(z_k, v_k_1, n_channels=1)[0, 0]
    # compute the 'envelope' of the reconstructed signal X_k
    correlation = np.abs(fast_hilbert(X_k))
    # loop over all events IDs
    for this_event_id in event_id:
        this_events = events[events[:, 2] == this_event_id]
        # NOTE(review): this_events is computed but never used below;
        # plot_evoked_surrogates receives all events through this_info.
        # plotting function
        ax = next(it_axes)
        this_info = info.copy()
        event_info = dict(event_id = this_event_id, events=events)
        this_info['temp'] = event_info
        plot_evoked_surrogates(correlation, info=this_info, t_lim=t_lim, ax=ax,
                               n_jobs=n_jobs, label='event %d' % this_event_id)
        ax.set(xlabel='Time (sec)', title="Evoked envelope %d" % kk)
print("\rDisplayed {} atoms".format(len(plotted_atoms)).rjust(40))
fig.tight_layout()
###############################################################################
# Display the equivalent dipole for a learned topomap
# ---------------------------------------------------
#
# Finally, let us fit a dipole to one of the atoms. To fit a dipole,
# we need the following:
#
# * BEM solution: Obtained by running the cortical reconstruction pipeline
#   of Freesurfer and describes the conductivity of different tissues in
#   the head.
# * Trans: An affine transformation matrix needed to bring the data
#   from sensor space to head space. This is usually done by coregistering
#   the fiducials with the MRI.
# * Noise covariance matrix: To whiten the data so that the assumption
#   of Gaussian noise model with identity covariance matrix is satisfied.
#
# We recommend users to consult the MNE documentation for further information.
#
subjects_dir = os.path.join(data_path, 'subjects')
fname_bem = os.path.join(subjects_dir, 'sample', 'bem',
                         'sample-5120-bem-sol.fif')
fname_trans = os.path.join(data_path, 'MEG', 'sample',
                           'sample_audvis_raw-trans.fif')
fname_cov = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
###############################################################################
# Let us construct an evoked object for MNE with the spatial pattern of the
# atoms.
#
evoked = mne.EvokedArray(cdl.u_hat_.T, info)
###############################################################################
# Fit a dipole to each of the atoms.
#
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans,
                     n_jobs=n_jobs, verbose=False)[0]
###############################################################################
# Plot the dipole fit from the 3rd atom, linked to mu-wave and display the
# goodness of fit.
#
atom_dipole_idx = 4
from mpl_toolkits.mplot3d import Axes3D  # noqa: registers the '3d' projection
fig = plt.figure(figsize=(10, 4))
# Display the dipole fit
ax = fig.add_subplot(1, 3, 1, projection='3d')
dip.plot_locations(fname_trans, 'sample', subjects_dir, idx=atom_dipole_idx,
                   ax=ax)
ax.set_title('Atom #{} (GOF {:.2f}%)'.format(atom_dipole_idx,
                                             dip.gof[atom_dipole_idx]))
# Plot the spatial map
ax = fig.add_subplot(1, 3, 2)
mne.viz.plot_topomap(cdl.u_hat_[atom_dipole_idx], info, axes=ax)
# Plot the temporal atom
ax = fig.add_subplot(1, 3, 3)
t = np.arange(n_times_atom) / sfreq
ax.plot(t, cdl.v_hat_[atom_dipole_idx])
ax.set_xlim(0, n_times_atom / sfreq)
ax.set(xlabel='Time (sec)', title="Temporal pattern {}"
       .format(atom_dipole_idx))
fig.suptitle('')
fig.tight_layout()
| alphacsc/alphacsc | examples/multicsc/plot_sample_evoked_response.py | Python | bsd-3-clause | 12,769 | [
"Gaussian"
] | bb79d4ba0c37cdd00c748862ac7e5687c4538a6bbaf555f4a27beb1f620618be |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import numpy
import numpy.ma as ma
import os
import urllib
import netCDF4
import datetime
import inspect
import test_local # Import test_local so we can use inspect to get the path
import ocw.data_source.local as local
class test_load_file(unittest.TestCase):
    """Tests for ocw.data_source.local.load_file on a synthetic netCDF file."""
    def setUp(self):
        # Build a temporary netCDF file and read back its raw contents for
        # comparison against what load_file returns.
        self.file_path = create_netcdf_object()
        self.netCDF_file = netCDF4.Dataset(self.file_path, 'r')
        self.latitudes = self.netCDF_file.variables['latitude'][:]
        self.longitudes = self.netCDF_file.variables['longitude'][:]
        self.values = self.netCDF_file.variables['value'][:]
        self.variable_name_list = ['latitude', 'longitude', 'time', 'level', 'value']
        self.possible_value_name = ['latitude', 'longitude', 'time', 'level']
    def tearDown(self):
        os.remove(self.file_path)
    def test_function_load_file_lats(self):
        '''To test load_file function for latitudes'''
        self.assertItemsEqual(local.load_file(self.file_path, "value").lats, self.latitudes)
    def test_function_load_file_lons(self):
        '''To test load_file function for longitudes'''
        self.assertItemsEqual(local.load_file(self.file_path, "value").lons, self.longitudes)
    def test_function_load_file_times(self):
        '''To test load_file function for times'''
        # Plain decimal literals: the original used 01/02/03 here, which is a
        # syntax error under Python 3 (leading-zero integer literals).
        newTimes = datetime.datetime(2001, 1, 1), datetime.datetime(2001, 2, 1), datetime.datetime(2001, 3, 1)
        self.assertItemsEqual(local.load_file(self.file_path, "value").times, newTimes)
    def test_function_load_file_values(self):
        '''To test load_file function for values'''
        # load_file returns the slice at the default elevation index (0).
        new_values = self.values[0, :, :, :]
        self.assertTrue(numpy.allclose(local.load_file(self.file_path, "value").values, new_values))
    def test_custom_dataset_name(self):
        '''Test adding a custom name to a dataset'''
        ds = local.load_file(self.file_path, 'value', name='foo')
        self.assertEqual(ds.name, 'foo')
    def test_dataset_origin(self):
        '''Check the origin metadata recorded by load_file.'''
        ds = local.load_file(self.file_path, 'value', elevation_index=1)
        expected_keys = set(['source', 'path', 'lat_name', 'lon_name',
                             'time_name', 'elevation_index'])
        self.assertEqual(set(ds.origin.keys()), expected_keys)
        self.assertEqual(ds.origin['source'], 'local')
class test_get_netcdf_variable_names(unittest.TestCase):
    """Tests for local._get_netcdf_variable_name.

    NOTE(review): setUp downloads a model file over HTTP on every test run,
    so these tests require network access to zipper.jpl.nasa.gov.
    """
    file_path = "http://zipper.jpl.nasa.gov/dist/"
    test_model = "AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_tasmax.nc"
    def setUp(self):
        # Fetch the real model file and build a local file whose dimension
        # names do not match any of the standard latitude names.
        urllib.urlretrieve(self.file_path + self.test_model, self.test_model)
        self.invalid_netcdf_path = create_invalid_dimensions_netcdf_object()
        self.netcdf = netCDF4.Dataset(self.test_model, mode='r')
    def tearDown(self):
        os.remove(self.invalid_netcdf_path)
        os.remove(self.test_model)
    def test_valid_latitude(self):
        # The downloaded model names its latitude dimension "rlat".
        self.lat = local._get_netcdf_variable_name(
            local.LAT_NAMES,
            self.netcdf,
            "tasmax")
        self.assertEquals(self.lat, "rlat")
    def test_invalid_dimension_latitude(self):
        # Falls back to matching by variable name when the dimension name
        # is not recognized.
        self.netcdf = netCDF4.Dataset(self.invalid_netcdf_path, mode='r')
        self.lat = local._get_netcdf_variable_name(
            local.LAT_NAMES,
            self.netcdf,
            "value")
        self.assertEquals(self.lat, "latitude")
    def test_dimension_variable_name_mismatch(self):
        # A dimension name that matches the candidate list but has no
        # corresponding variable must not win over a real variable match.
        self.netcdf = netCDF4.Dataset(self.invalid_netcdf_path, mode='r')
        self.lat = local._get_netcdf_variable_name(
            ["lat_dim"] + local.LAT_NAMES,
            self.netcdf,
            "value")
        self.assertEquals(self.lat, "latitude")
    def test_no_match_latitude(self):
        # No candidate matches at all: the helper must raise.
        with self.assertRaises(ValueError):
            self.lat = local._get_netcdf_variable_name(
                ['notAVarName'],
                self.netcdf,
                "tasmax")
def create_netcdf_object():
    """Create a temporary netCDF file populated with known test data.

    Returns the path to the created file.

    Bug fix: the original rebound the names of the netCDF Variable objects
    (e.g. ``latitudes = range(0, 5)``) before the slice assignments, so
    ``latitudes[:] = latitudes`` wrote the python list onto itself and the
    file's variables were never populated. The data now lives under distinct
    names so the slice assignments actually write to the file.
    """
    file_path = '/tmp/temporaryNetcdf.nc'
    netCDF_file = netCDF4.Dataset(file_path, 'w', format='NETCDF4')
    # To create dimensions
    netCDF_file.createDimension('lat_dim', 5)
    netCDF_file.createDimension('lon_dim', 5)
    netCDF_file.createDimension('time_dim', 3)
    netCDF_file.createDimension('level_dim', 2)
    # To create variables
    latitudes = netCDF_file.createVariable('latitude', 'd', ('lat_dim',))
    longitudes = netCDF_file.createVariable('longitude', 'd', ('lon_dim',))
    times = netCDF_file.createVariable('time', 'd', ('time_dim',))
    levels = netCDF_file.createVariable('level', 'd', ('level_dim',))
    values = netCDF_file.createVariable('value', 'd', ('level_dim', 'time_dim', 'lat_dim', 'lon_dim'))
    # The data to ingest, under names distinct from the netCDF variables
    lat_data = range(0, 5)
    lon_data = range(200, 205)
    # Three months of data
    time_data = range(3)
    # Two levels
    level_data = [100, 200]
    # Create 150 values and reshape to a 4D array (level, time, lats, lons)
    value_data = numpy.array([i for i in range(150)])
    value_data = value_data.reshape(len(level_data), len(time_data),
                                    len(lat_data), len(lon_data))
    # Ingest values to netCDF file
    latitudes[:] = lat_data
    longitudes[:] = lon_data
    times[:] = time_data
    levels[:] = level_data
    values[:] = value_data
    # Assign time info to time variable
    netCDF_file.variables['time'].units = 'months since 2001-01-01 00:00:00'
    netCDF_file.variables['value'].units = 'foo_units'
    netCDF_file.close()
    return file_path
def create_invalid_dimensions_netcdf_object():
    """Create a temporary netCDF file with mismatched dimension names.

    The dimensions are named 'lat_dim'/'lon_dim'/... while the variables
    are named 'latitude'/'longitude'/..., which is the invalid layout the
    tests exercise. Returns the file path.

    Bug fix: as in create_netcdf_object, the variable handles were being
    rebound to plain lists (e.g. `times = range(3)`), so the subsequent
    slice assignments never wrote data into the file.
    """
    file_path = '/tmp/temporaryNetcdf.nc'
    netCDF_file = netCDF4.Dataset(file_path, 'w', format='NETCDF4')
    # Dimensions deliberately named differently from the variables below.
    netCDF_file.createDimension('lat_dim', 5)
    netCDF_file.createDimension('lon_dim', 5)
    netCDF_file.createDimension('time_dim', 3)
    netCDF_file.createDimension('level_dim', 2)
    # Variable handles -- kept distinct from the data arrays below.
    latitudes = netCDF_file.createVariable('latitude', 'd', ('lat_dim',))
    longitudes = netCDF_file.createVariable('longitude', 'd', ('lon_dim',))
    times = netCDF_file.createVariable('time', 'd', ('time_dim',))
    levels = netCDF_file.createVariable('level', 'd', ('level_dim',))
    values = netCDF_file.createVariable('value', 'd', ('level_dim', 'time_dim', 'lat_dim', 'lon_dim'))
    # Data: five lats/lons, three months, two levels, 150 values.
    lat_data = range(0, 5)
    lon_data = range(200, 205)
    time_data = range(3)
    level_data = [100, 200]
    value_data = numpy.array([i for i in range(150)])
    # Reshape values to a 4D array (level, time, lat, lon).
    value_data = value_data.reshape(len(level_data), len(time_data),
                                    len(lat_data), len(lon_data))
    # Ingest the data through the variable handles.
    latitudes[:] = lat_data
    longitudes[:] = lon_data
    times[:] = time_data
    levels[:] = level_data
    values[:] = value_data
    # Attach time units so the file can be interpreted on read-back.
    netCDF_file.variables['time'].units = 'months since 2001-01-01 00:00:00'
    netCDF_file.close()
    return file_path
if __name__ == '__main__':
unittest.main()
| MJJoyce/climate | ocw/tests/test_local.py | Python | apache-2.0 | 8,555 | [
"NetCDF"
] | 6d52317dd896d695bc9b62f149961b6f8444ae794f1b8a523a3e9bedba946f27 |
"""
Test Logger Wrapper
"""
__RCSID__ = "$Id$"
import unittest
import logging
from StringIO import StringIO
from DIRAC.FrameworkSystem.private.standardLogging.LoggingRoot import LoggingRoot
from DIRAC.FrameworkSystem.private.standardLogging.Logging import Logging
gLogger = LoggingRoot()
def cleaningLog(log):
    """Normalize a log record for comparison in tests.

    Drops the leading 20-character timestamp and strips every space so
    only the record's content remains.
    """
    return log[20:].replace(" ", "")
class Test_Logging(unittest.TestCase):
    """Base fixture for Logging tests.

    Configures gLogger at debug level with a sub-logger whose output is
    redirected into an in-memory buffer so assertions can inspect it.
    """

    def setUp(self):
        """Initialize the logger at debug level with a captured handler."""
        # LoggingRoot is a singleton and cannot be re-instantiated, so
        # reset the component name that other tests may have changed.
        Logging._componentName = 'Framework'
        gLogger.setLevel('debug')
        self.log = gLogger.getSubLogger('log')
        self.buffer = StringIO()
        gLogger.showHeaders(True)
        gLogger.showThreadIDs(False)
        # Redirect the underlying handler's stream into the buffer so the
        # tests can read back what was logged.
        diracLogger = logging.getLogger('dirac')
        if diracLogger.handlers:
            diracLogger.handlers[0].stream = self.buffer
        # Reset the sub-logger level so each test starts clean.
        diracLogger.getChild('log').setLevel(logging.NOTSET)
        self.log._levelModified = False
| andresailer/DIRAC | FrameworkSystem/private/standardLogging/test/TestLoggingBase.py | Python | gpl-3.0 | 1,257 | [
"DIRAC"
] | a5ebdc938d6fcdaa51465765a28c11c934ca3c97354d4eb8db6bc6038efb6dd7 |
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
    """Set up the role's identity and resolve its on-disk path."""
    # Lazily-populated caches for meta/main.yml and .galaxy_install_info.
    self._metadata = None
    self._install_info = None
    # TLS verification can be disabled via the --ignore-certs option.
    self._validate_certs = not galaxy.options.ignore_certs
    display.debug('Validate TLS certificates: %s' % self._validate_certs)
    self.options = galaxy.options
    self.galaxy = galaxy
    self.name = name
    self.version = version
    # src defaults to the role name (Galaxy lookup by name).
    self.src = src or name
    self.scm = scm
    if path is not None:
        if self.name not in path:
            # Treat the given path as a parent directory for the role.
            path = os.path.join(path, self.name)
        self.path = path
    else:
        # Pick the first configured roles path that already contains the role.
        for role_path_dir in galaxy.roles_paths:
            role_path = os.path.join(role_path_dir, self.name)
            if os.path.exists(role_path):
                self.path = role_path
                break
        else:
            # use the first path by default
            self.path = os.path.join(galaxy.roles_paths[0], self.name)
    # create list of possible paths
    self.paths = [x for x in galaxy.roles_paths]
    self.paths = [os.path.join(x, self.name) for x in self.paths]
def __eq__(self, other):
    # Roles are identified solely by name; version/src/path are ignored.
    return self.name == other.name
@property
def metadata(self):
    """
    Returns the role metadata parsed from meta/main.yml (cached).

    Returns False if the file exists but cannot be opened or parsed.
    """
    if self._metadata is None:
        meta_path = os.path.join(self.path, self.META_MAIN)
        if os.path.isfile(meta_path):
            try:
                # 'with' guarantees the file is closed. The previous
                # finally-block called f.close() even when open() itself
                # failed, raising NameError on the unbound name 'f'.
                with open(meta_path, 'r') as f:
                    self._metadata = yaml.safe_load(f)
            except Exception:
                display.vvvvv("Unable to load metadata for %s" % self.name)
                return False
    return self._metadata
@property
def install_info(self):
    """
    Returns the role install info parsed from .galaxy_install_info (cached).

    Returns False if the file exists but cannot be opened or parsed.
    """
    if self._install_info is None:
        info_path = os.path.join(self.path, self.META_INSTALL)
        if os.path.isfile(info_path):
            try:
                # 'with' guarantees the file is closed. The previous
                # finally-block called f.close() even when open() itself
                # failed, raising NameError on the unbound name 'f'.
                with open(info_path, 'r') as f:
                    self._install_info = yaml.safe_load(f)
            except Exception:
                display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                return False
    return self._install_info
def _write_galaxy_install_info(self):
    """
    Writes a YAML-formatted file to the role's meta/ directory
    (named .galaxy_install_info) which contains some information
    we can use later for commands like 'list' and 'info'.

    Returns True on success, False if the info could not be serialized.
    """
    info = dict(
        version=self.version,
        install_date=datetime.datetime.utcnow().strftime("%c"),
    )
    meta_dir = os.path.join(self.path, 'meta')
    if not os.path.exists(meta_dir):
        os.makedirs(meta_dir)
    info_path = os.path.join(self.path, self.META_INSTALL)
    with open(info_path, 'w+') as f:
        try:
            self._install_info = yaml.safe_dump(info, f)
        except Exception:
            # Narrowed from a bare 'except', which also swallowed
            # KeyboardInterrupt/SystemExit.
            return False
    return True
def remove(self):
    """
    Removes the specified role from the roles path.

    A sanity check requires a parseable meta/main.yml at the path so the
    user doesn't blow away random directories. Returns True on success.
    """
    if self.metadata:
        try:
            rmtree(self.path)
            return True
        except Exception:
            # Narrowed from a bare 'except' (which also caught
            # KeyboardInterrupt); removal failures fall through to False.
            pass
    return False
def fetch(self, role_data):
    """
    Downloads the archived role to a temporary location.

    Returns the temp file path on success, False on a download error,
    and None when no role data was supplied.
    """
    if not role_data:
        return None
    # Prefer the GitHub archive URL when repo coordinates are available;
    # otherwise treat self.src as a direct archive URL.
    if "github_user" in role_data and "github_repo" in role_data:
        archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (
            role_data["github_user"], role_data["github_repo"], self.version)
    else:
        archive_url = self.src
    display.display("- downloading role from %s" % archive_url)
    try:
        url_file = open_url(archive_url, validate_certs=self._validate_certs)
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        # Stream the archive to disk chunk by chunk.
        data = url_file.read()
        while data:
            temp_file.write(data)
            data = url_file.read()
        temp_file.close()
        return temp_file.name
    except Exception as e:
        display.error("failed to download the file: %s" % str(e))
    return False
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
local_file = False
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'CON':
# Container Enabled
display.warning("%s is a Container Enabled role and should only be installed using "
"Ansible Container" % self.name)
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
meta_file = member
break
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off the top-level directory for all of the files contained within
# the tar file here, since the default is 'github_repo-target', and change it
# to the specified role's name
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop the leading directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.split(os.sep)[1:]
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e[0] == 13 and len(self.paths) > 1:
current = self.paths.index(self.path)
nextidx = current + 1
if len(self.paths) >= current:
self.path = self.paths[nextidx]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % self.name)
if not local_file:
try:
os.unlink(tmp_file)
except (OSError,IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
    """
    Returns the role spec info as a dict, e.g.:

    {
        'scm': 'git',
        'src': 'http://git.example.com/repos/repo.git',
        'version': 'v1.0',
        'name': 'repo'
    }
    """
    return {
        'scm': self.scm,
        'src': self.src,
        'version': self.version,
        'name': self.name,
    }
| wenottingham/ansible | lib/ansible/galaxy/role.py | Python | gpl-3.0 | 14,007 | [
"Brian",
"Galaxy"
] | 465e4d37baaf34a47f5ec8b6859167139768d76a7a4f5f2774fe095149168dfa |
"""
This is the play
"""
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from functions import selection_algorithm, scl
from csl import CSL
# Runtime flags for the experiment.
plot = True
verbose = False
tracking = True
selection = False

# Generate the data
n_samples = 1500
# Seeds tried during development; only the last assignment takes effect.
random_state = 20  # Does not converge
random_state = 41
random_state = 105  # Does not converge
random_state = 325325
random_state = 1111
n_features = 2
centers = 7
X, y = make_blobs(n_samples, n_features, centers, random_state=random_state)

# The algorithm: competitive and selective learning with N prototypes.
N = centers
s = 2  # Number of neurons to change per round
eta = 0.1
T = 100
csl = CSL(n_clusters=N, n_iter=T, tol=0.001, eta=eta, s0=s, random_state=np.random)
csl.fit(X)
neurons = csl.centers_

if False:
    # Kept for comparison against scikit-learn's KMeans; disabled.
    kmeans = KMeans(n_clusters=N)
    kmeans.fit(X)
    neurons = kmeans.cluster_centers_

if plot:
    # Visualize X together with the learned neuron positions.
    fig = plt.figure(figsize=(16, 12))
    ax = fig.add_subplot(111)
    ax.plot(X[:, 0], X[:, 1], 'x', markersize=6)
    ax.hold(True)
    if True:
        for n in range(N):
            ax.plot(neurons[n, 0], neurons[n, 1], 'o', markersize=12, label='neuron ' + str(n))
        ax.legend()
    # fig.show()
    plt.show()
| h-mayorquin/competitive_and_selective_learning | play.py | Python | mit | 1,250 | [
"NEURON"
] | 72c1c64460742a00eb20ce37ff8748a13573dce46eb491893bb94aa47638e1ef |
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import mf as nao_mf
mol = gto.M( verbose = 1, atom = '''Mn 0 0 0;''', basis = 'cc-pvdz', spin = 5, )
#gto_mf_rhf = scf.RHF(mol)
#gto_mf_rhf.kernel()
gto_mf_uhf = scf.UHF(mol)
gto_mf_uhf.kernel()
class KnowValues(unittest.TestCase):

    def test_mn_mean_field_0084(self):
        from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations
        """ Spin-resolved case """
        #print(__name__, dir(gto_mf_uhf))
        #print(set(dir(gto_mf_uhf))-set(dir(gto_mf_rhf)))
        # Wrap the converged PySCF UHF result in a nao mean-field object.
        mf = nao_mf(mf=gto_mf_uhf, gto=mol, verbosity=0)
        # A spin-polarized calculation must expose two spin channels.
        self.assertEqual(mf.nspin, 2)
        # Fermi-Dirac occupations summed over all states must give the
        # total electron count of neutral Mn (Z = 25).
        ne_occ = fermi_dirac_occupations(mf.telec, mf.mo_energy, mf.fermi_energy).sum()
        self.assertAlmostEqual(ne_occ, 25.0)
        o = mf.overlap_coo().toarray()
        dm = mf.make_rdm1()
        #print((dm[0,0,:,:,0]*o).sum())
        #print((dm[0,1,:,:,0]*o).sum())
        #mf.diag_check()
        #dos = mf.dos(np.arange(-1.4, 1.0, 0.01)+1j*0.02)
        #print(mf.norbs)
        #print(mf.nspin)
        #print(mf.fermi_energy)
        #print(mf.mo_occ)
        #print(mf.mo_energy)
        #print(mf.nelectron)
        #print((mf.mo_occ).sum())
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0084_mn_mf.py | Python | apache-2.0 | 1,226 | [
"PySCF"
] | 7171f724bf8781e7e5064296b0f19fe388460e6a1dd549e78e6d6762cf9b5f5f |
# Copyright (C) 2011-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
    """Print a warning message prefixed with 'warning: '."""
    # print() with a single argument is valid in both Python 2 and 3;
    # the previous Python-2-only print statement broke under Python 3,
    # which modern GDB embeds.
    print("warning: %s" % msg)
def get_elinos_environment():
    """Return the ELinOS environment as a dictionary.

    Keys:
      * 'project': path to the ELinOS project;
      * 'cdk': path to the ELinOS CDK;
      * 'target': the ELinOS target name (e.g. 'i486-linux');
      * 'xenomai': list of Xenomai install prefixes (possibly empty).

    A warning is printed for each missing ELINOS_* variable, and the
    corresponding value in the returned dictionary is None.
    """
    result = {}
    for key in ("project", "cdk", "target"):
        var = "ELINOS_" + key.upper()
        result[key] = os.environ.get(var)
        if result[key] is None:
            warn("%s not set" % var)
    # Xenomai prefixes live under the project directory, when present.
    project = result["project"]
    result["xenomai"] = (
        glob.glob(project + "/xenomai-[0-9.]*") if project is not None else [])
    return result
def elinos_init():
    """Initialize the debugger environment for ELinOS.

    Points GDB at the ELinOS system libraries (and optional Xenomai
    libraries) on the host, warning about any that cannot be located.
    Assumes an ELinOS environment is properly set up.
    """
    elinos_env = get_elinos_environment()
    solib_dirs = []
    # System libraries require both the CDK and target locations.
    if None in (elinos_env[key] for key in ("cdk", "target")):
        warn("ELinOS system libraries will not be loaded")
    else:
        solib_prefix = "%s/%s" % (elinos_env["cdk"], elinos_env["target"])
        solib_dirs.append("%s/%s" % (solib_prefix, "lib"))
        gdb.execute("set solib-absolute-prefix %s" % solib_prefix)
    # Xenomai libraries are optional, so warn more lightly when absent.
    if elinos_env["project"] is None:
        warn("Xenomai libraries may not be loaded")
    else:
        for xenomai_dir in elinos_env["xenomai"]:
            solib_dirs.append(
                "%s/%s" % (xenomai_dir, "xenomai-build/usr/realtime/lib"))
    if solib_dirs:
        gdb.execute("set solib-search-path %s" % ":".join(solib_dirs))
if __name__ == "__main__":
elinos_init()
| BPI-SINOVOIP/BPI-Mainline-kernel | toolchains/gcc-linaro-7.3.1-2018.05-x86_64_arm-linux-gnueabihf/share/gdb/system-gdbinit/elinos.py | Python | gpl-2.0 | 3,080 | [
"CDK"
] | 15c89f29a916366d71c7c1b8618e1ed0fa821c3ce9b1671cb74135241e9e6311 |
import os
from ast import parse
import pytest
from smother.python import PythonFile
from smother.python import Visitor
case_func = """
def a():
pass
"""
ctx_func = ['', 'a', 'a']
case_class = """
class A:
def method(self):
pass
x = 3
"""
ctx_class = ['', 'A', 'A', 'A.method', 'A.method', 'A', 'A']
case_decorated_function = """
x = 5
@dec
def a():
pass
"""
ctx_decorated_function = ['', '', '', 'a', 'a', 'a']
case_double_decorated_function = """
x = 5
@dec1
@dec2
def a():
pass
"""
ctx_double_decorated_function = ['', '', '', 'a', 'a', 'a', 'a']
case_inner_func = """
def a():
def b():
pass
x = None
"""
ctx_inner_func = ['', '', 'a', 'a.b', 'a.b', 'a']
case_decorated_method = """
class Foo:
@dec
def bar(self):
pass
"""
ctx_decorated_method = ['', 'Foo', 'Foo.bar', 'Foo.bar', 'Foo.bar']
VISITOR_CASES = [
(case_func, ctx_func),
(case_class, ctx_class),
(case_decorated_function, ctx_decorated_function),
(case_double_decorated_function, ctx_double_decorated_function),
(case_inner_func, ctx_inner_func),
(case_decorated_method, ctx_decorated_method),
]
IDS = ['func', 'class', 'decorated_func',
'dbl_dec', 'inner_func', 'decorated_method']
@pytest.mark.parametrize('code,expected', VISITOR_CASES, ids=IDS)
def test_visitor(code, expected):
    """Visitor should annotate every source line with its context name."""
    tree = parse(code)
    visitor = Visitor(prefix='')
    visitor.visit(tree)
    assert visitor.lines == expected
CONTEXT_CASES = [
(case_func, 'a', (2, 4)),
(case_class, 'A', (2, 8)),
(case_class, 'A.method', (4, 6)),
(case_decorated_method, 'Foo', (2, 6)),
]
CONTEXT_IDS = ['func', 'class', 'class_inner', 'decorated_method']
@pytest.mark.parametrize(
    "code,context,expected", CONTEXT_CASES, ids=CONTEXT_IDS)
def test_context_range(code, context, expected):
    """context_range should report the (start, stop) line span of a context."""
    python_file = PythonFile('test.py', prefix='', source=code)
    assert python_file.context_range(context) == expected
def test_default_prefix():
    """The module prefix is derived from the file path when not given."""
    cases = [
        ('test.py', 'test'),
        ('a/b/c.py', 'a.b.c'),
        ('a/b/c.pyc', 'a.b.c'),
        ('a/b/c.pyo', 'a.b.c'),
        ('a/b/c.pyw', 'a.b.c'),
        ('a/b/c/__init__.py', 'a.b.c'),
    ]
    for path, expected in cases:
        assert PythonFile(path, source='').prefix == expected
def test_prefix_for_absolute_paths():
    """Absolute paths under the CWD still yield dotted module prefixes."""
    demo_path = os.path.abspath('smother/tests/demo.py')
    assert PythonFile(demo_path).prefix == 'smother.tests.demo'
| ChrisBeaumont/smother | smother/tests/test_python.py | Python | mit | 2,513 | [
"VisIt"
] | 609d41da563193d085da7849aebb833de819c4e731fc57676d810f2b309ce54e |
import numpy as np
import netCDF4 as netCDF
from datetime import datetime
import pyroms
import pyroms_toolbox
# load 2-dimentional interannual discharge data
# from Hill and Beamer.
print 'Load interannual discharge data'
nc_data = netCDF.Dataset('runoff.nc', 'r')
time = nc_data.variables['time'][:]
data = nc_data.variables['runoff'][:]
## time: cyclic year (365.25 days)
#time = np.array([15.21875, 45.65625, 76.09375, 106.53125, 136.96875, 167.40625, \
# 197.84375, 228.28125, 258.71875, 289.15625, 319.59375, 350.03125])
# load CI grid object
grd = pyroms.grid.get_ROMS_grid('COOK_INLET_LYON')
# define some variables
wts_file = 'remap_weights_runoff_to_CI_conservative_nomask.nc'
nt = data.shape[0]
Mp, Lp = grd.hgrid.mask_rho.shape
spval = -1e30
runoff_raw = np.zeros((Mp,Lp))
runoff = np.zeros((Mp,Lp))
rspread = 6
# create runoff file
#runoff_file = 'runoff_CI_daitren_inter_annual_2002-2004.nc'
runoff_file = 'CI_runoff.nc'
nc = netCDF.Dataset(runoff_file, 'w', format='NETCDF3_64BIT')
nc.Description = 'Hill & Beamer monthly climatology river discharge'
nc.Author = 'make_runoff_clim.py'
nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nc.title = 'Hill & Beamer river discharge'
# create dimensions and variables
nc.createDimension('xi_rho', np.size(grd.hgrid.mask_rho,1))
nc.createDimension('eta_rho', np.size(grd.hgrid.mask_rho,0))
nc.createDimension('runoff_time', (365))
nc.createVariable('lon_rho', 'f8', ('eta_rho', 'xi_rho'))
nc.variables['lon_rho'].long_name = 'longitude of RHO-points'
nc.variables['lon_rho'].units = 'degree_east'
nc.variables['lon_rho'].field = 'lon_rho, scalar'
nc.variables['lon_rho'][:] = grd.hgrid.lon_rho
nc.createVariable('lat_rho', 'f8', ('eta_rho', 'xi_rho'))
nc.variables['lat_rho'].long_name = 'latitude of RHO-points'
nc.variables['lat_rho'].units = 'degree_north'
nc.variables['lat_rho'].field = 'lat_rho, scalar'
nc.variables['lat_rho'][:] = grd.hgrid.lat_rho
nc.createVariable('runoff_time', 'f8', ('runoff_time'))
nc.variables['runoff_time'].long_name = 'time'
nc.variables['runoff_time'].units = 'days since 1900-01-01 00:00:00'
nc.variables['runoff_time'].cycle_length = 365.25
nc.createVariable('Runoff_raw', 'f8', ('runoff_time', 'eta_rho', 'xi_rho'))
nc.variables['Runoff_raw'].long_name = 'Hill_Beamer River Runoff raw'
nc.variables['Runoff_raw'].missing_value = str(spval)
nc.variables['Runoff_raw'].units = 'kg/s/m^2'
nc.createVariable('Runoff', 'f8', ('runoff_time', 'eta_rho', 'xi_rho'))
nc.variables['Runoff'].long_name = 'Hill_Beamer River Runoff'
nc.variables['Runoff'].missing_value = str(spval)
nc.variables['Runoff'].units = 'kg/s/m^2'
# get littoral (here just 1 cell wide, no diagonals)
width = 1
idx = []
idy = []
maskl = grd.hgrid.mask_rho.copy()
for w in range(width):
lit = pyroms_toolbox.get_littoral2(maskl)
idx.extend(lit[0])
idy.extend(lit[1])
maskl[lit] = 0
littoral_idx = (np.array(idx), np.array(idy))
maskl = np.zeros(grd.hgrid.mask_rho.shape)
maskl[littoral_idx] = 1
mask_idx = np.where(grd.hgrid.mask_rho == 0)
nct=0
# Do January first
#for t in range(nt):
for t in range(nt-243,nt):
flow = np.sum(data[t,280:600,160:460])
print nct+1, 'Remapping runoff for time %f' %time[t]
# print 'Remapping runoff for time %f' %time[nct]
# conservative horizontal interpolation using scrip
runoff_raw = pyroms.remapping.remap(data[t,:,:], wts_file, \
spval=spval)
# Scale runoff to match incoming in Cook Inlet
nflow = np.sum(runoff_raw)
runoff_raw = runoff_raw*flow/nflow
idx = np.where(runoff_raw != 0)
runoff = pyroms_toolbox.move_runoff(runoff_raw, \
np.array(idx).T + 1, np.array(littoral_idx).T + 1, maskl, \
grd.hgrid.x_rho, grd.hgrid.y_rho, grd.hgrid.dx, grd.hgrid.dy)
# write data in destination file
nc.variables['Runoff'][nct] = runoff
nc.variables['Runoff_raw'][nct] = runoff_raw
nc.variables['runoff_time'][nct] = nct+1
# HACK nc.variables['runoff_time'][nct] = time[nct]
if t==180:
print 'Sum 2', np.sum(runoff_raw)
print 'Sum 3', np.sum(runoff)
if nct==180:
print 'Sum 2 new 180', np.sum(runoff_raw)
print 'Sum 3 new 180', np.sum(runoff)
nct = nct + 1
# Get rest of year
for t in range(nt-243):
flow = np.sum(data[t,280:600,160:460])
print nct+1, 'Remapping runoff for time %f' %time[t]
# conservative horizontal interpolation using scrip
runoff_raw = pyroms.remapping.remap(data[t,:,:], wts_file, \
spval=spval)
# Scale runoff to match incoming in Cook Inlet
nflow = np.sum(runoff_raw)
runoff_raw = runoff_raw*flow/nflow
idx = np.where(runoff_raw != 0)
runoff = pyroms_toolbox.move_runoff(runoff_raw, \
np.array(idx).T + 1, np.array(littoral_idx).T + 1, maskl, \
grd.hgrid.x_rho, grd.hgrid.y_rho, grd.hgrid.dx, grd.hgrid.dy)
# write data in destination file
nc.variables['Runoff'][nct] = runoff
nc.variables['Runoff_raw'][nct] = runoff_raw
nc.variables['runoff_time'][nct] = nct+1
# HACK nc.variables['runoff_time'][nct] = time[nct]
nct = nct + 1
if t==180:
print 'Sum 2', np.sum(runoff_raw)
print 'Sum 3', np.sum(runoff)
# close netcdf file
nc.close()
| dcherian/pyroms | examples/rivers/make_runoff_clim.py | Python | bsd-3-clause | 5,338 | [
"NetCDF"
] | 3f4b04c3e5d6d885105e838f29287d97f7f43df5421288eb95cbc1c71e699494 |
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by Jorge More' et al, available from NETLIB
http://www.netlib.org/
The algorithm is described here http://cds.cern.ch/record/126569
Craig Markwardt converted the FORTRAN code to IDL.
The information for the IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONS can be found on my WEB PAGE: http://purl.com/net/mpfit
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
UPDATED VERSIONS can be found at http://cars.uchicago.edu/software/python/
Sergey Koposov converted the Mark's Python version from Numeric to Numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
UPDATED VERSIONS can be found at https://code.google.com/p/astrolibpy/
Michele Cappellari (Astrophysics, University of Oxford) made it compatible
with Python 3 and fixed major instabilities with degenerate Jacobians
already present since the original MINPACK-1 Fortran code.
UPDATED VERSIONS can be found at http://purl.org/cappellari/software
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It can also return
an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac is None then partial derivatives should not be computed.
# It will always be None if MPFIT is called with default flag.
model = F(x, p)
resid = (y - model)/err
return resid
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
if fjac is not None:
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return (y - model)/err, pderiv
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will choose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy as np
x = arange(100, float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*x**2 + p0[3]*sqrt(x) +
p0[4]*log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0): print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
- Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
- Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
- Converted from Numeric to numpy (Sergey Koposov, July 2008)
MODIFICATION HISTORY OF THIS FORKED VERSION:
V1.0.0: Included a key modification for mge_fit_sectors.
Michele Cappellari, Oxford, 8 February 2014
V1.1.0: Support both Python 2.6/2.7 and Python 3. MC, Oxford, 25 May 2014
V1.1.1: Removed Scipy dependency. MC, Oxford, 13 August 2014
V1.1.2: Replaced np.rank function with ndim attribute to avoid
deprecation warning in Numpy 1.9. MC, Utah, 9 September 2014
V1.2.0: Fixed *major* instabilities for degenerate Jacobians, especially
dramatic in double precision, by introducing thresholds to avoid
divisions by nearly zero diagonal elements.
- Made machar() always return constants for single precision numbers.
- Consequently removed now-unnecessary change introduced in V1.0.0.
- Removed requirement for user function to return `status` for consistency
with most alternative optimization software.
- Various testing and code simplifications.
MC, Oxford, 22 May 2017
V1.2.1: Although I intentionally do not support outdated Numpy versions, I
fixed a small incompatibility with Numpy 1.11 due to changes in np.full.
This was reported by Russell Smith (Univ. of Durham).
MC, Oxford, 16 November 2017
V1.2.2: Dropped legacy Python 2.7 support. MC, Oxford, 12 May 2018
"""
import numpy as np
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
###############################################################################
def norm(x):
    """Return the Euclidean (L2) norm of the 1-D vector ``x``.

    Equivalent to ``np.sqrt(x @ x)``: the square root of the sum of
    squared elements. Used throughout MPFIT for residual and step norms.
    """
    squared_sum = np.sum(x * x)
    return np.sqrt(squared_sum)
###############################################################################
class mpfit:
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1e-8, xtol=1e-8, gtol=1e-8, damp=0., maxiter=200,
factor=100., nprint=1, iterfunct='default', iterkw={},
nocovar=False, rescale=False, autoderivative=True,
quiet=False, diag=None, epsfcn=None):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return a
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof = 0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if not isinstance(parinfo, list):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if not isinstance(parinfo[0], dict):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = np.asarray(xall, dtype=float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.ptied = np.asarray([a.strip() for a in ptied])
self.qanytied = np.any(self.ptied != '')
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=False, n=npar)
pfixed = (pfixed == 1) | (ptied != '') # Tied parameters are also fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if np.any(qmin & qmax & (maxstep < minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = qmin | qmax
qminmax = wh.sum()
# Finish up the free parameters
ifree = np.nonzero(pfixed != 1)[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[False, False], n=npar)
limited = limited == 1 # Ensure this is boolean
limits = self.parinfo(parinfo, 'limits', default=[0., 0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if np.any((limited[:, 0] & (xall < limits[:, 0]))
| (limited[:, 1] & (xall > limits[:, 1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if np.any((limited[:, 0] & limited[:, 1])
& (limits[:, 0] >= limits[:, 1]) & (pfixed == False)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = limited[ifree, 1]
qllim = limited[ifree, 0]
ulim = limits[ifree, 1]
llim = limits[ifree, 0]
if np.any(qulim | qllim):
qanylim = True
else:
qanylim = False
else:
# Fill in local variables with dummy values
qulim = np.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if diag is None:
return
if np.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
self.machar = machar()
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m - nfree
self.fnorm = norm(fvec)
# Initialize Levelberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter - 1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = max(len(fvec) - len(x), 0)
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet, dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if not np.array_equal(xnew0, self.params):
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn, autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = np.flatnonzero(qllim & (x == llim))
nlpeg = len(whlpeg)
whupeg = np.flatnonzero(qulim & (x == ulim))
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for wh in whlpeg:
sum0 = np.sum(fvec * fjac[:, wh])
if sum0 > 0:
fjac[:, wh] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for wh in whupeg:
sum0 = np.sum(fvec * fjac[:, wh])
if sum0 < 0:
fjac[:, wh] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (not rescale) or (diag is None):
diag = wa2.copy()
# Introduced threshold (Michele Cappellari, 19 May 2017)
thresh = np.max(np.abs(diag)) * self.machar.machep
wh = np.abs(diag) < thresh
diag[wh] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = norm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wa4[j:] -= fj * np.sum(fj*wa4[j:]) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[:n, :n]
fjac = fjac[:, ipvt]
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = np.sum(fjac[:j+1, j]*qtf[:j+1])/self.fnorm
gnorm = max(gnorm, np.abs(sum0/wa2[l]))
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if not rescale:
diag = np.maximum(wa2, diag)
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = np.maximum(wa1[whlpeg], 0)
if nupeg > 0:
wa1[whupeg] = np.minimum(wa1[whupeg], 0)
dwa1 = np.abs(wa1) > machep
whl = dwa1 & qllim & (x + wa1 < llim)
if np.any(whl):
t = (llim[whl] - x[whl]) / wa1[whl]
alpha = min(alpha, np.min(t))
whu = dwa1 & qulim & (x + wa1 > ulim)
if np.any(whu):
t = (ulim[whu] - x[whu]) / wa1[whu]
alpha = min(alpha, np.min(t))
# Obey any max step values.
if qminmax:
nwa1 = wa1*alpha
whmax = qmax & (maxstep > 0)
if np.any(whmax):
mrat = np.max(np.abs(nwa1[whmax]/maxstep[whmax]))
if mrat > 1:
alpha /= mrat
# Scale the resulting vector
wa1 *= alpha
wa2 = x + wa1
# The previous step should be feasible by design, but this
# may not be true to machine precision. If any value is
# still outside the box, we put it at the boundary.
wh = qulim & (wa2 > ulim)
wa2[wh] = ulim[wh]
wh = qllim & (wa2 < llim)
wa2[wh] = llim[wh]
wa3 = diag*wa1
pnorm = norm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = min(delta, pnorm)
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
return
fnorm1 = norm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = 1 - (fnorm1/self.fnorm)**2
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[:j+1] += fjac[:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = alpha*norm(wa3)/self.fnorm
temp2 = np.sqrt(alpha*par)*pnorm/self.fnorm
prered = temp1**2 + 2*temp2**2
dirder = -(temp1**2 + temp2**2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = 0.5
else:
temp = 0.5*dirder/(dirder + 0.5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*min(delta, 10*pnorm)
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = 2*pnorm
par = 0.5*par
# Test for successful iteration
if ratio >= 0.0001: # Successful iteration
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = norm(wa2)
self.fnorm = fnorm1
self.niter += 1
# Tests for convergence
if (np.abs(actred) <= ftol) and (prered <= ftol) \
and (ratio <= 2):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (np.abs(actred) <= ftol) and (prered <= ftol) \
and (ratio <= 2) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (np.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~np.all(np.isfinite(wa1) & np.isfinite(wa2) & \
np.isfinite(x)) or ~np.isfinite(ratio):
self.errmsg = ('''ERROR: parameter or function value(s) have become
'infinite; check model function for over- 'and underflow''')
self.status = -16
break
if self.status != 0:
break;
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = norm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = max(self.fnorm, fnorm1)
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[:n,:n], ipvt[:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = np.zeros([nn, nn])
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = np.zeros(nn)
d = np.diagonal(self.covar)
wh = np.nonzero(d >= 0)[0]
if len(wh) > 0:
self.perror[wh] = np.sqrt(d[wh])
return
###############################################################################
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'params': self.params,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
}.__str__()
###############################################################################
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = norm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print(p + (pformat % x[i]) + ' ')
return 0
###############################################################################
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if parinfo is None and n == 0:
values = default
return values
if key in parinfo[0]:
values = np.array([a[key] for a in parinfo])
else: # The extra dtype below is only needed in Numpy < 1.12
values = np.full(len(parinfo), default, dtype=np.array(default).dtype)
return values
###############################################################################
# Call user function or procedure, with derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev += 1
f = fcn(x, fjac=fjac, **functkw)
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
if self.damp > 0:
f = np.tanh(f/self.damp)
return [0, f]
###############################################################################
    def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None,
               dside=None, epsfcn=None, autoderivative=1, functkw=None,
               xall=None, ifree=None, dstep=None):
        """Compute the m x n Jacobian of the residuals w.r.t. the free parameters.

        By default (autoderivative=1) uses finite differences: one-sided
        steps normally, two-sided when dside requests it.  With
        autoderivative=0 the user function is asked for analytic
        derivatives instead.  Returns the Jacobian array, or None on error.

        x is the free-parameter subvector, xall the full parameter vector,
        ifree the indices of the free parameters within xall.  step/dstep
        are absolute/relative step sizes over ALL parameters; ulimited and
        ulimit describe upper limits of the free parameters only.
        """
        m = len(fvec)
        n = len(x)
        machep = self.machar.machep
        if epsfcn is None:
            epsfcn = machep
        # Differencing step scale: sqrt of the larger of the requested and
        # machine precision.
        eps = np.sqrt(max(epsfcn, machep))
        if xall is None:
            xall = x
        if ifree is None:
            ifree = np.arange(len(xall))
        if step is None:
            step = np.full_like(x, eps)
        nall = len(xall)
        # Compute analytical derivative if requested
        if autoderivative == 0:
            fjac = np.zeros(nall)
            fjac[ifree] = 1.0  # Specify which parameters need derivatives
            # NOTE(review): the return value of self.call is discarded here;
            # this path can only work if the user function fills fjac in
            # place -- confirm against the user-function contract.
            self.call(fcn, xall, functkw, fjac=fjac)
            if len(fjac) != m*nall:
                print('ERROR: Derivative matrix was not computed properly.')
                return None
            # This definition is consistent with CURVEFIT
            # Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
            fjac.shape = [m,nall]
            fjac = -fjac
            # Select only the free parameters
            if len(ifree) < nall:
                fjac = fjac[:,ifree]
            fjac.shape = [m, n]
            return fjac
        fjac = np.zeros([m, n])
        # Default per-parameter step: relative to |x|.
        h = eps*np.abs(x)
        # if STEP is given, use that
        # STEP includes the fixed parameters
        if step is not None:
            stepi = step[ifree]
            wh = stepi > 0
            if np.any(wh):
                h[wh] = stepi[wh]
        # if relative step is given, use that
        # DSTEP includes the fixed parameters
        # (assumes dstep is a sequence -- dstep=None would raise here;
        #  the caller in the main driver always passes it. TODO confirm)
        if len(dstep) > 0:
            dstepi = dstep[ifree]
            wh = dstepi > 0
            if np.any(wh):
                h[wh] = np.abs(dstepi[wh]*x[wh])
        # Reverse the sign of the step if we are up against the parameter
        # limit, or if the user requested it.
        # DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
        # varying ones)
        wh = dside[ifree] == -1
        if len(ulimited) > 0 and len(ulimit) > 0:
            wh |= (ulimited != 0) & (x > ulimit - h)
        if np.any(wh):
            h[wh] = -h[wh]
        # Loop through parameters, computing the derivative for each.
        for j in range(n):
            xp = xall.copy()
            xp[ifree[j]] += h[j]
            [status, fp] = self.call(fcn, xp, functkw)
            if status < 0:
                # User function signalled failure; propagate as None.
                return None
            if -1 <= dside[ifree[j]] <= 1:
                # COMPUTE THE ONE-SIDED DERIVATIVE
                fjac[:,j] = (fp - fvec)/h[j]
            else:
                # COMPUTE THE TWO-SIDED DERIVATIVE
                xp[ifree[j]] = xall[ifree[j]] - h[j]
                [status, fm] = self.call(fcn, xp, functkw)
                if status < 0:
                    return None
                fjac[:,j] = (fp - fm)/(2*h[j])
        return fjac
###############################################################################
#
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
    def qrfac(self, a, pivot=0):
        """QR-factorize the m x n matrix `a` with optional column pivoting.

        Householder reduction following MINPACK's QRFAC (see the block
        comment above).  Modifies `a` in place and returns
        [a, ipvt, rdiag, acnorm]: the packed factors, the permutation
        index array, the diagonal of R (permuted order), and the original
        column norms (standard order).
        """
        machep = self.machar.machep
        sz = a.shape
        m = sz[0]
        n = sz[1]
        # Compute the initial column norms and initialize arrays
        acnorm = np.zeros(n)
        for j in range(n):
            acnorm[j] = norm(a[:,j])
        rdiag = acnorm.copy()
        wa = rdiag.copy()
        ipvt = np.arange(n)
        # Reduce a to r with householder transformations
        minmn = np.min([m,n])
        for j in range(minmn):
            if pivot != 0:
                # Bring the column of largest norm into the pivot position
                kmax = np.argmax(rdiag[j:])
                kmax += j
                # Exchange rows via the pivot only. Avoid actually exchanging
                # the rows, in case there is lots of memory transfer. The
                # exchange occurs later, within the body of MPFIT, after the
                # extraneous columns of the matrix have been shed.
                if kmax != j:
                    ipvt[j], ipvt[kmax] = ipvt[kmax], ipvt[j]
                    rdiag[kmax] = rdiag[j]
                    wa[kmax] = wa[j]
            # Compute the householder transformation to reduce the jth
            # column of A to a multiple of the jth unit vector
            lj = ipvt[j]
            # NOTE: ajj is a VIEW into `a`; the in-place ops below
            # (/= and [0] +=) therefore mutate `a` directly, and the
            # final assignment back into a[j:,lj] is a no-op kept for
            # clarity.
            ajj = a[j:,lj]
            ajnorm = norm(ajj)
            if ajnorm == 0:
                # Column already zero: nothing left to reduce.
                break
            if a[j,lj] < 0:
                # Choose the sign that avoids cancellation.
                ajnorm = -ajnorm
            ajj /= ajnorm
            ajj[0] += 1
            a[j:,lj] = ajj
            # Apply the transformation to the remaining columns
            # and update the norms
            # NOTE to SELF: tried to optimize this by removing the loop,
            # but it actually got slower. Reverted to "for" loop to keep
            # it simple.
            if j+1 < n:
                for k in range(j+1, n):
                    lk = ipvt[k]
                    if a[j,lj] != 0:
                        a[j:,lk] -= ajj * np.sum(a[j:,lk]*ajj)/a[j,lj]
                        if (pivot != 0) and (rdiag[k] != 0):
                            # Downdate the column norm; recompute from
                            # scratch when cancellation makes it unreliable.
                            temp = a[j,lk]/rdiag[k]
                            rdiag[k] *= np.sqrt(max(1 - temp**2, 0))
                            temp = rdiag[k]/wa[k]
                            if (0.05*temp**2) <= machep:
                                rdiag[k] = norm(a[j+1:,lk])
                                wa[k] = rdiag[k]
            rdiag[j] = -ajnorm
        return [a, ipvt, rdiag, acnorm]
###############################################################################
#
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
    def qrsolv(self, r, ipvt, diag, qtb, sdiag):
        """Solve the augmented least-squares system a*x = b, d*x = 0.

        Completes the solution given the QR factors (see MINPACK QRSOLV
        documentation in the block comment above).  `r` is modified in
        place; returns (r, x, sdiag) where x is the solution and sdiag
        holds the diagonal of the triangular factor S.
        """
        sz = r.shape
        m = sz[0]
        n = sz[1]
        # copy r and (q transpose)*b to preserve input and initialize s.
        # in particular, save the diagonal elements of r in x.
        for j in range(n):
            # Mirror the upper-triangle row j into the lower-triangle
            # column j, so S can be built in the lower triangle.
            r[j:n,j] = r[j,j:n]
        x = np.diagonal(r).copy()
        wa = qtb.copy()
        # Eliminate the diagonal matrix d using a givens rotation
        for j in range(n):
            l = ipvt[j]
            if diag[l] == 0:
                break
            sdiag[j:] = 0
            sdiag[j] = diag[l]
            # The transformations to eliminate the row of d modify only a
            # single element of (q transpose)*b beyond the first n, which
            # is initially zero.
            qtbpj = 0.
            for k in range(j,n):
                if sdiag[k] == 0:
                    break
                # Pick the Givens rotation formula that avoids overflow,
                # depending on which of r[k,k] / sdiag[k] dominates.
                if np.abs(r[k,k]) < np.abs(sdiag[k]):
                    cotan = r[k,k]/sdiag[k]
                    sine = 0.5/np.sqrt(0.25 + 0.25*cotan**2)
                    cosine = sine*cotan
                else:
                    tang = sdiag[k]/r[k,k]
                    cosine = 0.5/np.sqrt(0.25 + 0.25*tang**2)
                    sine = cosine*tang
                # Compute the modified diagonal element of r and the
                # modified element of ((q transpose)*b,0).
                r[k,k] = cosine*r[k,k] + sine*sdiag[k]
                temp = cosine*wa[k] + sine*qtbpj
                qtbpj = -sine*wa[k] + cosine*qtbpj
                wa[k] = temp
                # Accumulate the transformation in the row of s
                if n > k+1:
                    temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
                    sdiag[k+1:n] = cosine*sdiag[k+1:n] - sine*r[k+1:n,k]
                    r[k+1:n,k] = temp
            # Store the diagonal of S and restore the saved diagonal of R.
            sdiag[j] = r[j,j]
            r[j,j] = x[j]
        # Solve the triangular system for z. If the system is singular
        # then obtain a least squares solution
        nsing = n
        # Introduced threshold (Michele Cappellari, 19 May 2017)
        thresh = np.max(np.abs(sdiag)) * self.machar.machep
        wh = np.flatnonzero(np.abs(sdiag) < thresh)
        if len(wh) > 0:
            # Rank-deficient: zero the tail of the RHS beyond the first
            # (near-)singular pivot.
            nsing = wh[0]
            wa[nsing:] = 0
        if nsing >= 1:
            wa[nsing-1] /= sdiag[nsing-1] # Degenerate case
            # *** Reverse loop ***
            # Back-substitution through the triangular factor S.
            for j in range(nsing-2,-1,-1):
                sum0 = np.sum(r[j+1:nsing,j]*wa[j+1:nsing])
                wa[j] = (wa[j] - sum0)/sdiag[j]
        # Permute the components of z back to components of x
        x[ipvt] = wa
        return (r, x, sdiag)
###############################################################################
#
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
    def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
        """Determine the Levenberg-Marquardt parameter `par`.

        Given the QR factors of the Jacobian, finds par such that the
        scaled step norm ||d*x|| approximately equals the trust-region
        radius `delta` (see MINPACK LMPAR documentation in the block
        comment above).  Iterates at most 10 times.  Returns
        [r, par, x, sdiag].
        """
        dwarf = self.machar.minnum
        machep = self.machar.machep
        sz = r.shape
        m = sz[0]
        n = sz[1]
        # Compute and store in x the gauss-newton direction. If the
        # jacobian is rank-deficient, obtain a least-squares solution
        nsing = n
        wa1 = qtb.copy()
        # Relative singularity threshold on the diagonal of R.
        thresh = np.max(np.abs(np.diagonal(r))) * machep
        wh = np.flatnonzero(np.abs(np.diagonal(r)) < thresh)
        if len(wh) > 0:
            nsing = wh[0]
            wa1[wh[0]:] = 0
        if nsing >= 1:
            # *** Reverse loop ***
            # Back-substitute through R to get the Gauss-Newton step.
            for j in range(nsing-1,-1,-1):
                wa1[j] /= r[j,j]
                if j-1 >= 0:
                    wa1[:j] -= r[:j,j]*wa1[j]
        # Note: ipvt here is a permutation array
        x[ipvt] = wa1
        # Initialize the iteration counter. Evaluate the function at the
        # origin, and test for acceptance of the gauss-newton direction
        iter = 0
        wa2 = diag * x
        dxnorm = norm(wa2)
        fp = dxnorm - delta
        if fp <= 0.1*delta:
            # Gauss-Newton step is already within 10% of the trust radius:
            # par = 0 is acceptable.
            return [r, 0., x, sdiag]
        # If the jacobian is not rank deficient, the newton step provides a
        # lower bound, parl, for the zero of the function. Otherwise set
        # this bound to zero.
        parl = 0.
        if nsing >= n:
            wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
            wa1[0] /= r[0,0] # Degenerate case
            for j in range(1,n):   # Note "1" here, not zero
                sum0 = np.sum(r[:j,j]*wa1[:j])
                wa1[j] = (wa1[j] - sum0)/r[j,j]
            temp = norm(wa1)
            parl = fp/(delta*temp**2)
        # Calculate an upper bound, paru, for the zero of the function
        for j in range(n):
            sum0 = np.sum(r[:j+1,j]*qtb[:j+1])
            wa1[j] = sum0/diag[ipvt[j]]
        gnorm = norm(wa1)
        paru = gnorm/delta
        if paru == 0:
            paru = dwarf/min(delta, 0.1)
        # If the input par lies outside of the interval (parl,paru), set
        # par to the closer endpoint
        par = max(par, parl)
        par = min(par, paru)
        if par == 0:
            par = gnorm/dxnorm
        # Beginning of an interation
        while(1):
            iter = iter + 1
            # Evaluate the function at the current value of par
            if par == 0:
                par = max(dwarf, paru*0.001)
            temp = np.sqrt(par)
            wa1 = temp * diag
            [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
            wa2 = diag*x
            dxnorm = norm(wa2)
            temp = fp
            fp = dxnorm - delta
            # Converged when within 10% of delta, or the bracket has
            # collapsed on a negative fp with parl == 0, or after 10
            # iterations (MINPACK's documented cap).
            if (np.abs(fp) <= 0.1*delta) or \
               ((parl == 0) and (fp <= temp) and (temp < 0)) or \
               (iter == 10):
                break;
            # Compute the newton correction
            wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
            for j in range(n-1):
                wa1[j] /= sdiag[j]
                wa1[j+1:n] -= r[j+1:n,j]*wa1[j]
            wa1[n-1] /= sdiag[n-1] # Degenerate case
            temp = norm(wa1)
            parc = fp/(delta*temp**2)
            # Depending on the sign of the function, update parl or paru
            if fp > 0:
                parl = max(parl, par)
            if fp < 0:
                paru = min(paru, par)
            # Compute an improved estimate for par
            par = max(parl, par + parc)
            # End of an iteration
        # Termination
        return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if ptied is None:
return
for i, pti in enumerate(ptied):
if pti != '':
exec('p[' + str(i) + '] = ' + pti)
return p
###############################################################################
#
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
    def calc_covar(self, rr, ipvt=None, tol=1e-14):
        """Compute the covariance matrix inverse(a^T a) from the R factor.

        Follows MINPACK's COVAR (see the block comment above): `rr` is the
        n x n upper triangle of R from the pivoted QR factorization, `ipvt`
        the permutation, and `tol` defines the numerical rank.  Returns the
        symmetric covariance matrix, or -1 on invalid input.
        """
        if rr.ndim != 2:
            print('ERROR: r must be a two-dimensional matrix')
            return -1
        s = rr.shape
        n = s[0]
        if s[0] != s[1]:
            print('ERROR: r must be a square matrix')
            return -1
        if ipvt is None:
            # No pivoting information: assume identity permutation.
            ipvt = np.arange(n)
        r = rr.copy()
        r.shape = [n,n]
        # For the inverse of r in the full upper triangle of r
        # `l` tracks the last numerically non-singular column (the rank-1
        # index); columns beyond it are treated as singular below.
        l = -1
        tolr = tol * np.abs(r[0,0])
        for k in range(n):
            if np.abs(r[k,k]) <= tolr:
                break
            r[k,k] = 1./r[k,k]
            for j in range(k):
                temp = r[k,k] * r[j,k]
                r[j,k] = 0.
                r[:j+1,k] -= temp*r[:j+1,j]
            l = k
        # Form the full upper triangle of the inverse of (r transpose)*r
        # in the full upper triangle of r
        if l >= 0:
            for k in range(l+1):
                for j in range(k):
                    temp = r[j,k]
                    r[:j+1,j] += temp*r[:j+1,k]
                temp = r[k,k]
                r[:k+1,k] *= temp
        # For the full lower triangle of the covariance matrix
        # in the strict lower triangle or and in wa
        wa = np.repeat([r[0,0]], n)
        for j in range(n):
            jj = ipvt[j]
            # Columns past the numerical rank get zeroed (singular part).
            sing = j > l
            for i in range(j+1):
                if sing:
                    r[i,j] = 0.
                ii = ipvt[i]
                if ii > jj:
                    r[ii,jj] = r[i,j]
                if ii < jj:
                    r[jj,ii] = r[i,j]
            wa[jj] = r[j,j]
        # Symmetrize the covariance matrix in r
        for j in range(n):
            r[:j+1,j] = r[j,:j+1]
            r[j,j] = wa[j]
        return r
###############################################################################
class machar:
    """Machine-precision constants used by the fitting routines.

    Mirrors MINPACK's machine-parameter values. By default the constants
    are derived from single precision (float32), matching the original
    hard-coded behaviour; pass ``double=True`` for float64 constants.
    """

    def __init__(self, double=False):
        # Generalization: the precision used to be fixed at float32; the
        # default preserves that, but float64 is now selectable.
        info = np.finfo(np.float64) if double else np.finfo(np.float32)
        self.machep = info.eps      # machine epsilon
        self.maxnum = info.max      # largest representable number
        self.minnum = info.tiny     # smallest positive normal number
        self.maxlog = np.log(self.maxnum)
        self.minlog = np.log(self.minnum)
        # rdwarf/rgiant: under/overflow-safe bounds (cf. MINPACK enorm).
        self.rdwarf = np.sqrt(self.minnum*1.5) * 10
        self.rgiant = np.sqrt(self.maxnum) * 0.1
###############################################################################
| tomevans/wfc3 | mpfit.UNSTABLE.py | Python | mit | 90,154 | [
"Gaussian"
] | 1750bf85e8dd378219c9567914a6e7d8a41d1f41d38fe2d500463294a6dba27b |
#!/usr/bin/env python
# This uses jobTree (https://github.com/benedictpaten/jobTree) to run analysis on the CNAVG output.
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from optparse import OptionGroup
from optparse import OptionParser
from sonLib.bioio import logger
from sonLib.bioio import system
import subprocess, os
import argparse
import numpy as np
import pysam
import re, glob
import cPickle as pickle
from cnavgpost.mergehistories.score_edges_within_pevents import *
from cnavgpost.mergehistories.score_and_link_cycles import *
import cnavgpost.mergehistories.event_cycles_module as histseg
import cnavgpost.mergehistories.create_pevnts_file_jobtree as pevntsjobtree
import cnavgpost.genehistory.pick_and_label_best_seghists as pick_and_label_best_seghists
import cnavgpost.genehistory.make_seghists_from_edges as make_seghists_from_edges
import cnavgpost.genehistory.seghists_to_gene_orders as seghists_to_gene_orders
import cnavgpost.diagnostics.mcmc_mixing_analysis_jobtree as mcmcjobtree
import cnavgpost.simulations.analyze_simulation as analyze_simulation
import cnavgpost.genehistory.annotate_events as annotate_events
#======= Main Setup ===================
class Setup(Target):
    # First-stage jobTree target: ensures historystats.txt and the
    # sample's .pevnts file exist, then schedules the downstream analysis
    # (DoAnalysisOfMergedEvents) as a follow-on target.
    def __init__(self, options):
        Target.__init__(self)
        self.options=options
        self.events=[]
        self.totalp=0
        self.edges=[]
        self.historyScores=[]
    def run(self):
        self.logToMaster("Setting up...")
        opts=self.options
        histseg.Global_BINWIDTH=opts.binwidth
        sampleid=opts.sampleid
        outputdir=opts.outputdir
        subprocess.call("mkdir -p %s" % outputdir, shell=True)
        # (Re)create the per-history score table if it is missing.
        historystatsfile=os.path.join(outputdir, "historystats.txt")
        if not os.path.exists(historystatsfile):
            self.logToMaster("Creating historystats.txt...%s" % historystatsfile)
            logger.info("historystatsfile: %s" % historystatsfile)
            pevntsjobtree.CombineHistoryStatsfiles(opts, historystatsfile).run()
        self.historyScores=np.loadtxt(historystatsfile, dtype=int)
        # Total likelihood over all sampled histories; used downstream to
        # normalize event probabilities.
        self.totalp=histseg.compute_likelihood_histories(self.historyScores[:,0], self.historyScores)
        logger.info("Global_BINWIDTH: %d" % histseg.Global_BINWIDTH)
        logger.info("totalp is %s" % str(self.totalp))
        pevntsfile=os.path.join(outputdir, opts.sampleid + ".pevnts")
        # Rebuild the .pevnts file when forced via --pevnts or when absent.
        if opts.pevnts or not os.path.exists(pevntsfile):
            self.logToMaster("Creating pevntsfile...%s" % pevntsfile)
            logger.info("pevntsfile: %s" % pevntsfile)
            self.addChildTarget(pevntsjobtree.CreatePevntsFile(pevntsfile, self.historyScores, self.totalp, opts))
        # Runs after all child targets complete.
        self.setFollowOnTarget(DoAnalysisOfMergedEvents(opts))
class DoAnalysisOfMergedEvents(Target):
    """Second-stage jobTree target.

    Starting from the sample's .pevnts file (created by Setup), derives the
    edge file (.pedgs), segment histories (seghists.txt), optional gene
    labels/orders, merged event/edge files (.pmevnts/.pmedgs), a breakpoint
    table, and optionally MCMC-mixing and simulation analyses.
    """

    def __init__(self, options):
        Target.__init__(self)
        self.options = options
        self.events = []
        self.totalp = 0
        self.edges = []
        self.historyScores = []

    def run(self):
        self.logToMaster("Setting up...")
        opts = self.options
        histseg.Global_BINWIDTH = opts.binwidth
        sampleid = opts.sampleid
        outputdir = opts.outputdir
        # (Re)create the per-history score table if a previous stage did not.
        historystatsfile = os.path.join(outputdir, "historystats.txt")
        if not os.path.exists(historystatsfile):
            self.logToMaster("Creating historystats.txt...%s" % historystatsfile)
            logger.info("historystatsfile: %s" % historystatsfile)
            pevntsjobtree.CombineHistoryStatsfiles(opts, historystatsfile).run()
        self.historyScores = np.loadtxt(historystatsfile, dtype=int)
        self.totalp = histseg.compute_likelihood_histories(self.historyScores[:, 0], self.historyScores)
        # Check that the *.pevnts file exists; it is a hard prerequisite.
        pevntsfile = os.path.join(outputdir, opts.sampleid + ".pevnts")
        if not os.path.exists(pevntsfile):
            # Bug fix: the original called sys.exit() but `sys` is not in
            # this file's import list; SystemExit preserves the intent
            # (message printed, non-zero exit) without the NameError risk.
            raise SystemExit("The required %s file does not exist." % pevntsfile)
        pedgesfile = os.path.join(outputdir, sampleid + ".pedgs")
        if opts.pedges or not os.path.exists(pedgesfile):
            self.logToMaster("Creating pedgesfile...%s" % pedgesfile)
            logger.info("pedgesfile: %s" % pedgesfile)
            CreatePedgesFile(pickle.load(open(pevntsfile, 'rb')), pedgesfile, self.historyScores, self.totalp, False).run()
        seghistfile = os.path.join(outputdir, "seghists.txt")
        if opts.sgh or not os.path.exists(seghistfile):
            self.logToMaster("Creating seghists file ... %s" % seghistfile)
            make_seghists_from_edges.main(pickle.load(open(pedgesfile, 'rb')), self.historyScores, seghistfile)
        # Label the seghists and compute gene orders if an annotation
        # (bed) file is given.
        if opts.bedfile:
            labeledfn = os.path.join(outputdir, "seghists.labeled")
            if not os.path.exists(labeledfn):
                pick_and_label_best_seghists.main(seghistfile, opts.bedfile, True, labeledfn)
            geneordfn = os.path.join(outputdir, "geneords.txt")
            if opts.geneords or not os.path.exists(geneordfn):
                seghists_to_gene_orders.main(seghistfile, opts.bedfile, geneordfn)
        mrgpeventsfile = os.path.join(outputdir, sampleid + ".pmevnts")
        if not os.path.exists(mrgpeventsfile):
            self.logToMaster("Creating mpevnts...%s" % mrgpeventsfile)
            logger.info("mrgpeventsfile: %s" % mrgpeventsfile)
            CreateMergedEventsFile(pickle.load(open(pevntsfile, 'rb')), mrgpeventsfile, self.historyScores).run()
        mrgpedgesfile = os.path.join(outputdir, sampleid + ".pmedgs")
        if not os.path.exists(mrgpedgesfile):
            self.logToMaster("Creating mrgpegesfile...%s" % mrgpedgesfile)
            logger.info("mrgpedgesfile: %s" % mrgpedgesfile)
            CreateMergedEventsFile(pickle.load(open(pedgesfile, 'rb')), mrgpedgesfile, self.historyScores).run()
        breaksfile = os.path.join(outputdir, "breakpoints.txt")
        if not os.path.exists(breaksfile):
            self.logToMaster("Creating breaksfile...%s" % breaksfile)
            breaklocs = histseg.get_breakpoints(pickle.load(open(pedgesfile, 'rb')), opts.trueID)
            breaklocs2 = histseg.get_breakpoints(pickle.load(open(mrgpedgesfile, 'rb')), opts.trueID)
            # Close the output handle deterministically (it was leaked in
            # the original).
            breaksfh = open(breaksfile, 'w')
            try:
                for loc in sorted(breaklocs.keys()):
                    (n, t) = breaklocs[loc]
                    (n2, t2) = breaklocs2[loc]
                    breaksfh.write("%s\t%d\t%d\t%d\t%d\n" % (loc, n, t, n2, t2))
            finally:
                breaksfh.close()
        # Creating links, per-event annotations and gene ranks are no longer
        # options; seghists (above) cover annotation and gene ordering.
        if opts.mcmcmix:
            self.logToMaster("Setting up MCMC analysis")
            # The original computed these two paths twice; once suffices.
            mcmcdir = os.path.join(outputdir, "mcmcdata")
            mcmcdat = os.path.join(mcmcdir, "edge_counts.dat")
            if not os.path.exists(mcmcdir) or not os.path.exists(mcmcdat):
                subprocess.call("mkdir -p %s" % mcmcdir, shell=True)
                opts.pevnts = pevntsfile
                opts.pedges = pedgesfile
                self.addChildTarget(mcmcjobtree.SetupMCMC(opts, mcmcdir))
        if opts.simulation:
            self.logToMaster("Setting up Simulation analysis")
            # Each stats file is regenerated when missing or empty.
            simoutput = os.path.join(outputdir, "events.stats")
            if ((not os.path.exists(simoutput)) or (os.path.getsize(simoutput) == 0)):
                self.addChildTarget(SimAnalysisJob(pevntsfile, opts.trueID, self.historyScores, "events", outputdir, opts.binwidth))
            simoutput2 = os.path.join(outputdir, "edges.stats")
            if ((not os.path.exists(simoutput2)) or (os.path.getsize(simoutput2) == 0)):
                self.addChildTarget(SimAnalysisJob(pedgesfile, opts.trueID, self.historyScores, "edges", outputdir, opts.binwidth))
            simoutput3 = os.path.join(outputdir, "mrgedges.stats")
            if ((not os.path.exists(simoutput3)) or (os.path.getsize(simoutput3) == 0)):
                self.addChildTarget(SimAnalysisJob(mrgpedgesfile, opts.trueID, self.historyScores, "mrgedges", outputdir, opts.binwidth))
class SimAnalysisJob(Target):
    # Compares sampled events/edges against the known true simulated
    # history, writing <outname>.dat and <outname>.stats into outputdir.
    def __init__(self, peventsfile, trueID, historyScores, outname, outputdir, binwidth):
        Target.__init__(self)
        # NOTE(review): the pickle is loaded eagerly at construction time
        # (on the scheduling process), not in run() -- confirm intended.
        self.events=pickle.load(open(peventsfile, 'rb'))
        self.outname=outname
        self.trueID = trueID
        self.historyScores=historyScores
        self.outputdir=outputdir
        self.binwidth=binwidth
    def run(self):
        datout=open(os.path.join(self.outputdir, "%s.dat" % self.outname), 'w')
        statout=open(os.path.join(self.outputdir, "%s.stats" % self.outname), 'w')
        # Breakpoint output is disabled here (handled by the parent target).
        #breaksfile=open(os.path.join(self.outputdir, "breakpoints.txt"), 'w')
        breaksfile=""
        histseg.Global_BINWIDTH=self.binwidth
        analyze_simulation.analyze_simulation(self.events, self.trueID, self.historyScores, datout, statout, breaksfile, self.outputdir)
class CreateLinksFile(Target):
    """jobTree target that scores and links cycles, writing a .links file.

    Retained for completeness even though link creation is no longer
    reachable from the command-line options.
    """

    def __init__(self, pevntsfile, linksfile, totalp):
        Target.__init__(self)
        self.pevntsfile = pevntsfile
        self.linksfile = linksfile
        self.totalp = totalp

    def run(self):
        self.logToMaster("CreateLinksFile\n")
        # Bug fix: the original read `opts = self.options`, but this class
        # never sets self.options, so run() always raised AttributeError.
        # The variable was unused, so it is simply removed.
        myargs = ['--inpickle', self.pevntsfile, '--links', self.linksfile,
                  '--totalp', str(self.totalp)]
        parser = argparse.ArgumentParser()
        add_event_link_options(parser)
        args = parser.parse_args(myargs)
        score_and_link_cycles(args)
class CreatePedgesFile(Target):
    """jobTree target that scores the edges within each p-event and
    pickles the resulting edge list to `pedgesfile`."""

    def __init__(self, events, pedgesfile, historyScores, totalp, ignore_cn):
        Target.__init__(self)
        self.events = events
        self.pedgesfile = pedgesfile
        self.historyScores = historyScores
        self.totalp = totalp
        # ignore_cn: whether copy-number values are ignored while scoring.
        self.ignore_cn = ignore_cn

    def run(self):
        self.logToMaster("CreatePedgesFile, ignore_cn: %s\n" % self.ignore_cn)
        edges = score_edges_within_pevents(self.events, self.historyScores,
                                           self.totalp, ignore_cn=self.ignore_cn)
        # Use a context manager so the output handle is always closed
        # (the original leaked the file object passed to pickle.dump).
        with open(self.pedgesfile, 'wb') as fh:
            pickle.dump(edges, fh, pickle.HIGHEST_PROTOCOL)
class CreateMergedEventsFile(Target):
    """jobTree target that merges events by type and pickles the merged
    list to `outpfn`."""

    def __init__(self, events, outpfn, historyScores):
        Target.__init__(self)
        self.events = events
        self.outpfn = outpfn
        self.historyScores = historyScores

    def run(self):
        self.logToMaster("CreateMergedEventsFile\n")
        events = self.events
        # Events must be unpacked before merging and trimmed afterwards to
        # keep the pickle small.
        for e in events:
            e.unpack()
        mrged = histseg.merge_events_by_type(events, self.historyScores)
        for e in mrged:
            e.trim()
        # Use a context manager so the output handle is always closed
        # (the original leaked the file object passed to pickle.dump).
        with open(self.outpfn, 'wb') as fh:
            pickle.dump(mrged, fh, pickle.HIGHEST_PROTOCOL)
class CreateAnnotationFile(Target):
    """jobTree target that annotates events with entries from a tabix-indexed
    annotation file, writing the result to `annotationfile`."""

    def __init__(self, evnts, tabixfile, annotationfile):
        Target.__init__(self)
        self.evnts = evnts
        self.tabixfile = tabixfile
        self.annfile = annotationfile

    def run(self):
        self.logToMaster("CreateAnnotationFile\n")
        allevents = self.evnts
        mytabix = pysam.Tabixfile(self.tabixfile, 'r')
        # Use a context manager so the output handle is always closed
        # (the original left `outputfh` open).
        with open(self.annfile, 'w') as outputfh:
            annotate_events.main(allevents, mytabix, outputfh)
def add_analysis_options(parser):
    """Register the "CNAVG Post Analysis Options" group on an optparse parser."""
    group=OptionGroup(parser, "CNAVG Post Analysis Options")
    group.add_option("--sampleid", dest="sampleid", help="the sampleid")
    group.add_option("--cnavgout", dest="cnavgout", help="The CN-AVG output directory for the sample")
    group.add_option("--outputdir", dest="outputdir", help="The output directory for the analysis")
    group.add_option('--binwidth', dest='binwidth', help='the multiplier between history ids of independent runs', default=histseg.Global_BINWIDTH, type="int")
    group.add_option('--pevnts', dest="pevnts", default=False, action="store_true", help="Rewrite .pevnts file. This file by default is created if it doesn't exist.")
    group.add_option('--pedges', dest="pedges", default=False, action="store_true", help="Rewrite .pedges file. This file is created by default if it doesn't exist.")
    group.add_option('--sgh', dest="sgh", default=False, action="store_true", help="Rewrite .seghists.txt file. This file is created by default if it doesn't exist.")
    group.add_option('--bedfile', dest="bedfile", help="The bed file containing gene annotations you are interested in. If given, the seghists will be annotated using these genes.")
    group.add_option('--geneords', dest="geneords", default=False, action="store_true", help="Rewrite geneords.txt file. This is created by default if given a bedfile.")
    # The following options were retired when seghist-based annotation
    # replaced per-event annotation, gene ranking, and link creation.
    #group.add_option('--tabixfile', dest="tabixfile", help="The tabix file containing gene annotations you are interested in. If given, the seghists will be annotated using these genes.")
    #group.add_option('--generank', dest="generank", default=False, action="store_true", help="Create .gnrnk file if it doesn't exist.")
    #group.add_option('--links', dest="links", default=False, action="store_true", help="create or rewrite .links file")
    # group.add_option('--ann', dest="ann", default=False, action="store_true", help="create .annotation file if it doesn't exist.")
    group.add_option('--mcmcmix', dest="mcmcmix", default=False, action="store_true", help="do analysis to look at the mixing across and within runs.")
    group.add_option('--simulation', dest="simulation", default=False, action="store_true", help="do simulation analysis.")
    group.add_option('--trueID', dest="trueID", default=0, help="The history id of the true simulated history.", type="int")
    parser.add_option_group(group)
def main():
    """Parse command-line options and launch the jobTree pipeline."""
    usage = "cn-avg_post_analysis_jobtree.py --cnavgout CN-AVG_outputdir --sampleid Sampleid --jobTree jobtreedir"
    parser = OptionParser(usage=usage)
    # Register all option groups before parsing.
    add_analysis_options(parser)
    mcmcjobtree.add_mcmc_options(parser)
    Stack.addJobTreeOptions(parser)
    (options, args) = parser.parse_args()
    failed_jobs = Stack(Setup(options)).startJobTree(options)
    if failed_jobs:
        raise RuntimeError("The jobtree contains %d failed jobs.\n" % failed_jobs)
if __name__ == "__main__":
    # Re-import this module under its canonical name so that classes
    # defined here resolve consistently when run as a script --
    # presumably required for jobTree's target pickling; TODO confirm.
    from cnavg_post_analysis_jobtree import *
    main()
| dzerbino/cn-avg | paper_figures/scripts/cnavg_post_analysis_jobtree.py | Python | bsd-3-clause | 14,248 | [
"pysam"
] | 207d837a5e6cd3974144450926daff3221ed5bbaabed739b23e9aa81a7532d9b |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Real NVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"RealNVP",
"real_nvp_default_template"
]
class RealNVP(bijector.Bijector):
  """RealNVP "affine coupling layer" for vector-valued events.

  Real NVP models a normalizing flow on a `D`-dimensional distribution via a
  single `D-d`-dimensional conditional distribution [(Dinh et al., 2017)][1]:

  `y[d:D] = y[d:D] * math_ops.exp(log_scale_fn(y[d:D])) + shift_fn(y[d:D])`
  `y[0:d] = x[0:d]`

  The last `D-d` units are scaled and shifted based on the first `d` units only,
  while the first `d` units are 'masked' and left unchanged. Real NVP's
  `shift_and_log_scale_fn` computes vector-valued quantities. For
  scale-and-shift transforms that do not depend on any masked units, i.e.
  `d=0`, use the `tfb.Affine` bijector with learned parameters instead.

  Masking is currently only supported for base distributions with
  `event_ndims=1`. For more sophisticated masking schemes like checkerboard or
  channel-wise masking [(Papamakarios et al., 2016)][4], use the `tfb.Permute`
  bijector to re-order desired masked units into the first `d` units. For base
  distributions with `event_ndims > 1`, use the `tfb.Reshape` bijector to
  flatten the event shape.

  Recall that the MAF bijector [(Papamakarios et al., 2016)][4] implements a
  normalizing flow via an autoregressive transformation. MAF and IAF have
  opposite computational tradeoffs - MAF can train all units in parallel but
  must sample units sequentially, while IAF must train units sequentially but
  can sample in parallel. In contrast, Real NVP can compute both forward and
  inverse computations in parallel. However, the lack of an autoregressive
  transformations makes it less expressive on a per-bijector basis.

  A "valid" `shift_and_log_scale_fn` must compute each `shift` (aka `loc` or
  "mu" in [Papamakarios et al. (2016)][4]) and `log(scale)` (aka "alpha" in
  [Papamakarios et al. (2016)][4]) such that each are broadcastable with the
  arguments to `forward` and `inverse`, i.e., such that the calculations in
  `forward`, `inverse` [below] are possible. For convenience,
  `real_nvp_default_template` is offered as a possible `shift_and_log_scale_fn`
  function.

  NICE [(Dinh et al., 2014)][2] is a special case of the Real NVP bijector
  which discards the scale transformation, resulting in a constant-time
  inverse-log-determinant-Jacobian. To use a NICE bijector instead of Real
  NVP, `shift_and_log_scale_fn` should return `(shift, None)`, and
  `is_constant_jacobian` should be set to `True` in the `RealNVP` constructor.
  Calling `real_nvp_default_template` with `shift_only=True` returns one such
  NICE-compatible `shift_and_log_scale_fn`.

  Caching: the scalar input depth `D` of the base distribution is not known at
  construction time. The first call to any of `forward(x)`, `inverse(x)`,
  `inverse_log_det_jacobian(x)`, or `forward_log_det_jacobian(x)` memoizes
  `D`, which is re-used in subsequent calls. This shape must be known prior to
  graph execution (which is the case if using tf.layers).

  #### Example Use

  ```python
  tfd = tf.contrib.distributions
  tfb = tfd.bijectors

  # A common choice for a normalizing flow is to use a Gaussian for the base
  # distribution. (However, any continuous distribution would work.) E.g.,
  nvp = tfd.TransformedDistribution(
      distribution=tfd.MultivariateNormalDiag(loc=[0., 0., 0.])),
      bijector=tfb.RealNVP(
          num_masked=2,
          shift_and_log_scale_fn=tfb.real_nvp_default_template(
              hidden_layers=[512, 512])))

  x = nvp.sample()
  nvp.log_prob(x)
  nvp.log_prob(0.)
  ```

  For more examples, see [Jang (2018)][3].

  #### References

  [1]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density Estimation
       using Real NVP. In _International Conference on Learning
       Representations_, 2017. https://arxiv.org/abs/1605.08803

  [2]: Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear
       Independent Components Estimation. _arXiv preprint arXiv:1410.8516_,
       2014. https://arxiv.org/abs/1410.8516

  [3]: Eric Jang. Normalizing Flows Tutorial, Part 2: Modern Normalizing Flows.
       _Technical Report_, 2018. http://blog.evjang.com/2018/01/nf2.html

  [4]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               num_masked,
               shift_and_log_scale_fn,
               is_constant_jacobian=False,
               validate_args=False,
               name=None):
    """Creates the Real NVP or NICE bijector.

    Args:
      num_masked: Python `int` indicating that the first `d` units of the event
        should be masked. Must be in the closed interval `[1, D-1]`, where `D`
        is the event size of the base distribution.
      shift_and_log_scale_fn: Python `callable` which computes `shift` and
        `log_scale` from both the forward domain (`x`) and the inverse domain
        (`y`). Calculation must respect the "autoregressive property" (see class
        docstring). Suggested default
        `real_nvp_default_template(hidden_layers=...)`.
        Typically the function contains `tf.Variables` and is wrapped using
        `tf.make_template`. Returning `None` for either (both) `shift`,
        `log_scale` is equivalent to (but more efficient than) returning zero.
      is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
        implementation assumes `log_scale` does not depend on the forward domain
        (`x`) or inverse domain (`y`) values. (No validation is made;
        `is_constant_jacobian=False` is always safe but possibly computationally
        inefficient.)
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      ValueError: If num_masked < 1.
    """
    name = name or "real_nvp"
    if num_masked <= 0:
      raise ValueError("num_masked must be a positive integer.")
    self._num_masked = num_masked
    # At construction time, we don't know input_depth.
    self._input_depth = None
    self._shift_and_log_scale_fn = shift_and_log_scale_fn
    super(RealNVP, self).__init__(
        forward_min_event_ndims=1,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name)

  def _cache_input_depth(self, x):
    # Memoize the event size `D` on first use; requires a statically-known
    # rightmost dimension.
    if self._input_depth is None:
      self._input_depth = x.shape.with_rank_at_least(1)[-1].value
      if self._input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      if self._num_masked >= self._input_depth:
        raise ValueError(
            "Number of masked units must be smaller than the event size.")

  def _forward(self, x):
    self._cache_input_depth(x)
    # Performs scale and shift: masked prefix x0 parameterizes the affine
    # transform applied to the remaining units x1.
    x0, x1 = x[:, :self._num_masked], x[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    y1 = x1
    if log_scale is not None:
      y1 *= math_ops.exp(log_scale)
    if shift is not None:
      y1 += shift
    y = array_ops.concat([x0, y1], axis=-1)
    return y

  def _inverse(self, y):
    self._cache_input_depth(y)
    # Performs un-shift and un-scale (exact inverse of _forward; note the
    # reversed operation order).
    y0, y1 = y[:, :self._num_masked], y[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    x1 = y1
    if shift is not None:
      x1 -= shift
    if log_scale is not None:
      x1 *= math_ops.exp(-log_scale)
    x = array_ops.concat([y0, x1], axis=-1)
    return x

  def _inverse_log_det_jacobian(self, y):
    self._cache_input_depth(y)
    y0 = y[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        y0, self._input_depth - self._num_masked)
    if log_scale is None:
      # NICE case: volume-preserving, so the log-determinant is zero.
      return constant_op.constant(0., dtype=y.dtype, name="ildj")
    return -math_ops.reduce_sum(log_scale, axis=-1)

  def _forward_log_det_jacobian(self, x):
    self._cache_input_depth(x)
    x0 = x[:, :self._num_masked]
    _, log_scale = self._shift_and_log_scale_fn(
        x0, self._input_depth - self._num_masked)
    if log_scale is None:
      # NICE case: volume-preserving, so the log-determinant is zero.
      return constant_op.constant(0., dtype=x.dtype, name="fldj")
    return math_ops.reduce_sum(log_scale, axis=-1)
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def real_nvp_default_template(
    hidden_layers,
    shift_only=False,
    activation=nn_ops.relu,
    name=None,
    *args,
    **kwargs):
  """Build a scale-and-shift function using a multi-layer neural network.

  This will be wrapped in a make_template to ensure the variables are only
  created once. It takes the `d`-dimensional input x[0:d] and returns the `D-d`
  dimensional outputs `loc` ("mu") and `log_scale` ("alpha").

  Arguments:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer. Default:
      `[512, 512]`.
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed (i.e. NICE bijector). Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    name: A name for ops managed by this function. Default:
      "real_nvp_default_template".
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.

  Returns:
    shift: `Float`-like `Tensor` of shift terms ("mu" in
      [Papamakarios et al.  (2016)][1]).
    log_scale: `Float`-like `Tensor` of log(scale) terms ("alpha" in
      [Papamakarios et al. (2016)][1]).

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior to
      graph execution.

  #### References

  [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  with ops.name_scope(name, "real_nvp_default_template"):
    def _fn(x, output_units):
      """Fully connected MLP parameterized via `real_nvp_template`."""
      # Hidden stack, then a final linear layer producing `output_units`
      # values for shift only, or 2 * `output_units` for shift and
      # log_scale.
      for units in hidden_layers:
        x = layers.dense(
            inputs=x,
            units=units,
            activation=activation,
            *args,
            **kwargs)
      x = layers.dense(
          inputs=x,
          units=(1 if shift_only else 2) * output_units,
          activation=None,
          *args,
          **kwargs)
      if shift_only:
        return x, None
      shift, log_scale = array_ops.split(x, 2, axis=-1)
      return shift, log_scale
    return template_ops.make_template(
        "real_nvp_default_template", _fn)
| drpngx/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py | Python | apache-2.0 | 12,719 | [
"Gaussian"
] | f4ce03c2bbc5412dcb6e61327c52c65901b6d4304c709aa9378f59806c6def91 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module defines PDEntry, which wraps information (composition and energy)
necessary to create phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
import re
import csv
from monty.json import MontyDecoder
from io import open
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from monty.json import MSONable
from monty.string import unicode2str
class PDEntry(MSONable):
    """Wraps the composition and energy data needed for phase-diagram
    construction.

    .. attribute:: name

        Label shown in phase diagrams. Defaults to the reduced formula of
        the composition, but may be set to any display string.

    Args:
        composition: Composition as a pymatgen.core.structure.Composition
        energy: Energy for composition.
        name: Optional parameter to name the entry. Defaults to the reduced
            chemical formula.
        attribute: Optional MSONable attribute of the entry, e.g. a label
            marking it as a newly found compound, used for further analysis
            and plotting purposes.
    """

    def __init__(self, composition, energy, name=None, attribute=None):
        self.energy = energy
        self.composition = Composition(composition)
        self.name = name or self.composition.reduced_formula
        self.attribute = attribute

    @property
    def energy_per_atom(self):
        """Final energy per atom of the composition."""
        return self.energy / self.composition.num_atoms

    @property
    def is_element(self):
        """True if the entry is an element."""
        return self.composition.is_element

    def __repr__(self):
        return "PDEntry : {} with energy = {:.4f}".format(self.composition,
                                                          self.energy)

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        """Return an MSON-serializable dict representation."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "composition": self.composition.as_dict(),
            "energy": self.energy,
            "name": self.name,
            "attribute": self.attribute,
        }

    @classmethod
    def from_dict(cls, d):
        """Reconstruct a PDEntry from its dict representation."""
        return cls(Composition(d["composition"]), d["energy"],
                   d.get("name"), d.get("attribute"))
class GrandPotPDEntry(PDEntry):
    """A PDEntry in the grand-canonical ensemble: its energy is the grand
    potential at the supplied chemical potentials, and the open elements
    are projected out of the composition.

    Args:
        entry: A PDEntry-like object.
        chempots: Chemical potential specification as {Element: float}.
        name: Optional parameter to name the entry. Defaults to the reduced
            chemical formula of the original entry.
    """

    def __init__(self, entry, chempots, name=None):
        comp = entry.composition
        self.original_entry = entry
        self.original_comp = comp
        # Grand potential: E - sum_i mu_i * n_i over the open elements.
        grandpot = entry.energy - sum(comp[el] * pot
                                      for el, pot in chempots.items())
        self.chempots = chempots
        # Keep only elements whose chemical potential is not fixed.
        new_comp_map = {el: comp[el] for el in comp.elements
                        if el not in chempots}
        super(GrandPotPDEntry, self).__init__(new_comp_map, grandpot,
                                              entry.name)
        self.name = name or entry.name

    @property
    def is_element(self):
        """True if the *original* entry is an element."""
        return self.original_comp.is_element

    def __repr__(self):
        chempot_str = " ".join("mu_%s = %.4f" % (el, mu)
                               for el, mu in self.chempots.items())
        return "GrandPotPDEntry with original composition " + \
            "{}, energy = {:.4f}, {}".format(self.original_entry.composition,
                                             self.original_entry.energy,
                                             chempot_str)

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        """Return an MSON-serializable dict representation."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "chempots": {el.symbol: u for el, u in self.chempots.items()},
                "name": self.name}

    @classmethod
    def from_dict(cls, d):
        """Reconstruct a GrandPotPDEntry from its dict representation."""
        chempots = {Element(sym): u for sym, u in d["chempots"].items()}
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(entry, chempots, d["name"])

    def __getattr__(self, a):
        """Fall back to the wrapped original entry for unknown attributes."""
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)
class PDEntryIO(object):
    """
    Utility class to export and import PDEntry to and from csv files, as well
    as to and from json.
    """
    @staticmethod
    def to_csv(filename, entries, latexify_names=False):
        """
        Exports PDEntries to a csv

        Args:
            filename: Filename to write to.
            entries: PDEntries to export.
            latexify_names: Format entry names to be LaTex compatible,
                e.g., Li_{2}O
        """
        # Union of all elements across the entries, sorted by
        # electronegativity, defines the csv columns.
        elements = set()
        for entry in entries:
            elements.update(entry.composition.elements)
        elements = sorted(list(elements), key=lambda a: a.X)
        # Fix: use a context manager so the handle is flushed and closed
        # (the original leaked the file object passed to csv.writer), and
        # drop the redundant function-local `import csv` (already imported
        # at module level).
        # NOTE(review): "wb" follows the Python 2 csv convention; Python 3's
        # csv module expects text mode -- confirm the target interpreter.
        with open(filename, "wb") as f:
            writer = csv.writer(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            writer.writerow(["Name"] + elements + ["Energy"])
            for entry in entries:
                row = [entry.name if not latexify_names
                       else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
                row.extend([entry.composition[el] for el in elements])
                row.append(entry.energy)
                writer.writerow(row)

    @staticmethod
    def from_csv(filename):
        """
        Imports PDEntries from a csv.

        Args:
            filename: Filename to import from.

        Returns:
            List of Elements, List of PDEntries
        """
        with open(filename, "r", encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=unicode2str(","),
                                quotechar=unicode2str("\""),
                                quoting=csv.QUOTE_MINIMAL)
            entries = list()
            header_read = False
            for row in reader:
                if not header_read:
                    # First row: "Name", element columns, "Energy".
                    elements = row[1:(len(row) - 1)]
                    header_read = True
                else:
                    name = row[0]
                    energy = float(row[-1])
                    comp = dict()
                    # Only strictly positive amounts contribute to the
                    # composition.
                    for ind in range(1, len(row) - 1):
                        if float(row[ind]) > 0:
                            comp[Element(elements[ind - 1])] = float(row[ind])
                    entries.append(PDEntry(Composition(comp), energy, name))
        elements = [Element(el) for el in elements]
        return elements, entries
class TransformedPDEntry(PDEntry):
    """
    This class represents a TransformedPDEntry, which allows for a PDEntry to be
    transformed to a different composition coordinate space. It is used in the
    construction of phase diagrams that do not have elements as the terminal
    compositions.

    Args:
        comp: Transformed composition as a Composition.
        original_entry: Original entry that this entry arose from.
    """
    # NOTE: the previous docstring also documented an "energy" argument, but
    # the constructor takes none -- the energy always comes from
    # original_entry.energy.

    def __init__(self, comp, original_entry):
        super(TransformedPDEntry, self).__init__(comp, original_entry.energy)
        self.original_entry = original_entry
        self.name = original_entry.name

    def __getattr__(self, a):
        """
        Delegate attribute to original entry if available.
        """
        if hasattr(self.original_entry, a):
            return getattr(self.original_entry, a)
        raise AttributeError(a)

    def __repr__(self):
        output = ["TransformedPDEntry {}".format(self.composition),
                  " with original composition {}"
                  .format(self.original_entry.composition),
                  ", E = {:.4f}".format(self.original_entry.energy)]
        return "".join(output)

    def __str__(self):
        return self.__repr__()

    def as_dict(self):
        # The (transformed) composition is stored as-is; from_dict passes it
        # straight back to the constructor.
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "entry": self.original_entry.as_dict(),
                "composition": self.composition}

    @classmethod
    def from_dict(cls, d):
        entry = MontyDecoder().process_decoded(d["entry"])
        return cls(d["composition"], entry)
| xhqu1981/pymatgen | pymatgen/phasediagram/entries.py | Python | mit | 9,519 | [
"pymatgen"
] | 3ef961da4198eb9f51cdc14decd539c84a6cffcd1a7303c3153a8b675405ed5f |
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Trinity, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Balazs Hajgato (Vrije Universiteit Brussel)
@author: Robert Qiao (DeepThought HPC Service, Flinders University, Adelaide, Australia)
"""
import glob
import os
import shutil
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.environment import setvar
from easybuild.tools.filetools import apply_regex_substitutions
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_Trinity(EasyBlock):
    """Support for building/installing Trinity."""

    def __init__(self, *args, **kwargs):
        """Initialisation of custom class variables for Trinity."""
        EasyBlock.__init__(self, *args, **kwargs)
        # Trinity is used from its build tree, so build inside the installdir.
        self.build_in_installdir = True

    @staticmethod
    def extra_options():
        """Custom easyconfig parameters for Trinity."""
        # NOTE(review): "pugin" in the description below looks like a typo for
        # "plugin"; left as-is since it is a runtime string.
        extra_vars = {
            'withsampledata': [False, "Include sample data", CUSTOM],
            'bwapluginver': [None, "BWA pugin version", CUSTOM],
            'RSEMmod': [False, "Enable RSEMmod", CUSTOM],
        }
        return EasyBlock.extra_options(extra_vars)

    def butterfly(self):
        """Install procedure for Butterfly (built with ant)."""
        self.log.info("Begin Butterfly")
        # force UTF-8 for the Java tooling used by ant
        setvar("JAVA_TOOL_OPTIONS", "-Dfile.encoding=UTF8")
        dst = os.path.join(self.cfg['start_dir'], 'Butterfly', 'src')
        try:
            os.chdir(dst)
        except OSError as err:
            raise EasyBuildError("Butterfly: failed to change to dst dir %s: %s", dst, err)
        cmd = "ant"
        run_cmd(cmd)
        self.log.info("End Butterfly")

    def chrysalis(self, run=True):
        """Install procedure for Chrysalis.

        When run=False, only compose and return the make flags (used by
        install_step for recent Trinity versions).
        """
        # compose make flags from the toolchain compilers and OpenMP settings
        make_flags = "COMPILER='%s' CPLUSPLUS='%s' CC='%s' " % (os.getenv('CXX'),
                                                                os.getenv('CXX'),
                                                                os.getenv('CC'))
        make_flags += "OMP_FLAGS='%s' OMP_LINK='%s' " % (self.toolchain.get_flag('openmp'),
                                                         os.getenv('LIBS'))
        make_flags += "OPTIM='-O1' SYS_OPT='-O2 %s' " % self.toolchain.get_flag('optarch')
        make_flags += "OPEN_MP=yes UNSUPPORTED=yes DEBUG=no QUIET=yes"
        if run:
            self.log.info("Begin Chrysalis")
            dst = os.path.join(self.cfg['start_dir'], 'Chrysalis')
            try:
                os.chdir(dst)
            except OSError as err:
                raise EasyBuildError("Chrysalis: failed to change to dst dir %s: %s", dst, err)
            run_cmd("make clean")
            run_cmd("make %s" % make_flags)
            self.log.info("End Chrysalis")
        else:
            return make_flags

    def inchworm(self, run=True):
        """Install procedure for Inchworm.

        When run=False, only compose and return the make flags (used by
        install_step for recent Trinity versions).
        """
        make_flags = 'CXXFLAGS="%s %s"' % (os.getenv('CXXFLAGS'), self.toolchain.get_flag('openmp'))
        version = LooseVersion(self.version)
        if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
            make_flags += ' CXX=%s' % os.getenv('CXX')
        if run:
            self.log.info("Begin Inchworm")
            dst = os.path.join(self.cfg['start_dir'], 'Inchworm')
            try:
                os.chdir(dst)
            except OSError as err:
                raise EasyBuildError("Inchworm: failed to change to dst dir %s: %s", dst, err)
            run_cmd('./configure --prefix=%s' % dst)
            run_cmd("make install %s" % make_flags)
            self.log.info("End Inchworm")
        else:
            return make_flags

    def jellyfish(self):
        """Use a separate jellyfish source if it exists, otherwise, just install the bundled jellyfish."""
        self.log.debug("begin jellyfish")
        self.log.debug("startdir: %s", self.cfg['start_dir'])
        cwd = os.getcwd()
        # look for an unpacked jellyfish-* source next to the Trinity tree
        glob_pat = os.path.join(self.cfg['start_dir'], "..", "jellyfish-*")
        jellyfishdirs = glob.glob(glob_pat)
        self.log.debug("glob pattern '%s' yields %s" % (glob_pat, jellyfishdirs))
        if len(jellyfishdirs) == 1 and os.path.isdir(jellyfishdirs[0]):
            jellyfishdir = jellyfishdirs[0]
            # if there is a jellyfish directory
            self.log.info("detected jellyfish directory %s, so using this source", jellyfishdir)
            orig_jellyfishdir = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'jellyfish')
            try:
                # remove original symlink
                os.unlink(orig_jellyfishdir)
            except OSError as err:
                self.log.warning("jellyfish plugin: failed to remove dir %s: %s" % (orig_jellyfishdir, err))
            try:
                # create new one
                os.symlink(jellyfishdir, orig_jellyfishdir)
                os.chdir(orig_jellyfishdir)
            except OSError as err:
                raise EasyBuildError("jellyfish plugin: failed to change dir %s: %s", orig_jellyfishdir, err)
            run_cmd('./configure --prefix=%s' % orig_jellyfishdir)
            cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (os.getenv('CC'), os.getenv('CXX'), os.getenv('CFLAGS'))
            run_cmd(cmd)
            # the installstep is running the jellyfish script, this is a wrapper that will compile .lib/jellyfish
            run_cmd("bin/jellyfish cite")
            # return to original dir
            try:
                os.chdir(cwd)
            except OSError:
                raise EasyBuildError("jellyfish: Could not return to original dir %s", cwd)
        elif jellyfishdirs:
            raise EasyBuildError("Found multiple 'jellyfish-*' directories: %s", jellyfishdirs)
        else:
            self.log.info("no seperate source found for jellyfish, letting Makefile build shipped version")
        self.log.debug("end jellyfish")

    def kmer(self):
        """Install procedure for kmer (Meryl)."""
        self.log.info("Begin Meryl")
        dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', 'kmer')
        try:
            os.chdir(dst)
        except OSError as err:
            raise EasyBuildError("Meryl: failed to change to dst dir %s: %s", dst, err)
        cmd = "./configure.sh"
        run_cmd(cmd)
        # -j 1: kmer's build is not parallel-safe
        cmd = 'make -j 1 CCDEP="%s -MM -MG" CXXDEP="%s -MM -MG"' % (os.getenv('CC'), os.getenv('CXX'))
        run_cmd(cmd)
        cmd = 'make install'
        run_cmd(cmd)
        self.log.info("End Meryl")

    def trinityplugin(self, plugindir, cc=None):
        """Install procedure for Trinity plugins.

        :param plugindir: subdirectory of trinity-plugins to build
        :param cc: C compiler command to use (defaults to $CC)
        """
        self.log.info("Begin %s plugin" % plugindir)
        dst = os.path.join(self.cfg['start_dir'], 'trinity-plugins', plugindir)
        try:
            os.chdir(dst)
        except OSError as err:
            raise EasyBuildError("%s plugin: failed to change to dst dir %s: %s", plugindir, dst, err)
        if not cc:
            cc = os.getenv('CC')
        cmd = "make CC='%s' CXX='%s' CFLAGS='%s'" % (cc, os.getenv('CXX'), os.getenv('CFLAGS'))
        run_cmd(cmd)
        self.log.info("End %s plugin" % plugindir)

    def configure_step(self):
        """No configuration for Trinity."""
        pass

    def build_step(self):
        """No building for Trinity."""
        pass

    def install_step(self):
        """Custom install procedure for Trinity."""
        version = LooseVersion(self.version)
        # very old (2012-era) Trinity versions: build each component separately
        if version > LooseVersion('2012') and version < LooseVersion('2012-10-05'):
            self.inchworm()
            self.chrysalis()
            self.kmer()
            if version < LooseVersion('2.9'):
                self.butterfly()
            bwapluginver = self.cfg['bwapluginver']
            if bwapluginver:
                self.trinityplugin('bwa-%s-patched_multi_map' % bwapluginver)
            if self.cfg['RSEMmod']:
                self.trinityplugin('RSEM-mod', cc=os.getenv('CXX'))
        else:
            # recent versions: patch the top-level Makefile and build from there
            self.jellyfish()
            inchworm_flags = self.inchworm(run=False)
            chrysalis_flags = self.chrysalis(run=False)
            cc = os.getenv('CC')
            cxx = os.getenv('CXX')
            lib_flags = ""
            for lib in ['ncurses', 'zlib']:
                libroot = get_software_root(lib)
                if libroot:
                    lib_flags += " -L%s/lib" % libroot
            if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
                regex_subs = [
                    (r'^( INCHWORM_CONFIGURE_FLAGS\s*=\s*).*$', r'\1%s' % inchworm_flags),
                    (r'^( CHRYSALIS_MAKE_FLAGS\s*=\s*).*$', r'\1%s' % chrysalis_flags),
                ]
            else:
                regex_subs = [
                    (r'^(INCHWORM_CONFIGURE_FLAGS\s*=\s*).*$', r'\1%s' % inchworm_flags),
                    (r'^(CHRYSALIS_MAKE_FLAGS\s*=\s*).*$', r'\1%s' % chrysalis_flags),
                    (r'(/rsem && \$\(MAKE\))\s*$',
                     r'\1 CC=%s CXX="%s %s" CFLAGS_EXTRA="%s"\n' % (cc, cxx, lib_flags, lib_flags)),
                    (r'(/fastool && \$\(MAKE\))\s*$',
                     r'\1 CC="%s -std=c99" CFLAGS="%s ${CFLAGS}"\n' % (cc, lib_flags)),
                ]
            apply_regex_substitutions('Makefile', regex_subs)
            trinity_compiler = None
            comp_fam = self.toolchain.comp_family()
            if comp_fam in [toolchain.INTELCOMP]:
                trinity_compiler = "intel"
            elif comp_fam in [toolchain.GCC]:
                trinity_compiler = "gcc"
            else:
                raise EasyBuildError("Don't know how to set TRINITY_COMPILER for %s compiler", comp_fam)
            explicit_make_args = ''
            if version >= LooseVersion('2.0') and version < LooseVersion('3.0'):
                explicit_make_args = 'all plugins'
            cmd = "make TRINITY_COMPILER=%s %s" % (trinity_compiler, explicit_make_args)
            run_cmd(cmd)
            # butterfly is not included in standard build before v2.9.0
            if version < LooseVersion('2.9'):
                self.butterfly()
        # remove sample data if desired
        if not self.cfg['withsampledata']:
            try:
                shutil.rmtree(os.path.join(self.cfg['start_dir'], 'sample_data'))
            except OSError as err:
                raise EasyBuildError("Failed to remove sample data: %s", err)

    def sanity_check_step(self):
        """Custom sanity check for Trinity."""
        version = LooseVersion(self.version)
        # the separator in the unpacked directory name depends on the version
        if version >= LooseVersion('2.0') and version < LooseVersion('2.3'):
            sep = '-'
        elif version >= LooseVersion('2.3') and version < LooseVersion('2.9'):
            sep = '-Trinity-v'
        elif version >= LooseVersion('2.9') and version < LooseVersion('3.0'):
            sep = '-v'
        else:
            sep = '_r'
        # Chrysalis
        if version >= LooseVersion('2.9') and version < LooseVersion('2000'):
            chrysalis_bin = os.path.join('Chrysalis', 'bin')
            chrysalis_files = ['BubbleUpClustering',
                               'CreateIwormFastaBundle',
                               'QuantifyGraph',
                               'Chrysalis',
                               'GraphFromFasta',
                               'ReadsToTranscripts']
        elif version >= LooseVersion('2.8') and version < LooseVersion('2.9'):
            chrysalis_bin = os.path.join('Chrysalis', 'bin')
            chrysalis_files = ['Chrysalis']
        else:
            chrysalis_bin = 'Chrysalis'
            chrysalis_files = ['Chrysalis']
        chrysalis_bin_files = [os.path.join(chrysalis_bin, x) for x in chrysalis_files]
        # Inchworm
        inchworm_bin = os.path.join('Inchworm', 'bin')
        inchworm_files = ['inchworm']
        if version >= LooseVersion('2.9') and version < LooseVersion('2000'):
            inchworm_files.extend(['FastaToDeBruijn', 'fastaToKmerCoverageStats'])
        inchworm_bin_files = [os.path.join(inchworm_bin, x) for x in inchworm_files]
        path = 'trinityrnaseq%s%s' % (sep, self.version)
        # folders path
        dir_path = ['util']
        if version < LooseVersion('2.9'):
            dir_path.append(os.path.join('Butterfly', 'src', 'bin'))
        # these lists are definitely non-exhaustive, but better than nothing
        custom_paths = {
            'files': [os.path.join(path, x) for x in (inchworm_bin_files + chrysalis_bin_files)],
            'dirs': [os.path.join(path, x) for x in dir_path]
        }
        super(EB_Trinity, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_req_guess(self):
        """Custom tweaks for PATH variable for Trinity."""
        guesses = super(EB_Trinity, self).make_module_req_guess()
        install_rootdir = os.path.basename(self.cfg['start_dir'].strip('/'))
        guesses.update({
            'PATH': [install_rootdir],
            'TRINITY_HOME': [install_rootdir],
        })
        return guesses
| boegel/easybuild-easyblocks | easybuild/easyblocks/t/trinity.py | Python | gpl-2.0 | 14,495 | [
"BWA"
] | 90ab7a3a3ad0ce231d7be41a9d92e9ad36cc17ae968f3286e4cc519f9fd67bb2 |
'''
Main "pipeline" script for the Legacy Survey (DECaLS, MzLS, BASS)
data reductions.
For calling from other scripts, see:
- :py:func:`run_brick`
Or for much more fine-grained control, see the individual stages:
- :py:func:`stage_tims`
- :py:func:`stage_refs`
- :py:func:`stage_outliers`
- :py:func:`stage_halos`
- :py:func:`stage_fit_on_coadds [optional]`
- :py:func:`stage_image_coadds`
- :py:func:`stage_srcs`
- :py:func:`stage_fitblobs`
- :py:func:`stage_coadds`
- :py:func:`stage_wise_forced`
- :py:func:`stage_galex_forced` [optional]
- :py:func:`stage_writecat`
To see the code we run on each "blob" of pixels, see "oneblob.py".
- :py:func:`one_blob`
'''
import sys
import os
import warnings
import numpy as np
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.ttime import Time
from legacypipe.survey import imsave_jpeg
from legacypipe.bits import DQ_BITS, MASKBITS, FITBITS
from legacypipe.utils import RunbrickError, NothingToDoError, iterwrapper, find_unique_pixels
from legacypipe.coadds import make_coadds, write_coadd_images, quick_coadds
from legacypipe.fit_on_coadds import stage_fit_on_coadds
from legacypipe.blobmask import stage_blobmask
from legacypipe.galex import stage_galex_forced
import logging
logger = logging.getLogger('legacypipe.runbrick')
def info(*args):
    """Forward *args* to legacypipe's INFO-level logging helper."""
    from legacypipe.utils import log_info as _log_info
    _log_info(logger, args)
def debug(*args):
    """Forward *args* to legacypipe's DEBUG-level logging helper."""
    from legacypipe.utils import log_debug as _log_debug
    _log_debug(logger, args)
def formatwarning(message, category, filename, lineno, line=None):
    """Custom warnings formatter: a terse one-liner, no file/line info."""
    return f'Warning: {message}'
warnings.formatwarning = formatwarning
def runbrick_global_init():
    # Per-process initializer (e.g. for multiprocessing workers): log the
    # process start and disable the tractor galaxy cache.
    from tractor.galaxy import disable_galaxy_cache
    info('Starting process', os.getpid(), Time()-Time())
    disable_galaxy_cache()
def stage_tims(W=3600, H=3600, pixscale=0.262, brickname=None,
               survey=None,
               survey_blob_mask=None,
               ra=None, dec=None,
               release=None,
               plots=False, ps=None,
               target_extent=None, program_name='runbrick.py',
               bands=None,
               do_calibs=True,
               old_calibs_ok=True,
               splinesky=True,
               subsky=True,
               gaussPsf=False, pixPsf=True, hybridPsf=True,
               normalizePsf=True,
               apodize=False,
               constant_invvar=False,
               read_image_pixels = True,
               min_mjd=None, max_mjd=None,
               gaia_stars=True,
               mp=None,
               record_event=None,
               unwise_dir=None,
               unwise_tr_dir=None,
               unwise_modelsky_dir=None,
               galex_dir=None,
               command_line=None,
               read_parallel=True,
               max_memory_gb=None,
               **kwargs):
    '''
    This is the first stage in the pipeline.  It
    determines which CCD images overlap the brick or region of
    interest, runs calibrations for those images if necessary, and
    then reads the images, creating `tractor.Image` ("tractor image"
    or "tim") objects for them.

    PSF options:

    - *gaussPsf*: boolean.  Single-component circular Gaussian, with
      width set from the header FWHM value.  Useful for quick
      debugging.

    - *pixPsf*: boolean.  Pixelized PsfEx model.

    - *hybridPsf*: boolean.  Hybrid Pixelized PsfEx / Gaussian approx model.

    Sky:

    - *splinesky*: boolean.  If we have to create sky calibs, create SplineSky model rather than ConstantSky?
    - *subsky*: boolean.  Subtract sky model from tims?

    Returns a dict of local variables to carry forward to later stages.
    '''
    from legacypipe.survey import (
        get_git_version, get_version_header, get_dependency_versions,
        wcs_for_brick, read_one_tim)
    from astrometry.util.starutil_numpy import ra2hmsstring, dec2dmsstring
    tlast = Time()
    # 'record_event and record_event(...)' only calls the callback if one
    # was supplied.
    record_event and record_event('stage_tims: starting')
    assert(survey is not None)
    # Get brick object
    custom_brick = (ra is not None)
    if custom_brick:
        from legacypipe.survey import BrickDuck
        # Custom brick; create a fake 'brick' object
        brick = BrickDuck(ra, dec, brickname)
    else:
        brick = survey.get_brick_by_name(brickname)
        if brick is None:
            raise RunbrickError('No such brick: "%s"' % brickname)
    brickid = brick.brickid
    brickname = brick.brickname
    # Get WCS object describing brick
    targetwcs = wcs_for_brick(brick, W=W, H=H, pixscale=pixscale)
    if target_extent is not None:
        # Cut down to a sub-region of the brick, in pixel coordinates.
        (x0,x1,y0,y1) = target_extent
        W = x1-x0
        H = y1-y0
        targetwcs = targetwcs.get_subimage(x0, y0, W, H)
        pixscale = targetwcs.pixel_scale()
    # RA,Dec of the four corners (closed polygon, FITS 1-indexed pixels)
    targetrd = np.array([targetwcs.pixelxy2radec(x,y) for x,y in
                         [(1,1),(W,1),(W,H),(1,H),(1,1)]])
    # custom brick -- set RA,Dec bounds
    if custom_brick:
        brick.ra1,_ = targetwcs.pixelxy2radec(W, H/2)
        brick.ra2,_ = targetwcs.pixelxy2radec(1, H/2)
        _, brick.dec1 = targetwcs.pixelxy2radec(W/2, 1)
        _, brick.dec2 = targetwcs.pixelxy2radec(W/2, H)
    # Create FITS header with version strings
    gitver = get_git_version()
    version_header = get_version_header(program_name, survey.survey_dir, release,
                                        git_version=gitver)
    deps = get_dependency_versions(unwise_dir, unwise_tr_dir, unwise_modelsky_dir, galex_dir)
    for name,value,comment in deps:
        version_header.add_record(dict(name=name, value=value, comment=comment))
    if command_line is not None:
        version_header.add_record(dict(name='CMDLINE', value=command_line,
                                       comment='runbrick command-line'))
    version_header.add_record(dict(name='BRICK', value=brickname,
                                comment='LegacySurveys brick RRRr[pm]DDd'))
    version_header.add_record(dict(name='BRICKID' , value=brickid,
                                comment='LegacySurveys brick id'))
    version_header.add_record(dict(name='RAMIN' , value=brick.ra1,
                                comment='Brick RA min (deg)'))
    version_header.add_record(dict(name='RAMAX' , value=brick.ra2,
                                comment='Brick RA max (deg)'))
    version_header.add_record(dict(name='DECMIN' , value=brick.dec1,
                                comment='Brick Dec min (deg)'))
    version_header.add_record(dict(name='DECMAX' , value=brick.dec2,
                                comment='Brick Dec max (deg)'))
    # Add NOAO-requested headers
    version_header.add_record(dict(
        name='RA', value=ra2hmsstring(brick.ra, separator=':'), comment='Brick center RA (hms)'))
    version_header.add_record(dict(
        name='DEC', value=dec2dmsstring(brick.dec, separator=':'), comment='Brick center DEC (dms)'))
    version_header.add_record(dict(
        name='CENTRA', value=brick.ra, comment='Brick center RA (deg)'))
    version_header.add_record(dict(
        name='CENTDEC', value=brick.dec, comment='Brick center Dec (deg)'))
    for i,(r,d) in enumerate(targetrd[:4]):
        version_header.add_record(dict(
            name='CORN%iRA' %(i+1), value=r, comment='Brick corner RA (deg)'))
        version_header.add_record(dict(
            name='CORN%iDEC'%(i+1), value=d, comment='Brick corner Dec (deg)'))
    # Find CCDs
    ccds = survey.ccds_touching_wcs(targetwcs, ccdrad=None)
    if ccds is None:
        raise NothingToDoError('No CCDs touching brick')
    debug(len(ccds), 'CCDs touching target WCS')
    survey.drop_cache()
    if 'ccd_cuts' in ccds.get_columns():
        # keep only CCDs that pass all quality cuts (ccd_cuts bitmask == 0)
        ccds.cut(ccds.ccd_cuts == 0)
        debug(len(ccds), 'CCDs survive cuts')
    else:
        warnings.warn('Not applying CCD cuts')
    # Cut on bands to be used
    ccds.cut(np.array([b in bands for b in ccds.filter]))
    debug('Cut to', len(ccds), 'CCDs in bands', ','.join(bands))
    debug('Cutting on CCDs to be used for fitting...')
    I = survey.ccds_for_fitting(brick, ccds)
    if I is not None:
        debug('Cutting to', len(I), 'of', len(ccds), 'CCDs for fitting.')
        ccds.cut(I)
    if min_mjd is not None:
        ccds.cut(ccds.mjd_obs >= min_mjd)
        debug('Cut to', len(ccds), 'after MJD', min_mjd)
    if max_mjd is not None:
        ccds.cut(ccds.mjd_obs <= max_mjd)
        debug('Cut to', len(ccds), 'before MJD', max_mjd)
    # Create Image objects for each CCD
    ims = []
    info('Keeping', len(ccds), 'CCDs:')
    for ccd in ccds:
        im = survey.get_image_object(ccd)
        if survey.cache_dir is not None:
            im.check_for_cached_files(survey)
        ims.append(im)
        info(' ', im, im.band, 'expnum', im.expnum, 'exptime', im.exptime, 'propid', ccd.propid,
             'seeing %.2f' % (ccd.fwhm*im.pixscale), 'MJD %.3f' % ccd.mjd_obs,
             'object', getattr(ccd, 'object', '').strip(), '\n ', im.print_imgpath)
    tnow = Time()
    debug('Finding images touching brick:', tnow-tlast)
    tlast = tnow
    if max_memory_gb:
        # Estimate total memory required for tim pixels
        mem = sum([im.estimate_memory_required(radecpoly=targetrd,
                                               mywcs=survey.get_approx_wcs(ccd))
                   for im,ccd in zip(ims,ccds)])
        info('Estimated memory required: %.1f GB' % (mem/1e9))
        if mem / 1e9 > max_memory_gb:
            raise RuntimeError('Too much memory required: %.1f > %.1f GB' % (mem/1e9, max_memory_gb))
    if do_calibs:
        from legacypipe.survey import run_calibs
        record_event and record_event('stage_tims: starting calibs')
        kwa = dict(git_version=gitver, survey=survey,
                   old_calibs_ok=old_calibs_ok,
                   survey_blob_mask=survey_blob_mask)
        if gaussPsf:
            kwa.update(psfex=False)
        if splinesky:
            kwa.update(splinesky=True)
        if not gaia_stars:
            kwa.update(gaia=False)
        # Run calibrations
        args = [(im, kwa) for im in ims]
        mp.map(run_calibs, args)
        tnow = Time()
        debug('Calibrations:', tnow-tlast)
        tlast = tnow
    # Read Tractor images
    args = [(im, targetrd, dict(gaussPsf=gaussPsf, pixPsf=pixPsf,
                                hybridPsf=hybridPsf, normalizePsf=normalizePsf,
                                subsky=subsky,
                                apodize=apodize,
                                constant_invvar=constant_invvar,
                                pixels=read_image_pixels,
                                old_calibs_ok=old_calibs_ok))
            for im in ims]
    record_event and record_event('stage_tims: starting read_tims')
    if read_parallel:
        tims = list(mp.map(read_one_tim, args))
    else:
        tims = list(map(read_one_tim, args))
    record_event and record_event('stage_tims: done read_tims')
    tnow = Time()
    debug('Read', len(ccds), 'images:', tnow-tlast)
    tlast = tnow
    # Cut the table of CCDs to match the 'tims' list
    # (read_one_tim returns None for images that could not be read)
    I = np.array([i for i,tim in enumerate(tims) if tim is not None])
    ccds.cut(I)
    tims = [tim for tim in tims if tim is not None]
    assert(len(ccds) == len(tims))
    if len(tims) == 0:
        raise NothingToDoError('No photometric CCDs touching brick.')
    # Check calibration product versions
    for tim in tims:
        for cal,ver in [('sky', tim.skyver), ('psf', tim.psfver)]:
            if tim.plver.strip() != ver[1].strip():
                warnings.warn(('Image "%s" PLVER is "%s" but %s calib was run'
                               +' on PLVER "%s"') % (str(tim), tim.plver, cal, ver[1]))
    # Add additional columns to the CCDs table.
    ccds.ccd_x0 = np.array([tim.x0 for tim in tims]).astype(np.int16)
    ccds.ccd_y0 = np.array([tim.y0 for tim in tims]).astype(np.int16)
    ccds.ccd_x1 = np.array([tim.x0 + tim.shape[1]
                            for tim in tims]).astype(np.int16)
    ccds.ccd_y1 = np.array([tim.y0 + tim.shape[0]
                            for tim in tims]).astype(np.int16)
    # RA,Dec of the four corners of each tim's sub-WCS
    rd = np.array([[tim.subwcs.pixelxy2radec(1, 1)[-2:],
                    tim.subwcs.pixelxy2radec(1, y1-y0)[-2:],
                    tim.subwcs.pixelxy2radec(x1-x0, 1)[-2:],
                    tim.subwcs.pixelxy2radec(x1-x0, y1-y0)[-2:]]
                   for tim,x0,y0,x1,y1 in
                   zip(tims, ccds.ccd_x0+1, ccds.ccd_y0+1,
                       ccds.ccd_x1, ccds.ccd_y1)])
    # Project those corners into brick pixel space to get bounding boxes.
    _,x,y = targetwcs.radec2pixelxy(rd[:,:,0], rd[:,:,1])
    ccds.brick_x0 = np.floor(np.min(x, axis=1)).astype(np.int16)
    ccds.brick_x1 = np.ceil (np.max(x, axis=1)).astype(np.int16)
    ccds.brick_y0 = np.floor(np.min(y, axis=1)).astype(np.int16)
    ccds.brick_y1 = np.ceil (np.max(y, axis=1)).astype(np.int16)
    ccds.psfnorm = np.array([tim.psfnorm for tim in tims])
    ccds.galnorm = np.array([tim.galnorm for tim in tims])
    ccds.propid = np.array([tim.propid for tim in tims])
    ccds.plver = np.array([tim.plver for tim in tims])
    ccds.skyver = np.array([tim.skyver[0] for tim in tims])
    ccds.psfver = np.array([tim.psfver[0] for tim in tims])
    ccds.skyplver = np.array([tim.skyver[1] for tim in tims])
    ccds.psfplver = np.array([tim.psfver[1] for tim in tims])
    # Cut "bands" down to just the bands for which we have images.
    timbands = [tim.band for tim in tims]
    bands = [b for b in bands if b in timbands]
    debug('Cut bands to', bands)
    if plots:
        from legacypipe.runbrick_plots import tim_plots
        tim_plots(tims, bands, ps)
    # Add header cards about which bands and cameras are involved.
    for band in survey.allbands:
        hasit = band in bands
        version_header.add_record(dict(
            name='BRICK_%s' % band.upper(), value=hasit,
            comment='Does band %s touch this brick?' % band))
        cams = np.unique([tim.imobj.camera for tim in tims
                          if tim.band == band])
        version_header.add_record(dict(
            name='CAMS_%s' % band.upper(), value=' '.join(cams),
            comment='Cameras contributing band %s' % band))
    version_header.add_record(dict(name='BANDS', value=''.join(bands),
                                   comment='Bands touching this brick'))
    version_header.add_record(dict(name='NBANDS', value=len(bands),
                                   comment='Number of bands in this catalog'))
    for i,band in enumerate(bands):
        version_header.add_record(dict(name='BAND%i' % i, value=band,
                                       comment='Band name in this catalog'))
    _add_stage_version(version_header, 'TIMS', 'tims')
    # Bundle up the local variables that later stages need.
    keys = ['version_header', 'targetrd', 'pixscale', 'targetwcs', 'W','H',
            'tims', 'ps', 'brickid', 'brickname', 'brick', 'custom_brick',
            'target_extent', 'ccds', 'bands', 'survey']
    L = locals()
    rtn = dict([(k,L[k]) for k in keys])
    return rtn
def _add_stage_version(version_header, short, stagename):
    """Record the current legacypipe git version for one pipeline stage."""
    from legacypipe.survey import get_git_version
    ver = get_git_version()
    version_header.add_record(dict(name='VER_%s' % short, value=ver,
                                   help='legacypipe version for stage_%s' % stagename))
def stage_refs(survey=None,
               brick=None,
               brickname=None,
               brickid=None,
               pixscale=None,
               targetwcs=None,
               bands=None,
               version_header=None,
               tycho_stars=True,
               gaia_stars=True,
               large_galaxies=True,
               star_clusters=True,
               plots=False, ps=None,
               record_event=None,
               tims=None,
               **kwargs):
    '''
    Pipeline stage: fetch reference sources (Tycho-2, Gaia, large
    galaxies, star clusters) overlapping the brick, write them to the
    'ref-sources' output, and split off do-not-fit and cluster entries
    for later use.
    '''
    from legacypipe.reference import get_reference_sources
    record_event and record_event('stage_refs: starting')
    _add_stage_version(version_header, 'REFS', 'refs')
    refstars,refcat = get_reference_sources(survey, targetwcs, pixscale, bands,
                                            tycho_stars=tycho_stars,
                                            gaia_stars=gaia_stars,
                                            large_galaxies=large_galaxies,
                                            star_clusters=star_clusters,
                                            plots=plots, ps=ps)
    # "refstars" is a table
    # "refcat" is a list of tractor Sources
    # They are aligned
    if refstars:
        from legacypipe.units import get_units_for_columns
        assert(len(refstars) == len(refcat))
        cols = ['ra', 'dec', 'ref_cat', 'ref_id', 'mag',
                'istycho', 'isgaia', 'islargegalaxy', 'iscluster',
                'isbright', 'ismedium', 'freezeparams', 'pointsource', 'donotfit', 'in_bounds',
                'ba', 'pa', 'decam_mag_g', 'decam_mag_r', 'decam_mag_i', 'decam_mag_z',
                'zguess', 'mask_mag', 'radius', 'keep_radius', 'radius_pix', 'ibx', 'iby',
                'ref_epoch', 'pmra', 'pmdec', 'parallax',
                'ra_ivar', 'dec_ivar', 'pmra_ivar', 'pmdec_ivar', 'parallax_ivar',
                # Gaia
                'phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag', 'phot_g_mean_flux_over_error',
                'phot_bp_mean_flux_over_error', 'phot_rp_mean_flux_over_error', 'phot_g_n_obs',
                'phot_bp_n_obs', 'phot_rp_n_obs', 'phot_variable_flag', 'astrometric_excess_noise',
                'astrometric_excess_noise_sig', 'astrometric_n_obs_al', 'astrometric_n_good_obs_al',
                'astrometric_weight_al', 'duplicated_source', 'a_g_val', 'e_bp_min_rp_val',
                'phot_bp_rp_excess_factor', 'astrometric_sigma5d_max', 'astrometric_params_solved',
                ]
        # Drop columns that don't exist (because one of the ref catalogs has no entries or is
        # not being used)
        refcols = refstars.get_columns()
        cols = [c for c in cols if c in refcols]
        extra_units = dict(zguess='mag', pa='deg', radius='deg', keep_radius='deg')
        units = get_units_for_columns(cols, extras=extra_units)
        with survey.write_output('ref-sources', brick=brickname) as out:
            refstars.writeto(None, fits_object=out.fits, primheader=version_header,
                             columns=cols, units=units)
    T_dup = None
    T_clusters = None
    if refstars:
        # Pull out reference sources flagged do-not-fit; we add them
        # back in (much) later.  These are Gaia sources near the
        # centers of SGA large galaxies, so we want to propagate the
        # Gaia catalog information, but don't want to fit them.
        I, = np.nonzero(refstars.donotfit)
        if len(I):
            T_dup = refstars[I]
        # Pull out star clusters too.
        I, = np.nonzero(refstars.iscluster)
        if len(I):
            T_clusters = refstars[I]
        # Drop from refstars & refcat
        drop = np.logical_or(refstars.donotfit, refstars.iscluster)
        if np.any(drop):
            I, = np.nonzero(np.logical_not(drop))
            refstars.cut(I)
            refcat = [refcat[i] for i in I]
            assert(len(refstars) == len(refcat))
        del I,drop
    if plots and refstars:
        # Diagnostic plots: forced-photometry cutouts of the brightest
        # reference stars on each tim.
        import pylab as plt
        from tractor import Tractor
        for tim in tims:
            I = np.flatnonzero(refstars.istycho | refstars.isgaia)
            stars = refstars[I]
            info(len(stars), 'ref stars')
            stars.index = I
            ok,xx,yy = tim.subwcs.radec2pixelxy(stars.ra, stars.dec)
            xx -= 1.
            yy -= 1.
            stars.xx = xx
            stars.yy = yy
            h,w = tim.shape
            edge = 25
            stars.cut((xx > edge) * (yy > edge) * (xx < w-1-edge) * (yy < h-1-edge))
            info(len(stars), 'are within tim', tim.name)
            # brightest first
            K = np.argsort(stars.mag)
            stars.cut(K)
            plt.clf()
            for i in range(len(stars)):
                if i >= 5:
                    break
                src = refcat[stars.index[i]].copy()
                tr = Tractor([tim], [src])
                tr.freezeParam('images')
                src.freezeAllBut('brightness')
                src.getBrightness().freezeAllBut(tim.band)
                try:
                    from tractor.ceres_optimizer import CeresOptimizer
                    ceres_block = 8
                    tr.optimizer = CeresOptimizer(BW=ceres_block, BH=ceres_block)
                except ImportError:
                    # fall back to LSQR if Ceres is unavailable
                    from tractor.lsqr_optimizer import LsqrOptimizer
                    tr.optimizer = LsqrOptimizer()
                R = tr.optimize_forced_photometry(shared_params=False, wantims=True)
                src.thawAllParams()
                y = int(stars.yy[i])
                x = int(stars.xx[i])
                sz = edge
                sl = slice(y-sz, y+sz+1), slice(x-sz, x+sz+1)
                for data,mod,ie,chi,roi in R.ims1:
                    print('x,y', x, y, 'tim shape', tim.shape, 'slice', sl,
                          'roi', roi, 'data size', data.shape)
                    subimg = data[sl]
                    mn,mx = np.percentile(subimg.ravel(), [25,99])
                    mx = subimg.max()
                    ima = dict(origin='lower', interpolation='nearest', vmin=mn, vmax=mx)
                    plt.subplot(3,5, 1 + i)
                    plt.imshow(data[sl], **ima)
                    plt.subplot(3,5, 1 + 5 + i)
                    plt.imshow(mod[sl], **ima)
                    plt.subplot(3,5, 1 + 2*5 + i)
                    plt.imshow(chi[sl], origin='lower', interpolation='nearest', vmin=-5, vmax=+5)
            plt.suptitle('Ref stars: %s' % tim.name)
            ps.savefig()
    # Bundle up the local variables that later stages need.
    keys = ['refstars', 'gaia_stars', 'T_dup', 'T_clusters', 'version_header',
            'refcat']
    L = locals()
    rtn = dict([(k,L[k]) for k in keys])
    return rtn
def stage_outliers(tims=None, targetwcs=None, W=None, H=None, bands=None,
                   mp=None, nsigma=None, plots=None, ps=None, record_event=None,
                   survey=None, brickname=None, version_header=None,
                   refstars=None, outlier_mask_file=None,
                   outliers=True, cache_outliers=False,
                   **kwargs):
    '''This pipeline stage tries to detect artifacts in the individual
    exposures, by blurring all images in the same band to the same PSF size,
    then searching for outliers.

    *cache_outliers*: bool: if the outliers-mask*.fits.fz file exists
    (from a previous run), use it. We turn this off in production
    because we still want to create the JPEGs and the checksum entry
    for the outliers file.

    Side effects: masked pixels in *tims* are patched from the coadd and
    outlier pixels are masked; "before"/"after" JPEG coadds and the
    outlier masks are written via *survey.write_output*.

    Returns a dict with the (possibly modified) 'tims' and 'version_header'.
    '''
    from legacypipe.outliers import patch_from_coadd, mask_outlier_pixels, read_outlier_mask_file
    record_event and record_event('stage_outliers: starting')
    _add_stage_version(version_header, 'OUTL', 'outliers')
    version_header.add_record(dict(name='OUTLIER',
                                   value=outliers,
                                   help='Are we applying outlier rejection?'))
    info('outlier_mask_file:', outlier_mask_file)
    # Check for existing MEF containing masks for all the chips we need.
    # If rejection is disabled, or a cached mask file was successfully read
    # (and caching is enabled), skip the whole detection step.
    if (outliers and
        not (cache_outliers and
             read_outlier_mask_file(survey, tims, brickname, outlier_mask_file=outlier_mask_file,
                                    output='both'))):
        # Make before-n-after plots (before)
        t0 = Time()
        C = make_coadds(tims, bands, targetwcs, mp=mp, sbscale=False,
                        allmasks=False, coweights=False)
        with survey.write_output('outliers-pre', brick=brickname) as out:
            rgb,kwa = survey.get_rgb(C.coimgs, bands)
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            del rgb
        info('"Before" coadds:', Time()-t0)
        # Patch individual-CCD masked pixels from a coadd
        patch_from_coadd(C.coimgs, targetwcs, bands, tims, mp=mp)
        del C
        t0 = Time()
        make_badcoadds = True
        # Mask outlier pixels in-place in the tims; returns coadds of the
        # positive and negative rejected pixels for diagnostic JPEGs.
        badcoaddspos, badcoaddsneg = mask_outlier_pixels(survey, tims, bands, targetwcs, brickname, version_header,
                                                         mp=mp, plots=plots, ps=ps, make_badcoadds=make_badcoadds,
                                                         refstars=refstars)
        info('Masking outliers:', Time()-t0)
        # Make before-n-after plots (after)
        t0 = Time()
        C = make_coadds(tims, bands, targetwcs, mp=mp, sbscale=False,
                        allmasks=False, coweights=False)
        with survey.write_output('outliers-post', brick=brickname) as out:
            rgb,kwa = survey.get_rgb(C.coimgs, bands)
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            del rgb
        del C
        with survey.write_output('outliers-masked-pos', brick=brickname) as out:
            rgb,kwa = survey.get_rgb(badcoaddspos, bands)
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            del rgb
        del badcoaddspos
        with survey.write_output('outliers-masked-neg', brick=brickname) as out:
            rgb,kwa = survey.get_rgb(badcoaddsneg, bands)
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            del rgb
        del badcoaddsneg
        info('"After" coadds:', Time()-t0)
    return dict(tims=tims, version_header=version_header)
def stage_halos(pixscale=None, targetwcs=None,
                W=None,H=None,
                bands=None, ps=None, tims=None,
                plots=False, plots2=False,
                brickname=None,
                version_header=None,
                mp=None, nsigma=None,
                survey=None, brick=None,
                refstars=None,
                star_halos=True,
                old_calibs_ok=True,
                record_event=None,
                **kwargs):
    '''Pipeline stage: subtract halo profiles of reference (Gaia point-source)
    stars from the individual images *tims*, in place.

    Returns a dict with the (possibly modified) 'tims' and 'version_header'.
    '''
    if record_event:
        record_event('stage_halos: starting')
    _add_stage_version(version_header, 'HALO', 'halos')
    rtn = dict(tims=tims, version_header=version_header)
    # Subtraction is optional, and needs reference stars to work on.
    if not (star_halos and refstars):
        return rtn
    # Halos are only subtracted for Gaia point sources.
    Igaia, = np.nonzero(refstars.isgaia * refstars.pointsource)
    debug(len(Igaia), 'stars for halo subtraction')
    if len(Igaia) == 0:
        return rtn
    from legacypipe.halos import subtract_halos
    halostars = refstars[Igaia]
    coimgs = None
    if plots:
        from legacypipe.runbrick_plots import halo_plots_before, halo_plots_after
        coimgs = halo_plots_before(tims, bands, targetwcs, halostars, ps)
    # Modifies tim data in place.
    subtract_halos(tims, halostars, bands, mp, plots, ps, old_calibs_ok=old_calibs_ok)
    if plots:
        halo_plots_after(tims, bands, targetwcs, halostars, coimgs, ps)
    return rtn
def stage_image_coadds(survey=None, targetwcs=None, bands=None, tims=None,
                       brickname=None, version_header=None,
                       plots=False, ps=None, coadd_bw=False, W=None, H=None,
                       brick=None, blobmap=None, lanczos=True, ccds=None,
                       write_metrics=True,
                       minimal_coadds=False,
                       mp=None, record_event=None,
                       co_sky=None,
                       custom_brick=False,
                       refstars=None,
                       T_clusters=None,
                       saturated_pix=None,
                       less_masking=False,
                       **kwargs):
    from legacypipe.utils import copy_header_with_wcs
    record_event and record_event('stage_image_coadds: starting')
    '''
    Immediately after reading the images, we can create coadds of just
    the image products. Later, full coadds including the models will
    be created (in `stage_coadds`). But it's handy to have the coadds
    early on, to diagnose problems or just to look at the data.
    '''
    # Build primary header: version records plus product type.
    primhdr = fitsio.FITSHDR()
    for r in version_header.records():
        primhdr.add_record(r)
    primhdr.add_record(dict(name='PRODTYPE', value='ccdinfo',
                            comment='NOAO data product type'))
    # Write per-brick CCDs table
    with survey.write_output('ccds-table', brick=brickname) as out:
        ccds.writeto(None, fits_object=out.fits, primheader=primhdr)
    # Coadd options: detection maps are only needed for the full (non-minimal) run.
    kw = dict(ngood=True, coweights=False)
    if minimal_coadds:
        kw.update(allmasks=False)
    else:
        kw.update(detmaps=True)
    C = make_coadds(tims, bands, targetwcs, lanczos=lanczos,
                    callback=write_coadd_images,
                    callback_args=(survey, brickname, version_header, tims,
                                   targetwcs, co_sky),
                    mp=mp, plots=plots, ps=ps, **kw)
    if not minimal_coadds:
        # interim maskbits
        from legacypipe.bits import IN_BLOB
        refmap = get_blobiter_ref_map(refstars, T_clusters, less_masking, targetwcs)
        # Construct a mask bits map
        maskbits = np.zeros((H,W), np.int32)
        # !PRIMARY
        if not custom_brick:
            U = find_unique_pixels(targetwcs, W, H, None,
                                   brick.ra1, brick.ra2, brick.dec1, brick.dec2)
            maskbits |= MASKBITS['NPRIMARY'] * np.logical_not(U).astype(np.int32)
            del U
        # BRIGHT: translate reference-source blob bits into maskbits.
        if refmap is not None:
            maskbits |= MASKBITS['BRIGHT'] * ((refmap & IN_BLOB['BRIGHT'] ) > 0)
            maskbits |= MASKBITS['MEDIUM'] * ((refmap & IN_BLOB['MEDIUM'] ) > 0)
            maskbits |= MASKBITS['GALAXY'] * ((refmap & IN_BLOB['GALAXY'] ) > 0)
            maskbits |= MASKBITS['CLUSTER'] * ((refmap & IN_BLOB['CLUSTER']) > 0)
            del refmap
        # SATUR: one bit per band of saturated pixels.
        if saturated_pix is not None:
            for b, sat in zip(bands, saturated_pix):
                maskbits |= (MASKBITS['SATUR_' + b.upper()] * sat).astype(np.int32)
        # ALLMASK_{g,r,z}
        for b,allmask in zip(bands, C.allmasks):
            bitname = 'ALLMASK_' + b.upper()
            if not bitname in MASKBITS:
                warnings.warn('Skipping ALLMASK for band %s' % b)
                continue
            maskbits |= (MASKBITS[bitname] * (allmask > 0))
        # omitting maskbits header cards, bailout, & WISE
        hdr = copy_header_with_wcs(version_header, targetwcs)
        with survey.write_output('maskbits', brick=brickname, shape=maskbits.shape) as out:
            out.fits.write(maskbits, header=hdr, extname='MASKBITS')
    # Sims: coadds of galaxy sims only, image only
    if hasattr(tims[0], 'sims_image'):
        sims_coadd,_ = quick_coadds(
            tims, bands, targetwcs, images=[tim.sims_image for tim in tims])
    if not minimal_coadds:
        # Depth histogram (point-source & galaxy detection inverse-variances).
        D = _depth_histogram(brick, targetwcs, bands, C.psfdetivs, C.galdetivs)
        with survey.write_output('depth-table', brick=brickname) as out:
            D.writeto(None, fits_object=out.fits)
        del D
    coadd_list= [('image', C.coimgs)]
    if hasattr(tims[0], 'sims_image'):
        coadd_list.append(('simscoadd', sims_coadd))
    # Write a JPEG for each coadd product (plus a blob-outlined variant).
    for name,ims in coadd_list:
        rgb,kwa = survey.get_rgb(ims, bands)
        del ims
        with survey.write_output(name + '-jpeg', brick=brickname) as out:
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            debug('Wrote', out.fn)
        # Blob-outlined version
        if blobmap is not None:
            from scipy.ndimage.morphology import binary_dilation
            # Outline = dilated blob footprint XOR the footprint itself.
            outline = np.logical_xor(
                binary_dilation(blobmap >= 0, structure=np.ones((3,3))),
                (blobmap >= 0))
            # coadd_bw: grayscale image; promote to 3-channel so we can color it.
            if len(rgb.shape) == 2:
                rgb = np.repeat(rgb[:,:,np.newaxis], 3, axis=2)
            # Outline in green
            rgb[:,:,0][outline] = 0
            rgb[:,:,1][outline] = 1
            rgb[:,:,2][outline] = 0
            with survey.write_output(name+'blob-jpeg', brick=brickname) as out:
                imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
                debug('Wrote', out.fn)
            # write out blob map
            if write_metrics:
                hdr = copy_header_with_wcs(version_header, targetwcs)
                hdr.add_record(dict(name='IMTYPE', value='blobmap',
                                    comment='LegacySurveys image type'))
                with survey.write_output('blobmap', brick=brickname,
                                         shape=blobmap.shape) as out:
                    out.fits.write(blobmap, header=hdr)
        del rgb
    del coadd_list
    del C
    return None
def stage_srcs(pixscale=None, targetwcs=None,
               W=None,H=None,
               bands=None, ps=None, tims=None,
               plots=False, plots2=False,
               brickname=None,
               version_header=None,
               mp=None, nsigma=None,
               saddle_fraction=None,
               saddle_min=None,
               survey=None, brick=None,
               refcat=None, refstars=None,
               T_clusters=None,
               ccds=None,
               ubercal_sky=False,
               nsatur=None,
               record_event=None,
               large_galaxies=True,
               gaia_stars=True,
               blob_dilate=None,
               **kwargs):
    '''
    In this stage we run SED-matched detection to find objects in the
    images. For each object detected, a `tractor` source object is
    created, initially a `tractor.PointSource`. In this stage, the
    sources are also split into "blobs" of overlapping pixels. Each
    of these blobs will be processed independently.

    Returns (via the pipeline dict) the merged source table 'T', the
    row-aligned tractor catalog 'cat', the blob segmentation
    ('blobmap', 'blobsrcs', 'blobslices'), per-band saturation masks,
    and the per-band median sky values 'co_sky'.
    '''
    from tractor import Catalog
    from legacypipe.detection import (detection_maps, merge_hot_satur,
                                      run_sed_matched_filters, segment_and_group_sources)
    from scipy.ndimage.morphology import binary_dilation
    record_event and record_event('stage_srcs: starting')
    _add_stage_version(version_header, 'SRCS', 'srcs')
    tlast = Time()
    avoid_map = None
    avoid_xyr = []
    if refstars:
        # Don't detect new sources where we already have reference stars
        # To treat fast-moving stars, we evaluate proper motions at each image
        # epoch and exclude the set of integer pixel locations.
        # Init with ref sources without proper motions:
        I = np.flatnonzero(refstars.in_bounds * (refstars.ref_epoch == 0) *
                           np.logical_not(refstars.islargegalaxy))
        xy = set(zip(refstars.ibx[I], refstars.iby[I]))
        ns = len(xy)
        # For moving stars, evaluate position at epoch of each input image
        I = np.flatnonzero(refstars.in_bounds * (refstars.ref_epoch > 0) *
                           np.logical_not(refstars.islargegalaxy))
        if len(I):
            from legacypipe.survey import radec_at_mjd
            for tim in tims:
                ra,dec = radec_at_mjd(
                    refstars.ra[I], refstars.dec[I], refstars.ref_epoch[I].astype(float),
                    refstars.pmra[I], refstars.pmdec[I], refstars.parallax[I],
                    tim.time.toMjd())
                _,xx,yy = targetwcs.radec2pixelxy(ra, dec)
                # (xx,yy are 1-indexed FITS pixel coords; convert to 0-indexed)
                xy.update(zip(np.round(xx-1.).astype(int), np.round(yy-1.).astype(int)))
        debug('Avoiding', ns, 'stationary and', len(xy)-ns, '(from %i stars) pixels' % np.sum(refstars.in_bounds * (refstars.ref_epoch > 0)))
        # Add a ~1" exclusion zone around reference stars
        # (assuming pixel_scale ~ 0.25")
        r_excl = 4
        avoid_xyr.extend([(x,y,r_excl) for x,y in xy])
        # (We tried a larger exclusion radius on SGA sources, for
        # pre-burning SGA catalog; results were so-so)
        r_sga_excl = r_excl
        J = np.flatnonzero(refstars.islargegalaxy * refstars.in_bounds)
        avoid_xyr.extend([(x,y,r_sga_excl) for x,y in zip(refstars.ibx[J], refstars.iby[J])])
    avoid_xyr = np.array(avoid_xyr, dtype=np.int32)
    if len(avoid_xyr) > 0:
        avoid_x = avoid_xyr[:,0]
        avoid_y = avoid_xyr[:,1]
        avoid_r = avoid_xyr[:,2]
    else:
        avoid_x = avoid_y = avoid_r = np.array([], dtype=np.int32)
    del avoid_xyr
    if T_clusters is not None and len(T_clusters) > 0:
        from legacypipe.reference import get_reference_map
        info('Avoiding source detection in', len(T_clusters), 'CLUSTER masks')
        avoid_map = (get_reference_map(targetwcs, T_clusters) != 0)
    record_event and record_event('stage_srcs: detection maps')
    tnow = Time()
    debug('Rendering detection maps...')
    detmaps, detivs, satmaps = detection_maps(tims, targetwcs, bands, mp,
                                              apodize=10, nsatur=nsatur)
    tnow = Time()
    debug('Detmaps:', tnow-tlast)
    tlast = tnow
    record_event and record_event('stage_srcs: sources')
    if plots:
        import pylab as plt
        for band,detmap,satmap in zip(bands, detmaps, satmaps):
            plt.clf()
            plt.subplot(1,2,1)
            plt.imshow(detmap, origin='lower', interpolation='nearest')
            plt.subplot(1,2,2)
            plt.imshow(satmap, origin='lower', interpolation='nearest', vmin=0, vmax=1, cmap='hot')
            plt.suptitle('%s detmap/satmap' % band)
            ps.savefig()
    # Expand the mask around saturated pixels to avoid generating
    # peaks at the edge of the mask.
    saturated_pix = [binary_dilation(satmap > 0, iterations=4) for satmap in satmaps]
    # Formerly, we generated sources for each saturated blob, but since we now initialize
    # with Tycho-2 and Gaia stars and large galaxies, not needed.
    if plots:
        from legacypipe.runbrick_plots import detection_plots
        detection_plots(detmaps, detivs, bands, saturated_pix, tims,
                        targetwcs, refstars, large_galaxies, gaia_stars, ps)
    # SED-matched detections
    record_event and record_event('stage_srcs: SED-matched')
    debug('Running source detection at', nsigma, 'sigma')
    SEDs = survey.sed_matched_filters(bands)
    kwa = {}
    if plots:
        coims,_ = quick_coadds(tims, bands, targetwcs)
        rgb,_ = survey.get_rgb(coims, bands)
        kwa.update(rgbimg=rgb)
    Tnew,newcat,hot = run_sed_matched_filters(
        SEDs, bands, detmaps, detivs, (avoid_x,avoid_y,avoid_r), targetwcs,
        nsigma=nsigma, saddle_fraction=saddle_fraction, saddle_min=saddle_min,
        saturated_pix=saturated_pix, veto_map=avoid_map, blob_dilate=blob_dilate,
        plots=plots, ps=ps, mp=mp, **kwa)
    if Tnew is not None:
        assert(len(Tnew) == len(newcat))
        Tnew.delete_column('peaksn')
        Tnew.delete_column('apsn')
        # New detections have no reference-catalog identity.
        Tnew.ref_cat = np.array(['  '] * len(Tnew))
        Tnew.ref_id = np.zeros(len(Tnew), np.int64)
    del detmaps
    del detivs
    # Merge newly detected sources with reference sources (Tycho2, Gaia, large galaxies)
    cats = []
    tables = []
    if Tnew is not None:
        # Sources landing on saturated pixels need a special flux initialization.
        for src,ix,iy in zip(newcat, Tnew.ibx, Tnew.iby):
            for satmap in saturated_pix:
                if satmap[iy, ix]:
                    src.needs_initial_flux = True
        cats.extend(newcat)
        tables.append(Tnew)
    if refstars and len(refstars):
        cats.extend(refcat)
        tables.append(refstars)
    T = merge_tables(tables, columns='fillzero')
    cat = Catalog(*cats)
    cat.freezeAllParams()
    # The tractor Source object list "cat" and the table "T" are row-aligned.
    assert(len(T) > 0)
    assert(len(cat) == len(T))
    tnow = Time()
    debug('Peaks:', tnow-tlast)
    tlast = tnow
    if plots:
        from legacypipe.runbrick_plots import detection_plots_2
        detection_plots_2(tims, bands, targetwcs, refstars, Tnew, hot,
                          saturated_pix, ps)
    # Find "hot" pixels that are separated by masked pixels,
    # to connect blobs across, eg, bleed trails and saturated cores.
    hot = merge_hot_satur(hot, saturated_pix)
    # Segment, and record which sources fall into each blob
    blobmap,blobsrcs,blobslices = segment_and_group_sources(hot, T, name=brickname,
                                                            ps=ps, plots=plots)
    del hot
    tnow = Time()
    debug('Blobs:', tnow-tlast)
    tlast = tnow
    # DEBUG
    if False:
        BT = fits_table()
        BT.blob_pix = []
        BT.blob_srcs = []
        for blobid, (srcs, slc) in enumerate(zip(blobsrcs, blobslices)):
            BT.blob_pix.append(np.sum(blobmap[slc] == blobid))
            BT.blob_srcs.append(len(srcs))
        BT.to_np_arrays()
        BT.writeto('blob-stats-dilate%i.fits' % blob_dilate)
        sys.exit(0)
    ccds.co_sky = np.zeros(len(ccds), np.float32)
    # With ubercal sky calibration, the overlap-based sky estimate is skipped.
    if ubercal_sky:
        sky_overlap = False
    else:
        sky_overlap = True
    if sky_overlap:
        '''
        A note about units here: we're passing 'sbscale=False' to the
        coadd function, so images are *not* getting scaled to constant
        surface-brightness -- so you don't want to mix-and-match
        cameras with different pixel scales within a band! We're
        estimating the sky level as a surface brightness, in
        nanomaggies per pixel of the CCDs.
        '''
        debug('Creating coadd for sky overlap...')
        C = make_coadds(tims, bands, targetwcs, mp=mp, sbscale=False)
        co_sky = {}
        for band,co,cowt in zip(bands, C.coimgs, C.cowimgs):
            # Median of unmasked pixels that are not in any detection blob.
            pix = co[(cowt > 0) * (blobmap == -1)]
            if len(pix) == 0:
                debug('Cosky band', band, ': no unmasked pixels outside blobs')
                continue
            cosky = np.median(pix)
            info('Median coadd sky for', band, ':', cosky)
            co_sky[band] = cosky
            # Subtract the residual sky level from each image in this band, in place.
            for itim,tim in enumerate(tims):
                if tim.band != band:
                    continue
                goodpix = (tim.inverr > 0)
                tim.data[goodpix] -= cosky
                ccds.co_sky[itim] = cosky
    else:
        co_sky = None
    info('Sources detected:', len(T), 'in', len(blobslices), 'blobs')
    keys = ['T', 'tims', 'blobsrcs', 'blobslices', 'blobmap', 'cat',
            'ps', 'saturated_pix', 'version_header', 'co_sky', 'ccds']
    L = locals()
    rtn = dict([(k,L[k]) for k in keys])
    return rtn
def stage_fitblobs(T=None,
                   T_clusters=None,
                   T_dup=None,
                   brickname=None,
                   brickid=None,
                   brick=None,
                   version_header=None,
                   blobsrcs=None, blobslices=None, blobmap=None,
                   cat=None,
                   targetwcs=None,
                   W=None,H=None,
                   bands=None, ps=None, tims=None,
                   survey=None,
                   plots=False, plots2=False,
                   nblobs=None, blob0=None, blobxy=None,
                   blobradec=None, blobid=None,
                   max_blobsize=None,
                   reoptimize=False,
                   iterative=False,
                   large_galaxies_force_pointsource=True,
                   less_masking=False,
                   use_ceres=True, mp=None,
                   checkpoint_filename=None,
                   checkpoint_period=600,
                   write_pickle_filename=None,
                   write_metrics=True,
                   get_all_models=False,
                   refstars=None,
                   bailout=False,
                   record_event=None,
                   custom_brick=False,
                   **kwargs):
    '''
    This is where the actual source fitting happens.
    The `one_blob` function is called for each "blob" of pixels with
    the sources contained within that blob.

    Supports: restricting to a subset of blobs (nblobs/blob0/blobxy/
    blobradec/blobid), periodic checkpointing of per-blob results, and
    "bailing out" of unprocessed blobs.  Returns the updated catalog
    'cat', table 'T', parameter inverse-variances 'invvars', and the
    renumbered 'blobmap'.
    '''
    from tractor import Catalog
    from legacypipe.oneblob import MODEL_NAMES
    record_event and record_event('stage_fitblobs: starting')
    _add_stage_version(version_header, 'FITB', 'fitblobs')
    tlast = Time()
    version_header.add_record(dict(name='GALFRPSF',
                                   value=large_galaxies_force_pointsource,
                                   help='Large galaxies force PSF?'))
    version_header.add_record(dict(name='LESSMASK',
                                   value=less_masking,
                                   help='Reduce masking behaviors?'))
    version_header.add_record(dict(name='COMMENT', value='DCHISQ array model names'))
    for i,mod in enumerate(MODEL_NAMES):
        version_header.add_record(dict(name='DCHISQ_%i' % i, value=mod.upper()))
    if plots:
        from legacypipe.runbrick_plots import fitblobs_plots
        fitblobs_plots(tims, bands, targetwcs, blobslices, blobsrcs, cat,
                       blobmap, ps)
    tnow = Time()
    debug('Fitblobs:', tnow-tlast)
    tlast = tnow
    # Were we asked to only run a subset of blobs?
    keepblobs = None
    if blobradec is not None:
        # blobradec is a list like [(ra0,dec0), ...]
        # Convert RA,Dec to pixel coords; handled below via blobxy.
        rd = np.array(blobradec)
        _,x,y = targetwcs.radec2pixelxy(rd[:,0], rd[:,1])
        x = (x - 1).astype(int)
        y = (y - 1).astype(int)
        blobxy = list(zip(x, y))
    if blobxy is not None:
        # blobxy is a list like [(x0,y0), (x1,y1), ...]
        keepblobs = []
        for x,y in blobxy:
            x,y = int(x), int(y)
            if x < 0 or x >= W or y < 0 or y >= H:
                warnings.warn('Clipping blob x,y to brick bounds %i,%i' % (x,y))
                x = np.clip(x, 0, W-1)
                y = np.clip(y, 0, H-1)
            blob = blobmap[y,x]
            if blob >= 0:
                keepblobs.append(blob)
            else:
                warnings.warn('Blobxy %i,%i is not in a blob!' % (x,y))
        keepblobs = np.unique(keepblobs)
    if blobid is not None:
        # comma-separated list of blob id numbers.
        keepblobs = np.array([int(b) for b in blobid.split(',')])
    if blob0 is not None or (nblobs is not None and nblobs < len(blobslices)):
        if blob0 is None:
            blob0 = 0
        if nblobs is None:
            nblobs = len(blobslices) - blob0
        keepblobs = np.arange(blob0, blob0+nblobs)
    # keepblobs can be None or empty list
    if keepblobs is not None and len(keepblobs):
        # 'blobmap' is an image with values -1 for no blob, or the index
        # of the blob. Create a map from old 'blob number+1' to new
        # 'blob number', keeping only blobs in the 'keepblobs' list.
        # The +1 is so that -1 is a valid index in the mapping.
        NB = len(blobslices)
        remap = np.empty(NB+1, np.int32)
        remap[:] = -1
        remap[keepblobs + 1] = np.arange(len(keepblobs))
        # apply the map!
        blobmap = remap[blobmap + 1]
        # 'blobslices' and 'blobsrcs' are lists where the index
        # corresponds to the value in the 'blobs' map.
        blobslices = [blobslices[i] for i in keepblobs]
        blobsrcs   = [blobsrcs  [i] for i in keepblobs]
    # drop any cached data before we start pickling/multiprocessing
    survey.drop_cache()
    if plots and refstars:
        from legacypipe.runbrick_plots import fitblobs_plots_2
        fitblobs_plots_2(blobmap, refstars, ps)
    skipblobs = []
    R = []
    # Check for existing checkpoint file.
    if checkpoint_filename and os.path.exists(checkpoint_filename):
        from astrometry.util.file import unpickle_from_file
        info('Reading', checkpoint_filename)
        try:
            R = unpickle_from_file(checkpoint_filename)
            debug('Read', len(R), 'results from checkpoint file', checkpoint_filename)
        except:
            # A corrupt checkpoint is not fatal; just start from scratch.
            import traceback
            print('Failed to read checkpoint file ' + checkpoint_filename)
            traceback.print_exc()
        keepR = _check_checkpoints(R, blobslices, brickname)
        info('Keeping', len(keepR), 'of', len(R), 'checkpointed results')
        R = keepR
        skipblobs = [r['iblob'] for r in R]
    bailout_mask = None
    T_refbail = None
    if bailout:
        bailout_mask = _get_bailout_mask(blobmap, skipblobs, targetwcs, W, H, brick,
                                         blobslices)
        # skip all blobs!
        new_skipblobs = np.unique(blobmap[blobmap>=0])
        # Which blobs are we bailing out on?
        bailing = set(new_skipblobs) - set(skipblobs)
        info('Bailing out on blobs:', bailing)
        if len(bailing):
            Ibail = np.hstack([blobsrcs[b] for b in bailing])
            # Find reference sources in bailout blobs
            Irefbail = []
            for i in Ibail:
                if getattr(cat[i], 'is_reference_source', False):
                    Irefbail.append(i)
            if len(Irefbail):
                from legacypipe.catalog import _get_tractor_fits_values
                from legacypipe.oneblob import _convert_ellipses
                T_refbail = T[np.array(Irefbail)]
                cat_refbail = [cat[i] for i in Irefbail]
                # For SGA sources
                for src in cat_refbail:
                    _convert_ellipses(src)
                # Sets TYPE, etc for T_refbail table.
                _get_tractor_fits_values(T_refbail, cat_refbail, '%s')
        if T_refbail is not None:
            info('Found', len(T_refbail), 'reference sources in bail-out blobs')
        skipblobs = new_skipblobs
        # append empty results so that a later assert on the lengths will pass
        while len(R) < len(blobsrcs):
            R.append(dict(brickname=brickname, iblob=-1, result=None))
    frozen_galaxies = get_frozen_galaxies(T, blobsrcs, blobmap, targetwcs, cat)
    refmap = get_blobiter_ref_map(refstars, T_clusters, less_masking, targetwcs)
    # Create the iterator over blobs to process
    blobiter = _blob_iter(brickname, blobslices, blobsrcs, blobmap, targetwcs, tims,
                          cat, bands, plots, ps, reoptimize, iterative, use_ceres,
                          refmap, large_galaxies_force_pointsource, less_masking, brick,
                          frozen_galaxies,
                          skipblobs=skipblobs,
                          single_thread=(mp is None or mp.pool is None),
                          max_blobsize=max_blobsize, custom_brick=custom_brick)
    # to allow timingpool to queue tasks one at a time
    blobiter = iterwrapper(blobiter, len(blobsrcs))
    if checkpoint_filename is None:
        R.extend(mp.map(_bounce_one_blob, blobiter))
    else:
        from astrometry.util.ttime import CpuMeas
        # Begin running one_blob on each blob...
        Riter = mp.imap_unordered(_bounce_one_blob, blobiter)
        # measure wall time and write out checkpoint file periodically.
        last_checkpoint = CpuMeas()
        n_finished = 0
        n_finished_total = 0
        while True:
            import multiprocessing
            # Time to write a checkpoint file? (And have something to write?)
            tnow = CpuMeas()
            dt = tnow.wall_seconds_since(last_checkpoint)
            if dt >= checkpoint_period and n_finished > 0:
                # Write checkpoint!
                debug('Writing', n_finished, 'new results; total for this run', n_finished_total)
                try:
                    _write_checkpoint(R, checkpoint_filename)
                    last_checkpoint = tnow
                    dt = 0.
                    n_finished = 0
                except:
                    # A failed checkpoint write is non-fatal; retry next period.
                    print('Failed to write checkpoint file', checkpoint_filename)
                    import traceback
                    traceback.print_exc()
            # Wait for results (with timeout)
            try:
                if mp.pool is not None:
                    timeout = max(1, checkpoint_period - dt)
                    r = Riter.next(timeout)
                else:
                    r = next(Riter)
                R.append(r)
                n_finished += 1
                n_finished_total += 1
            except StopIteration:
                break
            except multiprocessing.TimeoutError:
                # Timeout just means no result ready yet; loop to maybe checkpoint.
                continue
        # Write checkpoint when done!
        _write_checkpoint(R, checkpoint_filename)
        debug('Got', n_finished_total, 'results; wrote', len(R), 'to checkpoint')
    debug('Fitting sources:', Time()-tlast)
    # Repackage the results from one_blob...
    # one_blob can change the number and types of sources.
    # Reorder the sources:
    assert(len(R) == len(blobsrcs))
    # drop brickname,iblob
    R = [r['result'] for r in R]
    # Drop now-empty blobs.
    R = [r for r in R if r is not None and len(r)]
    if len(R) == 0:
        raise NothingToDoError('No sources passed significance tests.')
    # Merge results R into one big table
    BB = merge_tables(R)
    del R
    # Pull out the source indices...
    II = BB.Isrcs
    newcat = BB.sources
    # ... and make the table T parallel with BB.
    # For iterative sources:
    n_iter = np.sum(II < 0)
    if n_iter:
        n_old = len(T)
        # first have to pad T with some new entries...
        Tnew = fits_table()
        Tnew.iterative = np.ones(n_iter, bool)
        Tnew.ref_cat = np.array(['  '] * len(Tnew))
        T = merge_tables([T, Tnew], columns='fillzero')
        # ... and then point II at them.
        II[II < 0] = n_old + np.arange(n_iter)
    else:
        T.iterative = np.zeros(len(T), bool)
    assert(np.all(II >= 0))
    assert(np.all(II < len(T)))
    assert(len(np.unique(II)) == len(II))
    T.cut(II)
    assert(len(T) == len(BB))
    del BB.Isrcs
    # Drop sources that exited the blob as a result of fitting.
    left_blob = np.logical_and(BB.started_in_blob,
                               np.logical_not(BB.finished_in_blob))
    I, = np.nonzero(np.logical_not(left_blob))
    if len(I) < len(BB):
        debug('Dropping', len(BB)-len(I), 'sources that exited their blobs during fitting')
        BB.cut(I)
        T.cut(I)
        newcat = [newcat[i] for i in I]
    assert(len(T) == len(BB))
    assert(len(T) == len(newcat))
    info('Old catalog:', len(cat))
    info('New catalog:', len(newcat))
    assert(len(newcat) > 0)
    # Sanity-check per-band array shapes in the blob results.
    ns,nb = BB.fracflux.shape
    assert(ns == len(newcat))
    assert(nb == len(bands))
    ns,nb = BB.fracmasked.shape
    assert(ns == len(newcat))
    assert(nb == len(bands))
    ns,nb = BB.fracin.shape
    assert(ns == len(newcat))
    assert(nb == len(bands))
    ns,nb = BB.rchisq.shape
    assert(ns == len(newcat))
    assert(nb == len(bands))
    ns,nb = BB.dchisq.shape
    assert(ns == len(newcat))
    assert(nb == 5) # psf, rex, dev, exp, ser
    # We want to order sources (and assign objids) so that sources outside the brick
    # are at the end, and T_dup sources are included.
    # Grab source positions
    T.ra  = np.array([src.getPosition().ra  for src in newcat])
    T.dec = np.array([src.getPosition().dec for src in newcat])
    # Copy blob results to table T
    for k in ['fracflux', 'fracin', 'fracmasked', 'rchisq',
              'cpu_arch', 'cpu_source', 'cpu_blob',
              'blob_width', 'blob_height', 'blob_npix',
              'blob_nimages', 'blob_totalpix',
              'blob_symm_width', 'blob_symm_height', 'blob_symm_npix',
              'blob_symm_nimages', 'bx0', 'by0',
              'hit_limit', 'hit_ser_limit', 'hit_r_limit',
              'dchisq',
              'force_keep_source', 'fit_background', 'forced_pointsource']:
        T.set(k, BB.get(k))
    T.regular = np.ones(len(T), bool)
    T.dup = np.zeros(len(T), bool)
    Tall = [T]
    dup_cat = []
    if T_dup:
        from legacypipe.survey import GaiaSource
        T_dup.type = np.array(['DUP']*len(T_dup))
        T_dup.dup = np.ones(len(T_dup), bool)
        Tall.append(T_dup)
        # re-create source objects for DUP stars
        for g in T_dup:
            src = GaiaSource.from_catalog(g, bands)
            # DUP sources carry zero flux.
            src.brightness.setParams([0] * src.brightness.numberOfParams())
            dup_cat.append(src)
    if T_refbail:
        Tall.append(T_refbail)
        dup_cat.extend([None] * len(T_refbail))
    if len(Tall) > 1:
        T = merge_tables(Tall, columns='fillzero')
    T_dup = None
    del T_refbail
    # Recompute brick pixel coords for the merged table.
    _,bx,by = targetwcs.radec2pixelxy(T.ra, T.dec)
    T.bx = (bx - 1.).astype(np.float32)
    T.by = (by - 1.).astype(np.float32)
    T.ibx = np.round(T.bx).astype(np.int32)
    T.iby = np.round(T.by).astype(np.int32)
    T.in_bounds = ((T.ibx >= 0) * (T.iby >= 0) * (T.ibx < W) * (T.iby < H))
    # DUP sources are Gaia/Tycho-2 stars, so fill in bx0=bx.
    T.bx0[T.dup] = T.bx[T.dup]
    T.by0[T.dup] = T.by[T.dup]
    # Order sources by RA.
    # (Here we're just setting 'objid', not actually reordering arrays.)
    # (put all the regular * in_bounds sources, then dup in-bound, then oob)
    I = np.argsort(T.ra + (-2000 * T.in_bounds) + (-1000 * T.regular))
    T.objid = np.empty(len(T), np.int32)
    T.objid[I] = np.arange(len(T))
    # Extend catalog with sources for T_dup entries
    cat = Catalog(*(newcat + dup_cat))
    # freeze DUP entries (so that number of catalog parameters is corrrect)
    for i in range(len(newcat), len(cat)):
        cat.freezeParam(i)
    del newcat
    del dup_cat
    assert(len(cat) == len(T))
    invvars = np.hstack(BB.srcinvvars)
    assert(cat.numberOfParams() == len(invvars))
    # NOTE that "BB" can now be shorter than cat and T.
    assert(np.sum(T.regular) == len(BB))
    # We assume below (when unpacking BB for all-models) that the
    # "regular" entries are at the beginning of T.
    # Set blob numbers
    T.blob = np.empty(len(T), np.int32)
    T.blob[:] = -1
    T.blob[T.in_bounds] = blobmap[T.iby[T.in_bounds], T.ibx[T.in_bounds]]
    # Renumber blobs to make them contiguous.
    goodblobs = (T.blob > -1)
    oldblobs = T.blob[goodblobs]
    _,iblob = np.unique(oldblobs, return_inverse=True)
    T.blob[goodblobs] = iblob
    del goodblobs
    # Renumber blobmap to match T.blob
    remap = np.empty(blobmap.max() + 2, np.int32)
    # dropped blobs -> -1
    remap[:] = -1
    # (this +1 business is because we're using a numpy array for the map)
    remap[oldblobs + 1] = iblob
    blobmap = remap[blobmap+1]
    del iblob, oldblobs
    # Frozen galaxies: update blob numbers.
    # while remapping, flip from blob->[srcs] to src->[blobs].
    fro_gals = {}
    for b,gals in frozen_galaxies.items():
        for gal in gals:
            if not gal in fro_gals:
                fro_gals[gal] = []
            bnew = remap[b+1]
            if bnew != -1:
                fro_gals[gal].append(bnew)
    frozen_galaxies = fro_gals
    del remap
    # How many sources in each blob?
    from collections import Counter
    ninblob = Counter(T.blob)
    ninblob[-1] = 0
    T.ninblob = np.array([ninblob[b] for b in T.blob]).astype(np.int32)
    del ninblob
    # write out blob map
    if write_metrics:
        from legacypipe.utils import copy_header_with_wcs
        hdr = copy_header_with_wcs(version_header, targetwcs)
        hdr.add_record(dict(name='IMTYPE', value='blobmap',
                            comment='LegacySurveys image type'))
        with survey.write_output('blobmap', brick=brickname, shape=blobmap.shape) as out:
            out.fits.write(blobmap, header=hdr)
    T.brickid = np.zeros(len(T), np.int32) + brickid
    T.brickname = np.array([brickname] * len(T))
    if write_metrics or get_all_models:
        from legacypipe.format_catalog import format_all_models
        TT,hdr = format_all_models(T, cat, BB, bands, survey.allbands,
                                   force_keep=T.force_keep_source)
        if get_all_models:
            all_models = TT
        if write_metrics:
            primhdr = fitsio.FITSHDR()
            for r in version_header.records():
                primhdr.add_record(r)
            primhdr.add_record(dict(name='PRODTYPE', value='catalog',
                                    comment='NOAO data product type'))
            with survey.write_output('all-models', brick=brickname) as out:
                TT[np.argsort(TT.objid)].writeto(None, fits_object=out.fits, header=hdr,
                                                 primheader=primhdr)
    keys = ['cat', 'invvars', 'T', 'blobmap', 'refmap', 'version_header',
            'frozen_galaxies', 'T_dup']
    if get_all_models:
        keys.append('all_models')
    if bailout:
        keys.extend(['bailout_mask'])
    L = locals()
    rtn = dict([(k,L[k]) for k in keys])
    return rtn
# Also called by farm.py
def get_blobiter_ref_map(refstars, T_clusters, less_masking, targetwcs):
    '''Build the per-pixel reference-source bitmask map for the brick WCS.

    Combines the fit-able reference stars (those with donotfit == False)
    with the cluster table *T_clusters* (when given) and rasterizes them
    via legacypipe.reference.get_reference_map.  Without reference stars,
    returns an all-zero uint8 map of the targetwcs shape.
    '''
    if not refstars:
        # No reference sources: empty mask covering the brick.
        nh, nw = targetwcs.shape
        return np.zeros((int(nh), int(nw)), np.uint8)
    from legacypipe.reference import get_reference_map
    keep = refstars[refstars.donotfit == False]
    if T_clusters is not None:
        keep = merge_tables([keep, T_clusters], columns='fillzero')
    return get_reference_map(targetwcs, keep)
# Also called by farm.py
def get_frozen_galaxies(T, blobsrcs, blobmap, targetwcs, cat):
    '''Find reference (frozen) large galaxies that touch blobs that
    they are not part of, to get their profiles subtracted.
    Generate a blob -> [sources] mapping.

    NOTE: may modify *blobsrcs* in place, removing out-of-bounds frozen
    galaxies from blob membership lists.

    Returns a dict mapping blob number -> list of tractor sources.
    '''
    frozen_galaxies = {}
    cols = T.get_columns()
    # Nothing to do unless the table carries the frozen-galaxy flags.
    if not ('islargegalaxy' in cols and 'freezeparams' in cols):
        return frozen_galaxies
    Igals = np.flatnonzero(T.islargegalaxy * T.freezeparams)
    if len(Igals) == 0:
        return frozen_galaxies
    from legacypipe.reference import get_reference_map
    debug('Found', len(Igals), 'frozen large galaxies')
    # create map in pixel space for each one.
    for ii in Igals:
        # length-1 table
        refgal = T[np.array([ii])].copy()
        # Double the masking radius to catch blobs near the galaxy's edge.
        refgal.radius_pix *= 2
        galmap = get_reference_map(targetwcs, refgal)
        # All blob numbers covered by this galaxy's (enlarged) footprint.
        galblobs = set(blobmap[galmap > 0])
        debug('galaxy mask overlaps blobs:', galblobs)
        galblobs.discard(-1)
        debug('source:', cat[ii])
        if refgal.in_bounds:
            # If in-bounds, remove the blob that this source is
            # already part of, if it exists; it will get processed
            # within that blob.
            for ib,bsrcs in enumerate(blobsrcs):
                if ii in bsrcs:
                    if ib in galblobs:
                        debug('in bounds; removing frozen-galaxy entry for blob', ib, 'bsrcs', bsrcs)
                        galblobs.remove(ib)
        else:
            # Otherwise, remove this from any 'blobsrcs' members it is
            # part of -- this can happen when we clip a source
            # position outside the brick to the brick bounds and that
            # happens to touch a blob.
            for j,bsrcs in enumerate(blobsrcs):
                if ii in bsrcs:
                    blobsrcs[j] = bsrcs[bsrcs != ii]
                    debug('removed source', ii, 'from blob', j, 'blobsrcs', bsrcs, '->', blobsrcs[j])
        # Record this galaxy against every remaining overlapped blob.
        for blob in galblobs:
            if not blob in frozen_galaxies:
                frozen_galaxies[blob] = []
            frozen_galaxies[blob].append(cat[ii])
    return frozen_galaxies
def _get_bailout_mask(blobmap, skipblobs, targetwcs, W, H, brick, blobslices):
    '''Return a boolean image (same shape as *blobmap*) that is True for
    pixels belonging to blobs we are bailing out on.

    A blob is NOT bailed out when it has no blob at all, appears in
    *skipblobs* (already done, e.g. from a checkpoint), or lies entirely
    outside the brick's unique (PRIMARY) area.
    '''
    nblobs = blobmap.max()
    # Per-blob flag table, offset by +1 so blob value -1 indexes entry 0.
    flags = np.ones(nblobs + 2, bool)
    # no-blob pixels
    flags[0] = False
    skipset = set(skipblobs)
    # blobs finished in the checkpoint file
    for b in skipset:
        flags[b + 1] = False
    # Blobs completely outside the primary region of this brick would never
    # have been processed, so they don't count as bailed-out either.
    U = find_unique_pixels(targetwcs, W, H, None,
                           brick.ra1, brick.ra2, brick.dec1, brick.dec2)
    for b in np.unique(blobmap):
        if b == -1 or b in skipset:
            continue
        slc = blobslices[b]
        inblob = (blobmap[slc] == b)
        if not np.any(U[slc][inblob]):
            debug('Blob', b, 'is completely outside the PRIMARY region')
            flags[b + 1] = False
    # Broadcast the per-blob flags back to a pixel map.
    return flags[blobmap + 1]
def _write_checkpoint(R, checkpoint_filename):
    """Atomically pickle the results list *R* to *checkpoint_filename*.

    The pickle is first written to a ".tmp" sibling file and then renamed
    into place, so a reader never sees a partially-written checkpoint.
    """
    from astrometry.util.file import pickle_to_file, trymakedirs
    # Make sure the destination directory exists.
    dirnm = os.path.dirname(checkpoint_filename)
    if dirnm and not os.path.exists(dirnm):
        trymakedirs(dirnm)
    tmpfn = checkpoint_filename + '.tmp'
    pickle_to_file(R, tmpfn)
    # Atomic on POSIX when source & destination are on the same filesystem.
    os.rename(tmpfn, checkpoint_filename)
    debug('Wrote checkpoint to', checkpoint_filename)
def _check_checkpoints(R, blobslices, brickname):
# Check that checkpointed blobids match our current set of blobs,
# based on blob bounding-box. This can fail if the code changes
# between writing & reading the checkpoint, resulting in a
# different set of detected sources.
keepR = []
for ri in R:
brick = ri['brickname']
iblob = ri['iblob']
r = ri['result']
if brick != brickname:
print('Checkpoint brick mismatch:', brick, brickname)
continue
if r is None:
pass
else:
if r.iblob != iblob:
print('Checkpoint iblob mismatch:', r.iblob, iblob)
continue
if iblob >= len(blobslices):
print('Checkpointed iblob', iblob, 'is too large! (>= %i)' % len(blobslices))
continue
if len(r) == 0:
pass
else:
# expected bbox:
sy,sx = blobslices[iblob]
by0,by1,bx0,bx1 = sy.start, sy.stop, sx.start, sx.stop
# check bbox
rx0,ry0 = r.blob_x0[0], r.blob_y0[0]
rx1,ry1 = rx0 + r.blob_width[0], ry0 + r.blob_height[0]
if rx0 != bx0 or ry0 != by0 or rx1 != bx1 or ry1 != by1:
print('Checkpointed blob bbox', [rx0,rx1,ry0,ry1],
'does not match expected', [bx0,bx1,by0,by1], 'for iblob', iblob)
continue
keepR.append(ri)
return keepR
def _blob_iter(brickname, blobslices, blobsrcs, blobmap, targetwcs, tims, cat, bands,
               plots, ps, reoptimize, iterative, use_ceres, refmap,
               large_galaxies_force_pointsource, less_masking,
               brick, frozen_galaxies, single_thread=False,
               skipblobs=None, max_blobsize=None, custom_brick=False):
    '''
    Generator producing the per-blob work packages consumed by one_blob()
    (via _bounce_one_blob).

    Yields (brickname, iblob, args) tuples.  *args* is None for blobs that
    are skipped (entirely outside the brick's unique area, or bigger than
    *max_blobsize*); otherwise it carries the blob's bounding box, mask,
    per-tim subimage cutouts, and sources.

    *blobmap*: map, with -1 indicating no-blob, other values indexing *blobslices*,*blobsrcs*.
    *skipblobs*: blob ids already finished (eg read from a checkpoint file).
    *single_thread*: copy the subimage arrays so in-place modification by
    oneblob.py cannot corrupt the originals.
    '''
    from collections import Counter
    if skipblobs is None:
        skipblobs = []
    # sort blobs by size so that larger ones start running first
    blobvals = Counter(blobmap[blobmap>=0])
    blob_order = np.array([b for b,npix in blobvals.most_common()])
    del blobvals
    if custom_brick:
        # Custom bricks have no "unique area" cut.
        U = None
    else:
        H,W = targetwcs.shape
        U = find_unique_pixels(targetwcs, W, H, None,
                               brick.ra1, brick.ra2, brick.dec1, brick.dec2)
    for nblob,iblob in enumerate(blob_order):
        if iblob in skipblobs:
            info('Skipping blob', iblob)
            continue
        bslc = blobslices[iblob]
        Isrcs = blobsrcs [iblob]
        assert(len(Isrcs) > 0)
        # blob bbox in target coords
        sy,sx = bslc
        by0,by1 = sy.start, sy.stop
        bx0,bx1 = sx.start, sx.stop
        blobh,blobw = by1 - by0, bx1 - bx0
        # Here we assume the "blobmap" array has been remapped so that
        # -1 means "no blob", while 0 and up label the blobs, thus
        # iblob equals the value in the "blobmap" map.
        blobmask = (blobmap[bslc] == iblob)
        # at least one pixel should be set!
        assert(np.any(blobmask))
        if U is not None:
            # If the blob is solely outside the unique region of this brick,
            # skip it!
            if np.all(U[bslc][blobmask] == False):
                info('Blob', nblob+1, 'is completely outside the unique region of this brick -- skipping')
                yield (brickname, iblob, None)
                continue
        # find one pixel within the blob, for debugging purposes
        onex = oney = None
        for y in range(by0, by1):
            ii = np.flatnonzero(blobmask[y-by0,:])
            if len(ii) == 0:
                continue
            onex = bx0 + ii[0]
            oney = y
            break
        npix = np.sum(blobmask)
        info(('Blob %i of %i, id: %i, sources: %i, size: %ix%i, npix %i, brick X: %i,%i, ' +
              'Y: %i,%i, one pixel: %i %i') %
             (nblob+1, len(blobslices), iblob, len(Isrcs), blobw, blobh, npix,
              bx0,bx1,by0,by1, onex,oney))
        if max_blobsize is not None and npix > max_blobsize:
            info('Number of pixels in blob,', npix, ', exceeds max blobsize', max_blobsize)
            yield (brickname, iblob, None)
            continue
        # Here we cut out subimages for the blob...
        # RA,Dec of the four corners of the blob bounding box.
        rr,dd = targetwcs.pixelxy2radec([bx0,bx0,bx1,bx1],[by0,by1,by1,by0])
        subtimargs = []
        for tim in tims:
            h,w = tim.shape
            _,x,y = tim.subwcs.radec2pixelxy(rr,dd)
            sx0,sx1 = x.min(), x.max()
            sy0,sy1 = y.min(), y.max()
            #print('blob extent in pixel space of', tim.name, ': x',
            # (sx0,sx1), 'y', (sy0,sy1), 'tim shape', (h,w))
            if sx1 < 0 or sy1 < 0 or sx0 > w or sy0 > h:
                # This tim does not overlap the blob at all.
                continue
            sx0 = int(np.clip(int(np.floor(sx0)), 0, w-1))
            sx1 = int(np.clip(int(np.ceil (sx1)), 0, w-1)) + 1
            sy0 = int(np.clip(int(np.floor(sy0)), 0, h-1))
            sy1 = int(np.clip(int(np.ceil (sy1)), 0, h-1)) + 1
            subslc = slice(sy0,sy1),slice(sx0,sx1)
            subimg = tim.getImage ()[subslc]
            subie = tim.getInvError()[subslc]
            if tim.dq is None:
                subdq = None
            else:
                subdq = tim.dq[subslc]
            # Shift calibration objects into the cutout's coordinate frame.
            subwcs = tim.getWcs().shifted(sx0, sy0)
            subsky = tim.getSky().shifted(sx0, sy0)
            subpsf = tim.getPsf().getShifted(sx0, sy0)
            subwcsobj = tim.subwcs.get_subimage(sx0, sy0, sx1-sx0, sy1-sy0)
            tim.imobj.psfnorm = tim.psfnorm
            tim.imobj.galnorm = tim.galnorm
            # FIXME -- maybe the cache is worth sending?
            if hasattr(tim.psf, 'clear_cache'):
                tim.psf.clear_cache()
            # Yuck! If we not running with --threads AND oneblob.py modifies the data,
            # bad things happen!
            if single_thread:
                subimg = subimg.copy()
                subie = subie.copy()
                # NOTE(review): subdq can be None here (when tim.dq is None),
                # in which case .copy() would raise AttributeError -- confirm
                # tims always carry dq in single-thread runs.
                subdq = subdq.copy()
            subtimargs.append((subimg, subie, subdq, subwcs, subwcsobj,
                               tim.getPhotoCal(),
                               subsky, subpsf, tim.name, tim.band, tim.sig1, tim.imobj))
        # This tuple layout must match what one_blob() unpacks.
        yield (brickname, iblob,
               (nblob, iblob, Isrcs, targetwcs, bx0, by0, blobw, blobh,
                blobmask, subtimargs, [cat[i] for i in Isrcs], bands, plots, ps,
                reoptimize, iterative, use_ceres, refmap[bslc],
                large_galaxies_force_pointsource, less_masking,
                frozen_galaxies.get(iblob, [])))
def _bounce_one_blob(X):
    """Unpack one work item from _blob_iter and run one_blob() on it.

    Thin wrapper used as the multiprocessing map target; on failure it
    reports which brick/blob crashed before re-raising.
    """
    from legacypipe.oneblob import one_blob
    brickname, iblob, blobargs = X
    try:
        result = one_blob(blobargs)
        ### This defines the format of the results in the checkpoints files
        return dict(brickname=brickname, iblob=iblob, result=result)
    except BaseException:
        # Report which work item failed, then propagate the exception.
        import traceback
        print('Exception in one_blob: brick %s, iblob %i' % (brickname, iblob))
        traceback.print_exc()
        raise
def _get_mod(X):
    """Render the model image for one tim given a list of sources.

    *X* is a (tim, srcs) tuple; used as a multiprocessing map target.
    """
    from tractor import Tractor
    tim, srcs = X
    tstart = Time()
    mod = Tractor([tim], srcs).getModelImage(0)
    debug('Getting model for', tim, ':', Time()-tstart)
    # Drop any cached PSF models to keep memory down.
    if hasattr(tim.psf, 'clear_cache'):
        tim.psf.clear_cache()
    return mod
def _get_both_mods(X):
    """Render two model images for one tim: the full model and a blob-masked one.

    *X* is a (tim, srcs, srcblobs, blobmap, targetwcs, frozen_galaxies, ps,
    plots) tuple.  The full model sums every source's patch; the blob-masked
    model only keeps each source's flux within its own blob.  Also computes
    per-source stats [nea, blob-nea, fracin] (noise-equivalent area and the
    fraction of flux falling inside the rendered patch).

    Returns (mod, blobmod, NEA); returns (None, None) if the tim does not
    overlap *targetwcs*.
    """
    from astrometry.util.resample import resample_with_wcs, OverlapError
    from astrometry.util.miscutils import get_overlapping_region
    (tim, srcs, srcblobs, blobmap, targetwcs, frozen_galaxies, ps, plots) = X
    mod = np.zeros(tim.getModelShape(), np.float32)
    blobmod = np.zeros(tim.getModelShape(), np.float32)
    assert(len(srcs) == len(srcblobs))
    ### modelMasks during fitblobs()....?
    try:
        Yo,Xo,Yi,Xi,_ = resample_with_wcs(tim.subwcs, targetwcs)
    except OverlapError:
        # NOTE(review): callers index result[2] for NEA, so a tim that does
        # not overlap the brick WCS would break them with this 2-tuple --
        # presumably tims are pre-cut to overlap; confirm.
        return None,None
    # Resample the brick-space blob map into this tim's pixel space.
    timblobmap = np.empty(mod.shape, blobmap.dtype)
    timblobmap[:,:] = -1
    timblobmap[Yo,Xo] = blobmap[Yi,Xi]
    del Yo,Xo,Yi,Xi
    srcs_blobs = list(zip(srcs, srcblobs))
    # (RA,Dec) of frozen galaxies already rendered here, so the main source
    # loop below can skip them.
    fro_rd = set()
    if frozen_galaxies is not None:
        from tractor.patch import ModelMask
        # Blob ids actually present in this tim.
        timblobs = set(timblobmap.ravel())
        timblobs.discard(-1)
        h,w = tim.shape
        # Render frozen galaxies over the full tim extent.
        mm = ModelMask(0, 0, w, h)
        for fro,bb in frozen_galaxies.items():
            # Does this source (which touches blobs bb) touch any blobs in this tim?
            touchedblobs = timblobs.intersection(bb)
            if len(touchedblobs) == 0:
                continue
            patch = fro.getModelPatch(tim, modelMask=mm)
            if patch is None:
                continue
            patch.addTo(mod)
            assert(patch.shape == mod.shape)
            # np.isin doesn't work with a *set* argument!
            blobmask = np.isin(timblobmap, list(touchedblobs))
            blobmod += patch.patch * blobmask
            if plots:
                import pylab as plt
                plt.clf()
                plt.imshow(blobmask, interpolation='nearest', origin='lower', vmin=0, vmax=1,
                           cmap='gray')
                plt.title('tim %s: frozen-galaxy blobmask' % tim.name)
                ps.savefig()
                plt.clf()
                plt.imshow(patch.patch, interpolation='nearest', origin='lower',
                           cmap='gray')
                plt.title('tim %s: frozen-galaxy patch' % tim.name)
                ps.savefig()
            # Drop this frozen galaxy from the catalog to render, if it is present
            # (ie, if it is in_bounds)
            fro_rd.add((fro.pos.ra, fro.pos.dec))
    NEA = []
    # Placeholder stats for sources we cannot (or choose not to) measure.
    no_nea = [0.,0.,0.]
    pcal = tim.getPhotoCal()
    for src,srcblob in srcs_blobs:
        if src is None:
            NEA.append(no_nea)
            continue
        if (src.pos.ra, src.pos.dec) in fro_rd:
            # Skip frozen galaxy source (here we choose not to compute NEA)
            NEA.append(no_nea)
            continue
        patch = src.getModelPatch(tim)
        if patch is None:
            NEA.append(no_nea)
            continue
        # From patch.addTo() -- find pixel overlap region
        (ih, iw) = mod.shape
        (ph, pw) = patch.shape
        (outx, inx) = get_overlapping_region(
            patch.x0, patch.x0 + pw - 1, 0, iw - 1)
        (outy, iny) = get_overlapping_region(
            patch.y0, patch.y0 + ph - 1, 0, ih - 1)
        if inx == [] or iny == []:
            NEA.append(no_nea)
            continue
        # model image patch
        p = patch.patch[iny, inx]
        # add to model image
        mod[outy, outx] += p
        # mask by blob map
        maskedp = p * (timblobmap[outy,outx] == srcblob)
        # add to blob-masked image
        blobmod[outy, outx] += maskedp
        # per-image NEA computations
        # total flux
        flux = pcal.brightnessToCounts(src.brightness)
        # flux in patch
        pflux = np.sum(p)
        # weighting -- fraction of flux that is in the patch
        fracin = pflux / flux
        # nea
        if pflux == 0: # sum(p**2) can only be zero if all(p==0), and then pflux==0
            nea = 0.
        else:
            nea = pflux**2 / np.sum(p**2)
        mpsq = np.sum(maskedp**2)
        if mpsq == 0 or pflux == 0:
            mnea = 0.
        else:
            # NOTE(review): blob-NEA uses the *total* flux here, whereas nea
            # above uses the patch flux -- presumably intentional; confirm.
            mnea = flux**2 / mpsq
        NEA.append([nea, mnea, fracin])
    if hasattr(tim.psf, 'clear_cache'):
        tim.psf.clear_cache()
    return mod, blobmod, NEA
def stage_coadds(survey=None, bands=None, version_header=None, targetwcs=None,
                 tims=None, ps=None, brickname=None, ccds=None,
                 custom_brick=False,
                 T=None,
                 refstars=None,
                 blobmap=None,
                 cat=None, pixscale=None, plots=False,
                 coadd_bw=False, brick=None, W=None, H=None, lanczos=True,
                 co_sky=None,
                 saturated_pix=None,
                 refmap=None,
                 frozen_galaxies=None,
                 bailout_mask=None,
                 coadd_headers=None,
                 mp=None,
                 record_event=None,
                 **kwargs):
    '''
    After the `stage_fitblobs` fitting stage, we have all the source
    model fits, and we can create coadds of the images, model, and
    residuals. We also perform aperture photometry in this stage.

    Returns a dict updating the pipeline state: the catalog table *T*
    (augmented with coadd-derived per-source measurements), the aperture
    definitions, the *maskbits* map, and the updated *version_header*.
    '''
    from functools import reduce
    from legacypipe.survey import apertures_arcsec
    from legacypipe.bits import IN_BLOB
    # Fix for the former mutable default argument (coadd_headers={}):
    # use a None sentinel and create a fresh dict per call.
    if coadd_headers is None:
        coadd_headers = {}
    record_event and record_event('stage_coadds: starting')
    _add_stage_version(version_header, 'COAD', 'coadds')
    tlast = Time()
    # Write per-brick CCDs table
    primhdr = fitsio.FITSHDR()
    for r in version_header.records():
        primhdr.add_record(r)
    primhdr.add_record(dict(name='PRODTYPE', value='ccdinfo',
                            comment='NOAO data product type'))
    with survey.write_output('ccds-table', brick=brickname) as out:
        ccds.writeto(None, fits_object=out.fits, primheader=primhdr)
    # Debug plots comparing initial vs iterative detections (disabled).
    if plots and False:
        import pylab as plt
        from astrometry.util.plotutils import dimshow
        cat_init = [src for it,src in zip(T.iterative, cat) if not(it)]
        cat_iter = [src for it,src in zip(T.iterative, cat) if it]
        info(len(cat_init), 'initial sources and', len(cat_iter), 'iterative')
        mods_init = mp.map(_get_mod, [(tim, cat_init) for tim in tims])
        mods_iter = mp.map(_get_mod, [(tim, cat_iter) for tim in tims])
        coimgs_init,_ = quick_coadds(tims, bands, targetwcs, images=mods_init)
        coimgs_iter,_ = quick_coadds(tims, bands, targetwcs, images=mods_iter)
        coimgs,_ = quick_coadds(tims, bands, targetwcs)
        plt.clf()
        rgb,kw = survey.get_rgb(coimgs, bands)
        dimshow(rgb, **kw)
        plt.title('First-round data')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb(coimgs_init, bands)
        dimshow(rgb, **kw)
        plt.title('First-round model fits')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb([img-mod for img,mod in zip(coimgs,coimgs_init)], bands)
        dimshow(rgb, **kw)
        plt.title('First-round residuals')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb(coimgs_iter, bands)
        dimshow(rgb, **kw)
        plt.title('Iterative model fits')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb([mod+mod2 for mod,mod2 in zip(coimgs_init, coimgs_iter)], bands)
        dimshow(rgb, **kw)
        plt.title('Initial + Iterative model fits')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb([img-mod-mod2 for img,mod,mod2 in zip(coimgs,coimgs_init,coimgs_iter)], bands)
        dimshow(rgb, **kw)
        plt.title('Iterative model residuals')
        ps.savefig()
    # Render model images...
    record_event and record_event('stage_coadds: model images')
    # Re-add the blob that this galaxy is actually inside
    # (that blob got dropped way earlier, before fitblobs)
    if frozen_galaxies is not None:
        for src,bb in frozen_galaxies.items():
            _,xx,yy = targetwcs.radec2pixelxy(src.pos.ra, src.pos.dec)
            xx = int(xx-1)
            yy = int(yy-1)
            bh,bw = blobmap.shape
            if xx >= 0 and xx < bw and yy >= 0 and yy < bh:
                # in bounds!
                debug('Frozen galaxy', src, 'lands in blob', blobmap[yy,xx])
                if blobmap[yy,xx] != -1:
                    bb.append(blobmap[yy,xx])
    # "Regular" sources (the ones actually fit) get model renders and NEA.
    Ireg = np.flatnonzero(T.regular)
    Nreg = len(Ireg)
    bothmods = mp.map(_get_both_mods, [(tim, [cat[i] for i in Ireg], T.blob[Ireg], blobmap,
                                        targetwcs, frozen_galaxies, ps, plots)
                                       for tim in tims])
    mods = [r[0] for r in bothmods]
    blobmods = [r[1] for r in bothmods]
    NEA = [r[2] for r in bothmods]
    NEA = np.array(NEA)
    # NEA shape (tims, srcs, 3:[nea, blobnea, weight])
    neas = NEA[:,:,0]
    blobneas = NEA[:,:,1]
    nea_wts = NEA[:,:,2]
    del bothmods, NEA
    tnow = Time()
    debug('Model images:', tnow-tlast)
    tlast = tnow
    # source pixel positions to probe depth maps, etc
    ixy = (np.clip(T.ibx, 0, W-1).astype(int), np.clip(T.iby, 0, H-1).astype(int))
    # convert apertures to pixels
    apertures = apertures_arcsec / pixscale
    # Aperture photometry locations
    apxy = np.vstack((T.bx, T.by)).T
    record_event and record_event('stage_coadds: coadds')
    C = make_coadds(tims, bands, targetwcs, mods=mods, blobmods=blobmods,
                    xy=ixy,
                    ngood=True, detmaps=True, psfsize=True, allmasks=True,
                    lanczos=lanczos,
                    apertures=apertures, apxy=apxy,
                    callback=write_coadd_images,
                    callback_args=(survey, brickname, version_header, tims,
                                   targetwcs, co_sky, coadd_headers),
                    plots=plots, ps=ps, mp=mp)
    record_event and record_event('stage_coadds: extras')
    # Coadds of galaxy sims only, image only
    if hasattr(tims[0], 'sims_image'):
        sims_mods = [tim.sims_image for tim in tims]
        T_sims_coadds = make_coadds(tims, bands, targetwcs, mods=sims_mods,
                                    lanczos=lanczos, mp=mp)
        sims_coadd = T_sims_coadds.comods
        del T_sims_coadds
        image_only_mods= [tim.data-tim.sims_image for tim in tims]
        # NOTE(review): this coadd's return value is discarded -- confirm
        # whether it is still needed (run for its side effects?) or dead.
        make_coadds(tims, bands, targetwcs, mods=image_only_mods,
                    lanczos=lanczos, mp=mp)
    ###
    # Save per-source measurements of the maps produced during coadding
    cols = ['nobs', 'ngood', 'anymask', 'allmask', 'psfsize', 'psfdepth', 'galdepth',
            'mjd_min', 'mjd_max']
    # store galaxy sim bounding box in Tractor cat
    if 'sims_xy' in C.T.get_columns():
        cols.append('sims_xy')
    for c in cols:
        T.set(c, C.T.get(c))
    # average NEA stats per band -- after psfsize,psfdepth computed.
    # first init all bands expected by format_catalog
    for band in survey.allbands:
        T.set('nea_%s' % band, np.zeros(len(T), np.float32))
        T.set('blob_nea_%s' % band, np.zeros(len(T), np.float32))
    # Inverse-variance-weighted average of the per-tim inverse-NEA values.
    for iband,band in enumerate(bands):
        num = np.zeros(Nreg, np.float32)
        den = np.zeros(Nreg, np.float32)
        bnum = np.zeros(Nreg, np.float32)
        for tim,nea,bnea,nea_wt in zip(
                tims, neas, blobneas, nea_wts):
            if not tim.band == band:
                continue
            iv = 1./(tim.sig1**2)
            I, = np.nonzero(nea)
            wt = nea_wt[I]
            num[I] += iv * wt * 1./(nea[I] * tim.imobj.pixscale**2)
            den[I] += iv * wt
            I, = np.nonzero(bnea)
            bnum[I] += iv * 1./bnea[I]
        # bden is the coadded per-pixel inverse variance derived from psfdepth and psfsize
        # this ends up in arcsec units, not pixels
        bden = T.psfdepth[Ireg,iband] * (4 * np.pi * (T.psfsize[Ireg,iband]/2.3548)**2)
        # numerator and denominator are for the inverse-NEA!
        with np.errstate(divide='ignore', invalid='ignore'):
            nea = den / num
            bnea = bden / bnum
        nea [np.logical_not(np.isfinite(nea ))] = 0.
        bnea[np.logical_not(np.isfinite(bnea))] = 0.
        # Set vals in T
        T.get('nea_%s' % band)[Ireg] = nea
        T.get('blob_nea_%s' % band)[Ireg] = bnea
    # Grab aperture fluxes
    assert(C.AP is not None)
    # How many apertures?
    A = len(apertures_arcsec)
    for src,dst in [('apflux_img_%s', 'apflux'),
                    ('apflux_img_ivar_%s', 'apflux_ivar'),
                    ('apflux_masked_%s', 'apflux_masked'),
                    ('apflux_resid_%s', 'apflux_resid'),
                    ('apflux_blobresid_%s', 'apflux_blobresid'),]:
        X = np.zeros((len(T), len(bands), A), np.float32)
        for iband,band in enumerate(bands):
            X[:,iband,:] = C.AP.get(src % band)
        T.set(dst, X)
    # Compute depth histogram
    D = _depth_histogram(brick, targetwcs, bands, C.psfdetivs, C.galdetivs)
    with survey.write_output('depth-table', brick=brickname) as out:
        D.writeto(None, fits_object=out.fits)
    del D
    # Create JPEG coadds
    coadd_list= [('image', C.coimgs, {}),
                 ('model', C.comods, {}),
                 ('blobmodel', C.coblobmods, {}),
                 ('resid', C.coresids, dict(resids=True))]
    if hasattr(tims[0], 'sims_image'):
        coadd_list.append(('simscoadd', sims_coadd, {}))
    for name,ims,rgbkw in coadd_list:
        rgb,kwa = survey.get_rgb(ims, bands, **rgbkw)
        with survey.write_output(name + '-jpeg', brick=brickname) as out:
            imsave_jpeg(out.fn, rgb, origin='lower', **kwa)
            info('Wrote', out.fn)
        del rgb
    # Construct the maskbits map
    maskbits = np.zeros((H,W), np.int32)
    # !PRIMARY
    if not custom_brick:
        U = find_unique_pixels(targetwcs, W, H, None,
                               brick.ra1, brick.ra2, brick.dec1, brick.dec2)
        maskbits |= MASKBITS['NPRIMARY'] * np.logical_not(U).astype(np.int32)
        del U
    # BRIGHT
    if refmap is not None:
        maskbits |= MASKBITS['BRIGHT'] * ((refmap & IN_BLOB['BRIGHT'] ) > 0)
        maskbits |= MASKBITS['MEDIUM'] * ((refmap & IN_BLOB['MEDIUM'] ) > 0)
        maskbits |= MASKBITS['GALAXY'] * ((refmap & IN_BLOB['GALAXY'] ) > 0)
        maskbits |= MASKBITS['CLUSTER'] * ((refmap & IN_BLOB['CLUSTER']) > 0)
        del refmap
    # SATUR
    if saturated_pix is not None:
        for b, sat in zip(bands, saturated_pix):
            key = 'SATUR_' + b.upper()
            if key in MASKBITS:
                maskbits |= (MASKBITS[key] * sat).astype(np.int32)
    # ALLMASK_{g,r,z}
    for b,allmask in zip(bands, C.allmasks):
        key = 'ALLMASK_' + b.upper()
        if key in MASKBITS:
            maskbits |= (MASKBITS[key] * (allmask > 0))
    # BAILOUT_MASK
    if bailout_mask is not None:
        maskbits |= MASKBITS['BAILOUT'] * bailout_mask.astype(bool)
    # Add the maskbits header cards to version_header
    mbits = [
        ('NPRIMARY', 'NPRIM', 'not primary brick area'),
        ('BRIGHT', 'BRIGH', 'bright star nearby'),
        ('SATUR_G', 'SAT_G', 'g band saturated'),
        ('SATUR_R', 'SAT_R', 'r band saturated'),
        ('SATUR_Z', 'SAT_Z', 'z band saturated'),
        ('ALLMASK_G', 'ALL_G', 'any ALLMASK_G bit set'),
        ('ALLMASK_R', 'ALL_R', 'any ALLMASK_R bit set'),
        ('ALLMASK_Z', 'ALL_Z', 'any ALLMASK_Z bit set'),
        ('WISEM1', 'WISE1', 'WISE W1 (all masks)'),
        ('WISEM2', 'WISE2', 'WISE W2 (all masks)'),
        ('BAILOUT', 'BAIL', 'Bailed out processing'),
        ('MEDIUM', 'MED', 'medium-bright star'),
        ('GALAXY', 'GAL', 'SGA large galaxy'),
        ('CLUSTER', 'CLUST', 'Globular cluster')]
    version_header.add_record(dict(name='COMMENT', value='maskbits bits:'))
    _add_bit_description(version_header, MASKBITS, mbits,
                         'MB_%s', 'MBIT_%i', 'maskbits')
    # Add the fitbits header cards to version_header
    fbits = [
        ('FORCED_POINTSOURCE', 'FPSF', 'forced to be PSF'),
        ('FIT_BACKGROUND', 'FITBG', 'background levels fit'),
        ('HIT_RADIUS_LIMIT', 'RLIM', 'hit radius limit during fit'),
        ('HIT_SERSIC_LIMIT', 'SLIM', 'hit Sersic index limit during fit'),
        ('FROZEN', 'FROZE', 'parameters were not fit'),
        ('BRIGHT', 'BRITE', 'bright star'),
        ('MEDIUM', 'MED', 'medium-bright star'),
        ('GAIA', 'GAIA', 'Gaia source'),
        ('TYCHO2', 'TYCHO', 'Tycho-2 star'),
        ('LARGEGALAXY', 'LGAL', 'SGA large galaxy'),
        ('WALKER', 'WALK', 'fitting moved pos > 1 arcsec'),
        ('RUNNER', 'RUN', 'fitting moved pos > 2.5 arcsec'),
        ('GAIA_POINTSOURCE', 'GPSF', 'Gaia source treated as point source'),
        ('ITERATIVE', 'ITER', 'source detected during iterative detection'),
    ]
    version_header.add_record(dict(name='COMMENT', value='fitbits bits:'))
    _add_bit_description(version_header, FITBITS, fbits,
                         'FB_%s', 'FBIT_%i', 'fitbits')
    # Debug plots: source motions and fitted galaxy shapes on the coadd.
    if plots:
        import pylab as plt
        from astrometry.util.plotutils import dimshow
        plt.clf()
        ra = np.array([src.getPosition().ra for src in cat])
        dec = np.array([src.getPosition().dec for src in cat])
        x0,y0 = T.bx0, T.by0
        ok,x1,y1 = targetwcs.radec2pixelxy(ra, dec)
        x1 -= 1.
        y1 -= 1.
        rgb,kw = survey.get_rgb(C.coimgs, bands)
        dimshow(rgb, **kw)
        ax = plt.axis()
        for xx0,yy0,xx1,yy1 in zip(x0,y0,x1,y1):
            plt.plot([xx0,xx1], [yy0,yy1], 'r-')
        plt.plot(x1, y1, 'r.')
        plt.axis(ax)
        plt.title('Original to final source positions')
        ps.savefig()
        plt.clf()
        rgb,kw = survey.get_rgb(C.coimgs, bands)
        dimshow(rgb, **kw)
        ax = plt.axis()
        ps.savefig()
        for src,x,y,rr,dd in zip(cat, x1, y1, ra, dec):
            from tractor import PointSource
            from tractor.galaxy import DevGalaxy, ExpGalaxy
            from tractor.sersic import SersicGalaxy
            ee = []
            ec = []
            cc = None
            green = (0.2,1,0.2)
            if isinstance(src, PointSource):
                plt.plot(x, y, 'o', mfc=green, mec='k', alpha=0.6)
            elif isinstance(src, ExpGalaxy):
                ee = [src.shape]
                cc = '0.8'
                ec = [cc]
            elif isinstance(src, DevGalaxy):
                ee = [src.shape]
                cc = green
                ec = [cc]
            elif isinstance(src, SersicGalaxy):
                ee = [src.shape]
                cc = 'm'
                ec = [cc]
            else:
                print('Unknown type:', src)
                continue
            for e,c in zip(ee, ec):
                G = e.getRaDecBasis()
                angle = np.linspace(0, 2.*np.pi, 60)
                xy = np.vstack((np.append([0,0,1], np.sin(angle)),
                                np.append([0,1,0], np.cos(angle)))).T
                rd = np.dot(G, xy.T).T
                r = rr + rd[:,0] * np.cos(np.deg2rad(dd))
                d = dd + rd[:,1]
                ok,xx,yy = targetwcs.radec2pixelxy(r, d)
                xx -= 1.
                yy -= 1.
                x1,x2,x3 = xx[:3]
                y1,y2,y3 = yy[:3]
                plt.plot([x3, x1, x2], [y3, y1, y2], '-', color=c)
                plt.plot(x1, y1, '.', color=cc, ms=3, alpha=0.6)
                xx = xx[3:]
                yy = yy[3:]
                plt.plot(xx, yy, '-', color=c)
        plt.axis(ax)
        ps.savefig()
    tnow = Time()
    debug('Aperture photometry wrap-up:', tnow-tlast)
    return dict(T=T, apertures_pix=apertures,
                apertures_arcsec=apertures_arcsec,
                maskbits=maskbits,
                version_header=version_header)
def _add_bit_description(header, BITS, bits, bnpat, bitpat, bitmapname):
for key,short,comm in bits:
header.add_record(
dict(name=bnpat % short, value=BITS[key],
comment='%s: %s' % (bitmapname, comm)))
revmap = dict([(bit,name) for name,bit in BITS.items()])
nicemap = dict([(k,c) for k,short,c in bits])
for bit in range(16):
bitval = 1<<bit
if not bitval in revmap:
continue
name = revmap[bitval]
nice = nicemap.get(name, '')
header.add_record(
dict(name=bitpat % bit, value=name,
comment='%s bit %i (0x%x): %s' % (bitmapname, bit, bitval, nice)))
def get_fiber_fluxes(cat, T, targetwcs, H, W, pixscale, bands,
                     fibersize=1.5, seeing=1., year=2020.0,
                     plots=False, ps=None):
    """Synthesize fiber fluxes by photometering model images in fixed seeing.

    Each source model is rendered into a fake image with a Gaussian PSF of
    FWHM *seeing* arcsec, then aperture-photometered in a fiber of diameter
    *fibersize* arcsec.  *year* sets the epoch at which Gaia star models are
    evaluated.

    Returns (fiberflux, fibertotflux), each of shape (len(cat), len(bands)),
    float32: *fiberflux* measures each source's own model in isolation;
    *fibertotflux* measures the coadd of all models (ie, includes light from
    neighboring sources).
    """
    from tractor import GaussianMixturePSF
    from legacypipe.survey import LegacySurveyWcs
    import astropy.time
    from tractor.tractortime import TAITime
    from tractor.image import Image
    from tractor.basics import LinearPhotoCal
    import photutils
    # Create a fake tim for each band to construct the models in 1" seeing
    # For Gaia stars, we need to give a time for evaluating the models.
    mjd_tai = astropy.time.Time(year, format='jyear').tai.mjd
    tai = TAITime(None, mjd=mjd_tai)
    # 1" FWHM -> pixels FWHM -> pixels sigma -> pixels variance
    v = ((seeing / pixscale) / 2.35)**2
    data = np.zeros((H,W), np.float32)
    inverr = np.ones((H,W), np.float32)
    psf = GaussianMixturePSF(1., 0., 0., v, v, 0.)
    wcs = LegacySurveyWcs(targetwcs, tai)
    faketim = Image(data=data, inverr=inverr, psf=psf,
                    wcs=wcs, photocal=LinearPhotoCal(1., bands[0]))
    # A model image (containing all sources) for each band
    modimgs = [np.zeros((H,W), np.float32) for b in bands]
    # A blank image that we'll use for rendering the flux from a single model
    # (note: this aliases the fake tim's data array, which stays zero except
    # while a single model is being photometered below).
    onemod = data
    # Results go here!
    fiberflux = np.zeros((len(cat),len(bands)), np.float32)
    fibertotflux = np.zeros((len(cat),len(bands)), np.float32)
    # Fiber diameter in arcsec -> radius in pix
    fiberrad = (fibersize / pixscale) / 2.
    # For each source, compute and measure its model, and accumulate
    for isrc,src in enumerate(cat):
        if src is None:
            continue
        # This works even if bands[0] has zero flux (or no overlapping
        # images)
        ums = src.getUnitFluxModelPatches(faketim)
        assert(len(ums) == 1)
        patch = ums[0]
        if patch is None:
            continue
        br = src.getBrightness()
        for iband,(modimg,band) in enumerate(zip(modimgs,bands)):
            flux = br.getFlux(band)
            flux_iv = T.flux_ivar[isrc, iband]
            if flux <= 0 or flux_iv <= 0:
                # Skip non-detections / invalid fluxes.
                continue
            # Accumulate into image containing all models
            patch.addTo(modimg, scale=flux)
            # Add to blank image & photometer
            patch.addTo(onemod, scale=flux)
            sx,sy = faketim.getWcs().positionToPixel(src.getPosition())
            aper = photutils.CircularAperture((sx, sy), fiberrad)
            p = photutils.aperture_photometry(onemod, aper)
            f = p.field('aperture_sum')[0]
            if not np.isfinite(f):
                # If the source is off the brick (eg, ref sources), can be NaN
                continue
            fiberflux[isrc,iband] = f
            # Blank out the image again
            x0,x1,y0,y1 = patch.getExtent()
            onemod[y0:y1, x0:x1] = 0.
    # Now photometer the accumulated images
    # Aperture photometry locations
    apxy = np.vstack((T.bx, T.by)).T
    aper = photutils.CircularAperture(apxy, fiberrad)
    for iband,modimg in enumerate(modimgs):
        p = photutils.aperture_photometry(modimg, aper)
        f = p.field('aperture_sum')
        # If the source is off the brick (eg, ref sources), can be NaN
        I = np.isfinite(f)
        if len(I):
            fibertotflux[I, iband] = f[I]
    if plots:
        import pylab as plt
        for modimg,band in zip(modimgs, bands):
            plt.clf()
            plt.imshow(modimg, interpolation='nearest', origin='lower',
                       vmin=0, vmax=0.1, cmap='gray')
            plt.title('Fiberflux model for band %s' % band)
            ps.savefig()
        for iband,band in enumerate(bands):
            plt.clf()
            flux = [src.getBrightness().getFlux(band) for src in cat]
            plt.plot(flux, fiberflux[:,iband], 'b.', label='FiberFlux')
            plt.plot(flux, fibertotflux[:,iband], 'gx', label='FiberTotFlux')
            plt.plot(flux, T.apflux[:,iband, 1], 'r+', label='Apflux(1.5)')
            plt.legend()
            plt.xlabel('Catalog total flux')
            plt.ylabel('Aperture flux')
            plt.title('Fiberflux: %s band' % band)
            plt.xscale('symlog')
            plt.yscale('symlog')
            ps.savefig()
    return fiberflux, fibertotflux
def _depth_histogram(brick, targetwcs, bands, detivs, galdetivs):
    """Histogram the 5-sigma depth maps, per band, for point sources & galaxies.

    Depths are converted from detection inverse-variance maps to AB
    magnitudes and binned in 0.1-mag steps.  When the brick carries RA,Dec
    bounds, only pixels unique to this brick are counted.  Returns a
    fits_table with depthlo/depthhi bin edges and one
    counts_{ptsrc,gal}_{band} column per band.
    """
    # Flat indices of the brick's unique pixels (None -> use all pixels).
    uniq = None
    if hasattr(brick, 'ra1'):
        debug('Computing unique brick pixels...')
        H,W = targetwcs.shape
        uniq = np.flatnonzero(find_unique_pixels(
            targetwcs, W, H, None,
            brick.ra1, brick.ra2, brick.dec1, brick.dec2))
        debug(len(uniq), 'of', W*H, 'pixels are unique to this brick')
    # Magnitude bins: 0.1-mag steps from 20 to 25, with catch-all end bins.
    depthbins = np.arange(20, 25.001, 0.1)
    depthbins[0] = 0.
    depthbins[-1] = 100.
    D = fits_table()
    D.depthlo = depthbins[:-1].astype(np.float32)
    D.depthhi = depthbins[1: ].astype(np.float32)
    for band,detiv,galdetiv in zip(bands,detivs,galdetivs):
        for det,name in [(detiv, 'ptsrc'), (galdetiv, 'gal')]:
            # 5-sigma detection limit as a flux in nanomaggies...
            with np.errstate(divide='ignore'):
                sigflux = 5. / np.sqrt(det)
            # ...converted to AB magnitudes.
            depth = -2.5 * (np.log10(sigflux) - 9)
            # Pixels with no coverage get a very bright detection limit.
            depth[np.logical_not(np.isfinite(depth))] = 0.
            if uniq is not None:
                depth = depth.flat[uniq]
            if len(depth):
                debug(band, name, 'band depth map: percentiles',
                      np.percentile(depth, np.arange(0,101, 10)))
            D.set('counts_%s_%s' % (name, band),
                  np.histogram(depth, bins=depthbins)[0].astype(np.int32))
    return D
def stage_wise_forced(
survey=None,
cat=None,
T=None,
targetwcs=None,
targetrd=None,
W=None, H=None,
pixscale=None,
brickname=None,
unwise_dir=None,
unwise_tr_dir=None,
unwise_modelsky_dir=None,
brick=None,
wise_ceres=True,
unwise_coadds=True,
version_header=None,
maskbits=None,
mp=None,
record_event=None,
wise_checkpoint_filename=None,
wise_checkpoint_period=600,
ps=None,
plots=False,
**kwargs):
'''
After the model fits are finished, we can perform forced
photometry of the unWISE coadds.
'''
from legacypipe.unwise import unwise_phot, collapse_unwise_bitmask, unwise_tiles_touching_wcs
from legacypipe.survey import wise_apertures_arcsec
from tractor import NanoMaggies
record_event and record_event('stage_wise_forced: starting')
_add_stage_version(version_header, 'WISE', 'wise_forced')
if not plots:
ps = None
tiles = unwise_tiles_touching_wcs(targetwcs)
info('Cut to', len(tiles), 'unWISE tiles')
# the way the roiradec box is used, the min/max order doesn't matter
roiradec = [targetrd[0,0], targetrd[2,0], targetrd[0,1], targetrd[2,1]]
# Sources to photometer
do_phot = T.regular.copy()
# Drop sources within the CLUSTER mask from forced photometry.
Icluster = None
if maskbits is not None:
incluster = (maskbits & MASKBITS['CLUSTER'] > 0)
if np.any(incluster):
info('Checking for sources inside CLUSTER mask')
ra = np.array([src.getPosition().ra for src in cat])
dec = np.array([src.getPosition().dec for src in cat])
ok,xx,yy = targetwcs.radec2pixelxy(ra, dec)
xx = np.round(xx - 1).astype(int)
yy = np.round(yy - 1).astype(int)
I = np.flatnonzero(ok * (xx >= 0)*(xx < W) * (yy >= 0)*(yy < H))
if len(I):
Icluster = I[incluster[yy[I], xx[I]]]
info('Found', len(Icluster), 'of', len(cat), 'sources inside CLUSTER mask')
do_phot[Icluster] = False
Nskipped = len(T) - np.sum(do_phot)
wcat = []
for i in np.flatnonzero(do_phot):
src = cat[i]
src = src.copy()
src.setBrightness(NanoMaggies(w=1.))
wcat.append(src)
# use Aaron's WISE pixelized PSF model (unwise_psf repository)?
wpixpsf = True
# Create list of groups-of-tiles to photometer
args = []
# Skip if $UNWISE_COADDS_DIR or --unwise-dir not set.
if unwise_dir is not None:
wtiles = tiles.copy()
wtiles.unwise_dir = np.array([unwise_dir]*len(tiles))
for band in [1,2,3,4]:
get_masks = targetwcs if (band == 1) else None
args.append(((-1,band),
(wcat, wtiles, band, roiradec, wise_ceres, wpixpsf,
unwise_coadds, get_masks, ps, True,
unwise_modelsky_dir)))
# Add time-resolved WISE coadds
# Skip if $UNWISE_COADDS_TIMERESOLVED_DIR or --unwise-tr-dir not set.
eargs = []
if unwise_tr_dir is not None:
tdir = unwise_tr_dir
TR = fits_table(os.path.join(tdir, 'time_resolved_atlas.fits'))
debug('Read', len(TR), 'time-resolved WISE coadd tiles')
TR.cut(np.array([t in tiles.coadd_id for t in TR.coadd_id]))
debug('Cut to', len(TR), 'time-resolved vs', len(tiles), 'full-depth')
assert(len(TR) == len(tiles))
# Ugly -- we need to look up the "{ra,dec}[12]" fields from the non-TR
# table to support unique areas of tiles.
imap = dict((c,i) for i,c in enumerate(tiles.coadd_id))
I = np.array([imap[c] for c in TR.coadd_id])
for c in ['ra1','ra2','dec1','dec2', 'crpix_w1', 'crpix_w2']:
TR.set(c, tiles.get(c)[I])
# How big do we need to make the WISE time-resolved arrays?
debug('TR epoch_bitmask:', TR.epoch_bitmask)
# axis= arg to np.count_nonzero is new in numpy 1.12
Nepochs = max(np.atleast_1d([np.count_nonzero(e)
for e in TR.epoch_bitmask]))
_,ne = TR.epoch_bitmask.shape
info('Max number of time-resolved unWISE epochs for these tiles:', Nepochs)
debug('epoch bitmask length:', ne)
# Add time-resolved coadds
for band in [1,2]:
# W1 is bit 0 (value 0x1), W2 is bit 1 (value 0x2)
bitmask = (1 << (band-1))
# The epoch_bitmask entries are not *necessarily*
# contiguous, and not necessarily aligned for the set of
# overlapping tiles. We will align the non-zero epochs of
# the tiles. (eg, brick 2437p425 vs coadds 2426p424 &
# 2447p424 in NEO-2).
# find the non-zero epochs for each overlapping tile
epochs = np.empty((len(TR), Nepochs), int)
epochs[:,:] = -1
for i in range(len(TR)):
ei = np.flatnonzero(TR.epoch_bitmask[i,:] & bitmask)
epochs[i,:len(ei)] = ei
for ie in range(Nepochs):
# Which tiles have images for this epoch?
I = np.flatnonzero(epochs[:,ie] >= 0)
if len(I) == 0:
continue
debug('Epoch index %i: %i tiles:' % (ie, len(I)), TR.coadd_id[I],
'epoch numbers', epochs[I,ie])
eptiles = TR[I]
eptiles.unwise_dir = np.array([os.path.join(tdir, 'e%03i'%ep)
for ep in epochs[I,ie]])
eargs.append(((ie,band),
(wcat, eptiles, band, roiradec,
wise_ceres, wpixpsf, False, None, ps, False, unwise_modelsky_dir)))
runargs = args + eargs
info('unWISE forced phot: total of', len(runargs), 'images to photometer')
photresults = {}
# Check for existing checkpoint file.
if wise_checkpoint_filename and os.path.exists(wise_checkpoint_filename):
from astrometry.util.file import unpickle_from_file
info('Reading', wise_checkpoint_filename)
try:
photresults = unpickle_from_file(wise_checkpoint_filename)
info('Read', len(photresults), 'results from checkpoint file', wise_checkpoint_filename)
except:
import traceback
print('Failed to read checkpoint file', wise_checkpoint_filename)
traceback.print_exc()
keepargs = [(key,a) for (key,a) in runargs if not key in photresults]
info('Running', len(keepargs), 'of', len(runargs), 'images not in checkpoint')
runargs = keepargs
# Run the forced photometry!
record_event and record_event('stage_wise_forced: photometry')
#phots = mp.map(unwise_phot, args + eargs)
if wise_checkpoint_filename is None or mp is None:
res = mp.map(unwise_phot, runargs)
for k,v in res:
photresults[k] = v
del res
elif len(runargs) > 0:
res = mp.imap_unordered(unwise_phot, runargs)
from astrometry.util.ttime import CpuMeas
import multiprocessing
import concurrent.futures
last_checkpoint = CpuMeas()
n_finished = 0
n_finished_total = 0
while True:
# Time to write a checkpoint file? (And have something to write?)
tnow = CpuMeas()
dt = tnow.wall_seconds_since(last_checkpoint)
if dt >= wise_checkpoint_period and n_finished > 0:
# Write checkpoint!
info('Writing checkpoint:', n_finished, 'new results; total for this run', n_finished_total, 'total:', len(photresults))
try:
_write_checkpoint(photresults, wise_checkpoint_filename)
last_checkpoint = tnow
dt = 0.
n_finished = 0
except:
print('Failed to write checkpoint file', wise_checkpoint_filename)
import traceback
traceback.print_exc()
# Wait for results (with timeout)
try:
info('waiting for result (%i to go)...' % (len(runargs)-n_finished_total))
if mp.pool is not None:
timeout = max(1, wise_checkpoint_period - dt)
# If we don't have any new results to write, wait indefinitely
if n_finished == 0:
timeout = None
r = res.next(timeout)
else:
r = next(res)
k,v = r
info('got result for epoch,band', k)
photresults[k] = v
n_finished += 1
n_finished_total += 1
except StopIteration:
#info('got StopIteration')
break
except multiprocessing.TimeoutError:
#info('got TimeoutError')
continue
except concurrent.futures.TimeoutError:
#info('got MPI TimeoutError')
continue
except TimeoutError:
continue
except:
import traceback
traceback.print_exc()
# Write checkpoint when done!
_write_checkpoint(photresults, wise_checkpoint_filename)
info('Computed', n_finished_total, 'new results; wrote', len(photresults), 'to checkpoint')
phots = [photresults[k] for k,a in (args + eargs)]
record_event and record_event('stage_wise_forced: results')
# Unpack results...
WISE = None
wise_mask_maps = None
if len(phots):
# The "phot" results for the full-depth coadds are one table per
# band. Merge all those columns.
wise_models = []
for i,p in enumerate(phots[:len(args)]):
if p is None:
(wcat,tiles,band) = args[i][:3]
info('"None" result from WISE forced phot:', tiles, band)
continue
if unwise_coadds:
wise_models.extend(p.models)
if p.maskmap is not None:
wise_mask_maps = p.maskmap
if WISE is None:
WISE = p.phot
else:
# remove duplicates
p.phot.delete_column('wise_coadd_id')
# (with move_crpix -- Aaron's update astrometry -- the
# pixel positions can be *slightly* different per
# band. Ignoring that here.)
p.phot.delete_column('wise_x')
p.phot.delete_column('wise_y')
WISE.add_columns_from(p.phot)
if wise_mask_maps is not None:
wise_mask_maps = [
collapse_unwise_bitmask(wise_mask_maps, 1),
collapse_unwise_bitmask(wise_mask_maps, 2)]
if Nskipped > 0:
assert(len(WISE) == len(wcat))
WISE = _fill_skipped_values(WISE, Nskipped, do_phot)
assert(len(WISE) == len(cat))
assert(len(WISE) == len(T))
if unwise_coadds:
from legacypipe.coadds import UnwiseCoadd
# Create the WCS into which we'll resample the tiles.
# Same center as "targetwcs" but bigger pixel scale.
wpixscale = 2.75
rc,dc = targetwcs.radec_center()
ww = int(W * pixscale / wpixscale)
hh = int(H * pixscale / wpixscale)
wcoadds = UnwiseCoadd(rc, dc, ww, hh, wpixscale)
wcoadds.add(wise_models, unique=True)
apphot = wcoadds.finish(survey, brickname, version_header,
apradec=(T.ra,T.dec),
apertures=wise_apertures_arcsec/wpixscale)
api,apd,apr = apphot
for iband,band in enumerate([1,2,3,4]):
WISE.set('apflux_w%i' % band, api[iband])
WISE.set('apflux_resid_w%i' % band, apr[iband])
d = apd[iband]
iv = np.zeros_like(d)
iv[d != 0.] = 1./(d[d != 0]**2)
WISE.set('apflux_ivar_w%i' % band, iv)
# Look up mask values for sources
WISE.wise_mask = np.zeros((len(cat), 2), np.uint8)
WISE.wise_mask[T.in_bounds,0] = wise_mask_maps[0][T.iby[T.in_bounds], T.ibx[T.in_bounds]]
WISE.wise_mask[T.in_bounds,1] = wise_mask_maps[1][T.iby[T.in_bounds], T.ibx[T.in_bounds]]
# Unpack time-resolved results...
WISE_T = None
if len(phots) > len(args):
WISE_T = True
if WISE_T is not None:
WISE_T = fits_table()
phots = phots[len(args):]
# eargs contains [ (key,args) ]
for ((ie,_),_),r in zip(eargs, phots):
debug('Epoch', ie, 'photometry:')
if r is None:
debug('Failed.')
continue
assert(ie < Nepochs)
phot = r.phot
phot.delete_column('wise_coadd_id')
phot.delete_column('wise_x')
phot.delete_column('wise_y')
for c in phot.columns():
if not c in WISE_T.columns():
x = phot.get(c)
WISE_T.set(c, np.zeros((len(x), Nepochs), x.dtype))
X = WISE_T.get(c)
X[:,ie] = phot.get(c)
if Nskipped > 0:
assert(len(wcat) == len(WISE_T))
WISE_T = _fill_skipped_values(WISE_T, Nskipped, do_phot)
assert(len(WISE_T) == len(cat))
assert(len(WISE_T) == len(T))
debug('Returning: WISE', WISE)
debug('Returning: WISE_T', WISE_T)
return dict(WISE=WISE, WISE_T=WISE_T, wise_mask_maps=wise_mask_maps,
version_header=version_header,
wise_apertures_arcsec=wise_apertures_arcsec)
def _fill_skipped_values(WISE, Nskipped, do_phot):
    """
    Pad the WISE photometry table with blank rows for skipped sources,
    then reorder rows back into the original catalog order.

    *WISE*: photometry results table (photometered sources only).
    *Nskipped*: number of sources that were not photometered.
    *do_phot*: boolean array over the full catalog; True where the
    source was photometered.  Returns the padded, reordered table.
    """
    # Append Nskipped zero-filled rows for the !do_phot sources.  The
    # temporary 'nil' column just gives the empty table a length; the
    # 'fillzero' merge zero-fills all the real columns.
    pad = fits_table()
    pad.nil = np.zeros(Nskipped, bool)
    WISE = merge_tables([WISE, pad], columns='fillzero')
    WISE.delete_column('nil')
    # Build a permutation: photometered sources go back to their
    # original positions; the padded rows fill the !do_phot positions.
    order = np.full(len(WISE), -1, int)
    done = np.flatnonzero(do_phot)
    skipped = np.flatnonzero(np.logical_not(do_phot))
    order[done] = np.arange(len(done))
    order[skipped] = len(done) + np.arange(len(skipped))
    assert(np.all(order > -1))
    WISE.cut(order)
    return WISE
def stage_writecat(
    survey=None,
    version_header=None,
    release=None,
    T=None,
    WISE=None,
    WISE_T=None,
    maskbits=None,
    wise_mask_maps=None,
    apertures_arcsec=None,
    wise_apertures_arcsec=None,
    GALEX=None,
    galex_apertures_arcsec=None,
    cat=None, pixscale=None, targetwcs=None,
    W=None,H=None,
    bands=None, ps=None,
    plots=False,
    brickname=None,
    brickid=None,
    brick=None,
    invvars=None,
    gaia_stars=True,
    co_sky=None,
    record_event=None,
    **kwargs):
    '''
    Final stage in the pipeline: format results for the output
    catalog.

    Merges the WISE and GALEX forced-photometry results into the
    optical catalog *T*, writes the "maskbits" image, the
    "tractor-intermediate" and final "tractor" catalogs, optional
    galaxy-sim tables, and the per-brick checksums file.

    Returns a dict with the updated *T* and *version_header*.
    '''
    from legacypipe.catalog import prepare_fits_catalog
    from legacypipe.utils import copy_header_with_wcs, add_bits
    record_event and record_event('stage_writecat: starting')
    _add_stage_version(version_header, 'WCAT', 'writecat')
    assert(maskbits is not None)

    if wise_mask_maps is not None:
        # Add the WISE masks in!
        maskbits |= MASKBITS['WISEM1'] * (wise_mask_maps[0] != 0)
        maskbits |= MASKBITS['WISEM2'] * (wise_mask_maps[1] != 0)

    # Document the WISE mask bit meanings in the output header.
    version_header.add_record(dict(name='COMMENT', value='wisemask bits:'))
    wbits = [
        (0, 'BRIGHT',  'BRIGH', 'Bright star core/wings'),
        (1, 'SPIKE',   'SPIKE', 'PSF-based diffraction spike'),
        (2, 'GHOST',   'GHOST', 'Optical ghost'),
        (3, 'LATENT',  'LATNT', 'First latent'),
        (4, 'LATENT2', 'LATN2', 'Second latent image'),
        (5, 'HALO',    'HALO',  'AllWISE-like circular halo'),
        (6, 'SATUR',   'SATUR', 'Bright star saturation'),
        (7, 'SPIKE2',  'SPIK2', 'Geometric diffraction spike')]
    for bit,name,short,comm in wbits:
        version_header.add_record(dict(
            name='WB_%s' % short, value=1<<bit,
            comment='WISE mask bit %i: %s, %s' % (bit, name, comm)))
    for bit,name,_,comm in wbits:
        version_header.add_record(dict(
            name='WBIT_%i' % bit, value=name, comment='WISE: %s' % comm))

    # Record the meaning of ALLMASK/ANYMASK bits
    add_bits(version_header, DQ_BITS, 'allmask/anymask', 'AM', 'A')

    # create maskbits header
    hdr = copy_header_with_wcs(version_header, targetwcs)
    hdr.add_record(dict(name='IMTYPE', value='maskbits',
                        comment='LegacySurveys image type'))
    with survey.write_output('maskbits', brick=brickname, shape=maskbits.shape) as out:
        out.fits.write(maskbits, header=hdr, extname='MASKBITS')
        if wise_mask_maps is not None:
            out.fits.write(wise_mask_maps[0], extname='WISEM1')
            out.fits.write(wise_mask_maps[1], extname='WISEM2')
    del wise_mask_maps

    T_orig = T.copy()
    T = prepare_fits_catalog(cat, invvars, T, bands, force_keep=T.force_keep_source)
    # Override type for DUP objects
    T.type[T.dup] = 'DUP'

    # The "ra_ivar" values coming out of the tractor fits do *not*
    # have a cos(Dec) term -- ie, they give the inverse-variance on
    # the numerical value of RA -- so we want to make the ra_sigma
    # values smaller by multiplying by cos(Dec); so invvars are /=
    # cosdec^2
    T.ra_ivar /= np.cos(np.deg2rad(T.dec))**2

    # Compute fiber fluxes
    T.fiberflux, T.fibertotflux = get_fiber_fluxes(
        cat, T, targetwcs, H, W, pixscale, bands, plots=plots, ps=ps)

    # For reference *stars* only, plug in the reference-catalog inverse-variances.
    if 'ref_cat' in T.get_columns() and 'ra_ivar' in T_orig.get_columns():
        # I is a boolean mask over all of T (True for Gaia / Tycho-2 refs).
        I = np.isin(T.ref_cat, ['G2', 'T2'])
        # Bug fix: this guard was "if len(I):", which is always true for a
        # non-empty catalog since I has one entry per source; the intent is
        # "did any source match?".
        if np.any(I):
            T.ra_ivar [I] = T_orig.ra_ivar [I]
            T.dec_ivar[I] = T_orig.dec_ivar[I]

    # In oneblob.py we have a step where we zero out the fluxes for sources
    # with tiny "fracin" values. Repeat that here, but zero out more stuff...
    for iband,band in enumerate(bands):
        # we could do this on the 2d arrays...
        I = np.flatnonzero(T.fracin[:,iband] < 1e-3)
        debug('Zeroing out', len(I), 'objs in', band, 'band with small fracin.')
        if len(I):
            # zero out:
            T.flux[I,iband] = 0.
            T.flux_ivar[I,iband] = 0.
            # zero out fracin itself??

    # Build the primary header for the catalog files.
    primhdr = fitsio.FITSHDR()
    for r in version_header.records():
        primhdr.add_record(r)
    primhdr.add_record(dict(name='PRODTYPE', value='catalog',
                            comment='NOAO data product type'))

    if co_sky is not None:
        for band in bands:
            if band in co_sky:
                primhdr.add_record(dict(name='COSKY_%s' % band.upper(),
                                        value=co_sky[band],
                                        comment='Sky level estimated (+subtracted) from coadd'))

    # Record the aperture-photometry radii used, per survey.
    for i,ap in enumerate(apertures_arcsec):
        primhdr.add_record(dict(name='APRAD%i' % i, value=ap,
                                comment='(optical) Aperture radius, in arcsec'))
    if wise_apertures_arcsec is not None:
        for i,ap in enumerate(wise_apertures_arcsec):
            primhdr.add_record(dict(name='WAPRAD%i' % i, value=ap,
                                    comment='(unWISE) Aperture radius, in arcsec'))
    if galex_apertures_arcsec is not None:
        for i,ap in enumerate(galex_apertures_arcsec):
            primhdr.add_record(dict(name='GAPRAD%i' % i, value=ap,
                                    comment='GALEX aperture radius, in arcsec'))

    if WISE is not None:
        # Convert WISE fluxes from Vega to AB.
        # http://wise2.ipac.caltech.edu/docs/release/allsky/expsup/sec4_4h.html#conv2ab
        vega_to_ab = dict(w1=2.699,
                          w2=3.339,
                          w3=5.174,
                          w4=6.620)
        for band in [1,2,3,4]:
            primhdr.add_record(dict(
                name='WISEAB%i' % band, value=vega_to_ab['w%i' % band],
                comment='WISE Vega to AB conv for band %i' % band))
        # Copy columns:
        for c in ['wise_coadd_id', 'wise_x', 'wise_y', 'wise_mask']:
            T.set(c, WISE.get(c))
        for band in [1,2,3,4]:
            # Apply the Vega-to-AB shift *while* copying columns from
            # WISE to T.
            dm = vega_to_ab['w%i' % band]
            fluxfactor = 10.** (dm / -2.5)
            # fluxes
            c = t = 'flux_w%i' % band
            T.set(t, WISE.get(c) * fluxfactor)
            if WISE_T is not None and band <= 2:
                t = 'lc_flux_w%i' % band
                T.set(t, WISE_T.get(c) * fluxfactor)
            # ivars scale inversely with the flux-factor squared
            c = t = 'flux_ivar_w%i' % band
            T.set(t, WISE.get(c) / fluxfactor**2)
            if WISE_T is not None and band <= 2:
                t = 'lc_flux_ivar_w%i' % band
                T.set(t, WISE_T.get(c) / fluxfactor**2)
            # This is in 1/nanomaggies**2 units also
            c = t = 'psfdepth_w%i' % band
            T.set(t, WISE.get(c) / fluxfactor**2)
            if 'apflux_w%i'%band in WISE.get_columns():
                t = c = 'apflux_w%i' % band
                T.set(t, WISE.get(c) * fluxfactor)
                t = c = 'apflux_resid_w%i' % band
                T.set(t, WISE.get(c) * fluxfactor)
                t = c = 'apflux_ivar_w%i' % band
                T.set(t, WISE.get(c) / fluxfactor**2)
        # Copy/rename more columns
        for cin,cout in [('nobs_w%i',        'nobs_w%i'    ),
                         ('profracflux_w%i', 'fracflux_w%i'),
                         ('prochi2_w%i',     'rchisq_w%i'  )]:
            for band in [1,2,3,4]:
                T.set(cout % band, WISE.get(cin % band))
        if WISE_T is not None:
            # Time-resolved ("light-curve") columns, W1/W2 only.
            for cin,cout in [('nobs_w%i',        'lc_nobs_w%i'),
                             ('profracflux_w%i', 'lc_fracflux_w%i'),
                             ('prochi2_w%i',     'lc_rchisq_w%i'),
                             ('mjd_w%i',         'lc_mjd_w%i'),]:
                for band in [1,2]:
                    T.set(cout % band, WISE_T.get(cin % band))
        # Done with these now!
        WISE_T = None
        WISE = None

    if GALEX is not None:
        for c in ['flux_nuv', 'flux_ivar_nuv', 'flux_fuv', 'flux_ivar_fuv',
                  'apflux_nuv', 'apflux_resid_nuv', 'apflux_ivar_nuv',
                  'apflux_fuv', 'apflux_resid_fuv', 'apflux_ivar_fuv', ]:
            T.set(c, GALEX.get(c))
        GALEX = None

    # A source is "primary" in this brick if its center falls inside the
    # brick's unique RA,Dec boundaries.
    T.brick_primary = ((T.ra >= brick.ra1 ) * (T.ra < brick.ra2) *
                       (T.dec >= brick.dec1) * (T.dec < brick.dec2))
    # Look up the maskbits value at each source's (clipped) pixel position.
    H,W = maskbits.shape
    T.maskbits = maskbits[np.clip(T.iby, 0, H-1).astype(int),
                          np.clip(T.ibx, 0, W-1).astype(int)]
    del maskbits

    # Set Sersic indices for all galaxy types.
    # sigh, bytes vs strings. In py3, T.type (dtype '|S3') are bytes.
    T.sersic[np.array([t in ['DEV',b'DEV'] for t in T.type])] = 4.0
    T.sersic[np.array([t in ['EXP',b'EXP'] for t in T.type])] = 1.0
    T.sersic[np.array([t in ['REX',b'REX'] for t in T.type])] = 1.0

    # Assemble the FITBITS bitmask describing how each source was fit.
    T.fitbits = np.zeros(len(T), np.int16)
    T.fitbits[T.forced_pointsource] |= FITBITS['FORCED_POINTSOURCE']
    T.fitbits[T.fit_background] |= FITBITS['FIT_BACKGROUND']
    T.fitbits[T.hit_r_limit] |= FITBITS['HIT_RADIUS_LIMIT']
    T.fitbits[T.hit_ser_limit] |= FITBITS['HIT_SERSIC_LIMIT']
    # WALKER/RUNNER: how far did the source move during fitting?
    moved = np.hypot(T.bx - T.bx0, T.by - T.by0)
    # radii in pixels:
    walk_radius = 1. / pixscale
    run_radius = 2.5 / pixscale
    T.fitbits[moved > walk_radius] |= FITBITS['WALKER']
    T.fitbits[moved > run_radius ] |= FITBITS['RUNNER']
    # do we have Gaia?
    if 'pointsource' in T.get_columns():
        T.fitbits[T.pointsource] |= FITBITS['GAIA_POINTSOURCE']
    T.fitbits[T.iterative] |= FITBITS['ITERATIVE']
    for col,bit in [('freezeparams', 'FROZEN'),
                    ('isbright', 'BRIGHT'),
                    ('ismedium', 'MEDIUM'),
                    ('isgaia', 'GAIA'),
                    ('istycho', 'TYCHO2'),
                    ('islargegalaxy', 'LARGEGALAXY')]:
        if not col in T.get_columns():
            continue
        T.fitbits[T.get(col)] |= FITBITS[bit]

    with survey.write_output('tractor-intermediate', brick=brickname) as out:
        # Sorted by objid for reproducible output.
        T[np.argsort(T.objid)].writeto(None, fits_object=out.fits, primheader=primhdr)

    # After writing tractor-i file, drop (reference) sources outside the brick.
    T.cut(T.in_bounds)

    # The "format_catalog" code expects all lower-case column names...
    for c in T.columns():
        if c != c.lower():
            T.rename(c, c.lower())

    from legacypipe.format_catalog import format_catalog
    with survey.write_output('tractor', brick=brickname) as out:
        format_catalog(T[np.argsort(T.objid)], None, primhdr, bands,
                       survey.allbands, None, release,
                       write_kwargs=dict(fits_object=out.fits),
                       N_wise_epochs=17, motions=gaia_stars, gaia_tagalong=True)

    # write fits file with galaxy-sim stuff (xy bounds of each sim)
    if 'sims_xy' in T.get_columns():
        sims_data = fits_table()
        sims_data.sims_xy = T.sims_xy
        with survey.write_output('galaxy-sims', brick=brickname) as out:
            sims_data.writeto(None, fits_object=out.fits)

    # produce per-brick checksum file.
    with survey.write_output('checksums', brick=brickname, hashsum=False) as out:
        # Use a context manager so the handle is closed even if a write
        # fails (the previous open()/close() pair leaked it on error).
        with open(out.fn, 'w') as f:
            # Write our pre-computed hashcodes.
            for fn,hashsum in survey.output_file_hashes.items():
                f.write('%s *%s\n' % (hashsum, fn))

    record_event and record_event('stage_writecat: done')
    return dict(T=T, version_header=version_header)
def stage_checksum(
    survey=None,
    brickname=None,
    **kwargs):
    '''
    For debugging / special-case processing, write out the current checksums file.

    Parameters
    ----------
    survey : LegacySurveyData
        Provides `write_output` (output-file locations) and
        `output_file_hashes`, a mapping of filename -> precomputed hash.
    brickname : string
        Brick name used to locate the checksums output file.
    '''
    # produce per-brick checksum file.
    with survey.write_output('checksums', brick=brickname, hashsum=False) as out:
        # Use a context manager so the file handle is closed even if a
        # write fails (the previous open()/close() pair leaked it on error).
        with open(out.fn, 'w') as f:
            # Write our pre-computed hashcodes, in "md5sum" style:
            # "<hash> *<filename>".
            for fn,hashsum in survey.output_file_hashes.items():
                f.write('%s *%s\n' % (hashsum, fn))
def run_brick(brick, survey, radec=None, pixscale=0.262,
              width=3600, height=3600,
              survey_blob_mask=None,
              release=None,
              zoom=None,
              bands=None,
              nblobs=None, blob=None, blobxy=None, blobradec=None, blobid=None,
              max_blobsize=None,
              nsigma=6,
              saddle_fraction=0.1,
              saddle_min=2.,
              blob_dilate=None,
              subsky_radii=None,
              reoptimize=False,
              iterative=False,
              wise=True,
              outliers=True,
              cache_outliers=False,
              lanczos=True,
              blob_image=False,
              blob_mask=False,
              minimal_coadds=False,
              do_calibs=True,
              old_calibs_ok=False,
              write_metrics=True,
              gaussPsf=False,
              pixPsf=True,
              hybridPsf=True,
              normalizePsf=True,
              apodize=False,
              splinesky=True,
              subsky=True,
              ubercal_sky=False,
              constant_invvar=False,
              tycho_stars=True,
              gaia_stars=True,
              large_galaxies=True,
              large_galaxies_force_pointsource=True,
              fitoncoadds_reweight_ivar=True,
              less_masking=False,
              nsatur=None,
              fit_on_coadds=False,
              coadd_tiers=None,
              min_mjd=None, max_mjd=None,
              unwise_coadds=True,
              bail_out=False,
              ceres=True,
              wise_ceres=True,
              galex_ceres=True,
              unwise_dir=None,
              unwise_tr_dir=None,
              unwise_modelsky_dir=None,
              galex=False,
              galex_dir=None,
              threads=None,
              plots=False, plots2=False, coadd_bw=False,
              plot_base=None, plot_number=0,
              command_line=None,
              read_parallel=True,
              max_memory_gb=None,
              record_event=None,
              # These are for the 'stages' infrastructure
              pickle_pat='pickles/runbrick-%(brick)s-%%(stage)s.pickle',
              stages=None,
              force=None, forceall=False, write_pickles=True,
              checkpoint_filename=None,
              checkpoint_period=None,
              wise_checkpoint_filename=None,
              wise_checkpoint_period=None,
              prereqs_update=None,
              stagefunc = None,
              pool = None,
              ):
    '''Run the full Legacy Survey data reduction pipeline.

    The pipeline is built out of "stages" that run in sequence.  By
    default, this function will cache the result of each stage in a
    (large) pickle file.  If you re-run, it will read from the
    prerequisite pickle file rather than re-running the prerequisite
    stage.  This can yield faster debugging times, but you almost
    certainly want to turn it off (with `writePickles=False,
    forceall=True`) in production.

    Parameters
    ----------
    brick : string
        Brick name such as '2090m065'.  Can be None if *radec* is given.
    survey : a "LegacySurveyData" object (see common.LegacySurveyData), which is in
        charge of the list of bricks and CCDs to be handled, and where output files
        should be written.
    radec : tuple of floats (ra,dec)
        RA,Dec center of the custom region to run.
    pixscale : float
        Brick pixel scale, in arcsec/pixel.  Default = 0.262
    width, height : integers
        Brick size in pixels.  Default of 3600 pixels (with the default pixel
        scale of 0.262) leads to a slight overlap between bricks.
    zoom : list of four integers
        Pixel coordinates [xlo,xhi, ylo,yhi] of the brick subimage to run.
    bands : string
        Filter (band) names to include; default is "grz".

    Notes
    -----
    You must specify the region of sky to work on, via one of:

    - *brick*: string, brick name such as '2090m065'
    - *radec*: tuple of floats; RA,Dec center of the custom region to run

    If *radec* is given, *brick* should be *None*.  If *brick* is given,
    that brick`s RA,Dec center will be looked up in the
    survey-bricks.fits file.

    You can also change the size of the region to reduce:

    - *pixscale*: float, brick pixel scale, in arcsec/pixel.
    - *width* and *height*: integers; brick size in pixels.  3600 pixels
      (with the default pixel scale of 0.262) leads to a slight overlap
      between bricks.
    - *zoom*: list of four integers, [xlo,xhi, ylo,yhi] of the brick
      subimage to run.

    If you want to measure only a subset of the astronomical objects,
    you can use:

    - *nblobs*: None or int; for debugging purposes, only fit the
      first N blobs.
    - *blob*: int; for debugging purposes, start with this blob index.
    - *blobxy*: list of (x,y) integer tuples; only run the blobs
      containing these pixels.
    - *blobradec*: list of (RA,Dec) tuples; only run the blobs
      containing these coordinates.

    Other options:

    - *max_blobsize*: int; ignore blobs with more than this many pixels
    - *nsigma*: float; detection threshold in sigmas.
    - *wise*: boolean; run WISE forced photometry?
    - *do_calibs*: boolean; run the calibration preprocessing steps?
    - *old_calibs_ok*: boolean; allow/use old calibration frames?
    - *write_metrics*: boolean; write out a variety of useful metrics
    - *gaussPsf*: boolean; use a simpler single-component Gaussian PSF model?
    - *pixPsf*: boolean; use the pixelized PsfEx PSF model and FFT convolution?
    - *hybridPsf*: boolean; use combo pixelized PsfEx + Gaussian approx model
    - *normalizePsf*: boolean; make PsfEx model have unit flux
    - *splinesky*: boolean; use the splined sky model (default is constant)?
    - *subsky*: boolean; subtract the sky model when reading in tims (tractor images)?
    - *ceres*: boolean; use Ceres Solver when possible?
    - *wise_ceres*: boolean; use Ceres Solver for unWISE forced photometry?
    - *galex_ceres*: boolean; use Ceres Solver for GALEX forced photometry?
    - *unwise_dir*: string; where to look for unWISE coadd files.
      This may be a colon-separated list of directories to search in
      order.
    - *unwise_tr_dir*: string; where to look for time-resolved
      unWISE coadd files.  This may be a colon-separated list of
      directories to search in order.
    - *unwise_modelsky_dir*: string; where to look for the unWISE sky background
      maps.  The default is to look in the "wise/modelsky" subdirectory of the
      calibration directory.
    - *threads*: integer; how many CPU cores to use

    Plotting options:

    - *coadd_bw*: boolean: if only one band is available, make B&W coadds?
    - *plots*: boolean; make a bunch of plots?
    - *plots2*: boolean; make a bunch more plots?
    - *plot_base*: string, default brick-BRICK, the plot filename prefix.
    - *plot_number*: integer, default 0, starting number for plot filenames.

    Options regarding the "stages":

    - *pickle_pat*: string; filename for 'pickle' files
    - *stages*: list of strings; stages (functions stage_*) to run.
    - *force*: list of strings; prerequisite stages that will be run
      even if pickle files exist.
    - *forceall*: boolean; run all stages, ignoring all pickle files.
    - *write_pickles*: boolean; write pickle files after each stage?

    Raises
    ------
    RunbrickError
        If an invalid brick name is given.
    NothingToDoError
        If no CCDs, or no photometric CCDs, overlap the given brick or region.
    '''
    from astrometry.util.stages import CallGlobalTime, runstage
    from astrometry.util.multiproc import multiproc
    from astrometry.util.plotutils import PlotSequence

    # *initargs* are passed to the first stage (stage_tims)
    # so should be quantities that shouldn't get updated from their pickled
    # values.
    initargs = {}
    # *kwargs* update the pickled values from previous stages
    kwargs = {}

    if force is None:
        force = []
    if stages is None:
        stages=['writecat']
    # Stages explicitly requested are also forced to re-run.
    forceStages = [s for s in stages]
    forceStages.extend(force)
    if forceall:
        kwargs.update(forceall=True)
    if bands is None:
        bands = ['g','r','z']

    if radec is not None:
        # Custom region: parse RA,Dec (accepting sexagesimal strings) and
        # synthesize a "custom-..." brick name if none was given.
        assert(len(radec) == 2)
        ra,dec = radec
        try:
            ra = float(ra)
        except:
            from astrometry.util.starutil_numpy import hmsstring2ra
            ra = hmsstring2ra(ra)
        try:
            dec = float(dec)
        except:
            from astrometry.util.starutil_numpy import dmsstring2dec
            dec = dmsstring2dec(dec)
        info('Parsed RA,Dec', ra,dec)
        initargs.update(ra=ra, dec=dec)
        if brick is None:
            brick = ('custom-%06i%s%05i' %
                     (int(1000*ra), 'm' if dec < 0 else 'p',
                      int(1000*np.abs(dec))))
    initargs.update(brickname=brick, survey=survey)

    if stagefunc is None:
        # Default: dispatch stage names to the stage_* functions in this module.
        stagefunc = CallGlobalTime('stage_%s', globals())

    plot_base_default = 'brick-%(brick)s'
    if plot_base is None:
        plot_base = plot_base_default
    ps = PlotSequence(plot_base % dict(brick=brick))
    initargs.update(ps=ps)
    if plot_number:
        ps.skipto(plot_number)

    if release is None:
        release = survey.get_default_release()
        if release is None:
            release = 9999

    if fit_on_coadds:
        # Implied options!
        #subsky = False
        large_galaxies = True
        large_galaxies_force_pointsource = False

    # All the run-time options that the individual stages consume.
    kwargs.update(ps=ps, nsigma=nsigma, saddle_fraction=saddle_fraction,
                  saddle_min=saddle_min,
                  blob_dilate=blob_dilate,
                  subsky_radii=subsky_radii,
                  survey_blob_mask=survey_blob_mask,
                  gaussPsf=gaussPsf, pixPsf=pixPsf, hybridPsf=hybridPsf,
                  release=release,
                  normalizePsf=normalizePsf,
                  apodize=apodize,
                  constant_invvar=constant_invvar,
                  splinesky=splinesky,
                  subsky=subsky,
                  ubercal_sky=ubercal_sky,
                  tycho_stars=tycho_stars,
                  gaia_stars=gaia_stars,
                  large_galaxies=large_galaxies,
                  large_galaxies_force_pointsource=large_galaxies_force_pointsource,
                  fitoncoadds_reweight_ivar=fitoncoadds_reweight_ivar,
                  less_masking=less_masking,
                  min_mjd=min_mjd, max_mjd=max_mjd,
                  coadd_tiers=coadd_tiers,
                  nsatur=nsatur,
                  reoptimize=reoptimize,
                  iterative=iterative,
                  outliers=outliers,
                  cache_outliers=cache_outliers,
                  use_ceres=ceres,
                  wise_ceres=wise_ceres,
                  galex_ceres=galex_ceres,
                  unwise_coadds=unwise_coadds,
                  bailout=bail_out,
                  minimal_coadds=minimal_coadds,
                  do_calibs=do_calibs,
                  old_calibs_ok=old_calibs_ok,
                  write_metrics=write_metrics,
                  lanczos=lanczos,
                  unwise_dir=unwise_dir,
                  unwise_tr_dir=unwise_tr_dir,
                  unwise_modelsky_dir=unwise_modelsky_dir,
                  galex=galex,
                  galex_dir=galex_dir,
                  command_line=command_line,
                  read_parallel=read_parallel,
                  max_memory_gb=max_memory_gb,
                  plots=plots, plots2=plots2, coadd_bw=coadd_bw,
                  force=forceStages, write=write_pickles,
                  record_event=record_event)

    if checkpoint_filename is not None:
        kwargs.update(checkpoint_filename=checkpoint_filename)
        if checkpoint_period is not None:
            kwargs.update(checkpoint_period=checkpoint_period)
    if wise_checkpoint_filename is not None:
        kwargs.update(wise_checkpoint_filename=wise_checkpoint_filename)
        if wise_checkpoint_period is not None:
            kwargs.update(wise_checkpoint_period=wise_checkpoint_period)

    if pool or (threads and threads > 1):
        # Multiprocessing: use the given pool, or start a TimingPool, and
        # attach per-stage resource measurements.
        from astrometry.util.timingpool import TimingPool, TimingPoolMeas
        from astrometry.util.ttime import MemMeas
        if pool is None:
            pool = TimingPool(threads, initializer=runbrick_global_init,
                              initargs=[])
        poolmeas = TimingPoolMeas(pool, pickleTraffic=False)
        StageTime.add_measurement(poolmeas)
        StageTime.add_measurement(MemMeas)
        mp = multiproc(None, pool=pool)
    else:
        # Single-process mode.
        from astrometry.util.ttime import CpuMeas
        from astrometry.util.ttime import MemMeas
        mp = multiproc(init=runbrick_global_init, initargs=[])
        StageTime.add_measurement(CpuMeas)
        StageTime.add_measurement(MemMeas)
        pool = None
    kwargs.update(mp=mp)

    # Debugging / subsetting options, only passed through if set.
    if nblobs is not None:
        kwargs.update(nblobs=nblobs)
    if blob is not None:
        kwargs.update(blob0=blob)
    if blobxy is not None:
        kwargs.update(blobxy=blobxy)
    if blobradec is not None:
        kwargs.update(blobradec=blobradec)
    if blobid is not None:
        kwargs.update(blobid=blobid)
    if max_blobsize is not None:
        kwargs.update(max_blobsize=max_blobsize)

    # Substitute the brick name now; the %%(stage)s placeholder survives
    # for runstage() to fill in per-stage.
    pickle_pat = pickle_pat % dict(brick=brick)

    # Stage dependency graph: stage name -> prerequisite stage.
    prereqs = {
        'tims':None,
        'refs': 'tims',
        'outliers': 'refs',
        'halos': 'outliers',
        'srcs': 'halos',
        # fitblobs: see below
        'blobmask': 'halos',
        'coadds': 'fitblobs',
        # wise_forced: see below
        'fitplots': 'fitblobs',
        'psfplots': 'tims',
        'initplots': 'srcs',
    }

    if 'image_coadds' in stages:
        # Where image_coadds slots in depends on the blob options.
        if blob_mask:
            prereqs.update({
                'image_coadds':'blobmask',
                'srcs': 'image_coadds',
                'fitblobs':'srcs',
            })
        elif blob_image:
            prereqs.update({
                'image_coadds':'srcs',
                'fitblobs':'image_coadds',
            })
        else:
            prereqs.update({
                'image_coadds':'halos',
                'srcs':'image_coadds',
                'fitblobs':'srcs',
            })
    else:
        prereqs.update({
            'fitblobs':'srcs',
        })

    # not sure how to set up the prereqs here. --galex could always require --wise?
    if wise:
        if galex:
            prereqs.update({
                'wise_forced': 'coadds',
                'galex_forced': 'wise_forced',
                'writecat': 'galex_forced',
            })
        else:
            prereqs.update({
                'wise_forced': 'coadds',
                'writecat': 'wise_forced',
            })
    else:
        if galex:
            prereqs.update({
                'galex_forced': 'coadds',
                'writecat': 'galex_forced',
            })
        else:
            prereqs.update({
                'writecat': 'coadds',
            })

    if fit_on_coadds:
        prereqs.update({
            'fit_on_coadds': 'halos',
            'srcs': 'fit_on_coadds',
            'image_coadds': 'fit_on_coadds',
        })
        if blob_image:
            prereqs.update({'image_coadds':'srcs'})

    # HACK -- set the prereq to the stage after which you'd like to write out checksums.
    prereqs.update({'checksum': 'outliers'})

    if prereqs_update is not None:
        prereqs.update(prereqs_update)

    initargs.update(W=width, H=height, pixscale=pixscale,
                    target_extent=zoom)
    if bands is not None:
        initargs.update(bands=bands)

    def mystagefunc(stage, mp=None, **kwargs):
        # Wrapper around stagefunc that refreshes pickled survey settings,
        # flushes output streams, and reports per-stage resource usage.
        # Update the (pickled) survey output directory, so that running
        # with an updated --output-dir overrides the pickle file.
        picsurvey = kwargs.get('survey',None)
        if picsurvey is not None:
            picsurvey.output_dir = survey.output_dir
            picsurvey.allbands = survey.allbands
            picsurvey.coadd_bw = survey.coadd_bw
        flush()
        if mp is not None and threads is not None and threads > 1:
            # flush all workers too
            mp.map(flush, [[]] * threads)
        staget0 = StageTime()
        R = stagefunc(stage, mp=mp, **kwargs)
        flush()
        if mp is not None and threads is not None and threads > 1:
            mp.map(flush, [[]] * threads)
        info('Resources for stage', stage, ':', StageTime()-staget0)
        return R

    t0 = StageTime()
    R = None
    # runstage() recursively runs (or unpickles) prerequisites as needed.
    for stage in stages:
        R = runstage(stage, pickle_pat, mystagefunc, prereqs=prereqs,
                     initial_args=initargs, **kwargs)
    info('All done:', StageTime()-t0)

    if pool is not None:
        # Shut down worker processes we created (or were handed).
        pool.close()
        pool.join()
    return R
def flush(x=None):
    """Flush stdout and stderr; returns None.

    The unused *x* argument allows this function to be used as a
    worker target for multiprocessing ``map`` calls.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
class StageTime(Time):
    '''
    A Time subclass that reports overall CPU use, assuming multiprocessing.

    Measurement factories are registered on the class; each instance
    snapshots all registered measurements at construction time.
    '''
    # Shared, class-level registry of measurement classes
    # (intentionally mutable and shared by all instances).
    measurements = []

    @classmethod
    def add_measurement(cls, m):
        '''Register measurement factory *m* for all future instances.'''
        cls.measurements.append(m)

    def __init__(self):
        snapshot = []
        for factory in self.measurements:
            snapshot.append(factory())
        self.meas = snapshot
def get_parser():
    """Build the command-line argument parser for runbrick.py.

    Returns
    -------
    argparse.ArgumentParser
        Parser covering stage control, brick/geometry selection,
        calibration, source detection, WISE/GALEX forced photometry,
        coadd and output options.  main() adds a few profiling-related
        arguments on top of this.
    """
    import argparse
    de = ('Main "pipeline" script for the Legacy Survey ' +
          '(DECaLS, MzLS, Bok) data reductions.')
    # %%s survives argparse's help rendering as a literal %s example.
    ep = '''
e.g., to run a small field containing a cluster:
python -u legacypipe/runbrick.py --plots --brick 2440p070 --zoom 1900 2400 450 950 -P pickles/runbrick-cluster-%%s.pickle
'''
    parser = argparse.ArgumentParser(description=de, epilog=ep)

    # --- Run / stage control ---
    parser.add_argument('-r', '--run', default=None,
                        help='Set the run type to execute')
    parser.add_argument(
        '-f', '--force-stage', dest='force', action='append', default=[],
        help="Force re-running the given stage(s) -- don't read from pickle.")
    parser.add_argument('-F', '--force-all', dest='forceall',
                        action='store_true', help='Force all stages to run')
    parser.add_argument('-s', '--stage', dest='stage', default=[],
                        action='append', help="Run up to the given stage(s)")
    parser.add_argument('-n', '--no-write', dest='write', default=True,
                        action='store_false')
    parser.add_argument('-w', '--write-stage', action='append', default=None,
                        help='Write a pickle for a given stage: eg "tims", "image_coadds", "srcs"')
    parser.add_argument('-v', '--verbose', dest='verbose', action='count',
                        default=0, help='Make more verbose')
    parser.add_argument(
        '--checkpoint', dest='checkpoint_filename', default=None,
        help='Write to checkpoint file?')
    parser.add_argument(
        '--checkpoint-period', type=int, default=None,
        help='Period for writing checkpoint files, in seconds; default 600')
    parser.add_argument(
        '--wise-checkpoint', dest='wise_checkpoint_filename', default=None,
        help='Write WISE to checkpoint file?')
    parser.add_argument(
        '--wise-checkpoint-period', type=int, default=None,
        help='Period for writing WISE checkpoint files, in seconds; default 600')

    # --- Target selection & geometry ---
    parser.add_argument('-b', '--brick',
                        help='Brick name to run; required unless --radec is given')
    parser.add_argument('--radec', nargs=2,
                        help='RA,Dec center for a custom location (not a brick)')
    parser.add_argument('--pixscale', type=float, default=0.262,
                        help='Pixel scale of the output coadds (arcsec/pixel)')
    parser.add_argument('-W', '--width', type=int, default=3600,
                        help='Target image width, default %(default)i')
    parser.add_argument('-H', '--height', type=int, default=3600,
                        help='Target image height, default %(default)i')
    parser.add_argument('--zoom', type=int, nargs=4,
                        help='Set target image extent (default "0 3600 0 3600")')

    # --- Directories & caching ---
    parser.add_argument('-d', '--outdir', dest='output_dir',
                        help='Set output base directory, default "."')
    parser.add_argument('--release', default=None, type=int,
                        help='Release code for output catalogs (default determined by --run)')
    parser.add_argument('--survey-dir', type=str, default=None,
                        help='Override the $LEGACY_SURVEY_DIR environment variable')
    parser.add_argument('--blob-mask-dir', type=str, default=None,
                        help='The base directory to search for blob masks during sky model construction')
    parser.add_argument('--cache-dir', type=str, default=None,
                        help='Directory to search for cached files')
    parser.add_argument('--prime-cache', default=False, action='store_true',
                        help='Copy image (ooi, ood, oow) files to --cache-dir before starting.')
    parser.add_argument('--threads', type=int, help='Run multi-threaded')

    # --- Plotting / pickles ---
    parser.add_argument('-p', '--plots', dest='plots', action='store_true',
                        help='Per-blob plots?')
    parser.add_argument('--plots2', action='store_true',
                        help='More plots?')
    parser.add_argument(
        '-P', '--pickle', dest='pickle_pat',
        help='Pickle filename pattern, default %(default)s',
        default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')
    parser.add_argument('--plot-base',
                        help='Base filename for plots, default brick-BRICK')
    parser.add_argument('--plot-number', type=int, default=0,
                        help='Set PlotSequence starting number')

    # --- Optimizer choices ---
    parser.add_argument('--ceres', default=False, action='store_true',
                        help='Use Ceres Solver for all optimization?')
    parser.add_argument('--no-wise-ceres', dest='wise_ceres', default=True,
                        action='store_false',
                        help='Do not use Ceres Solver for unWISE forced phot')
    parser.add_argument('--no-galex-ceres', dest='galex_ceres', default=True,
                        action='store_false',
                        help='Do not use Ceres Solver for GALEX forced phot')

    # --- Blob-level debugging ---
    parser.add_argument('--nblobs', type=int, help='Debugging: only fit N blobs')
    parser.add_argument('--blob', type=int, help='Debugging: start with blob #')
    parser.add_argument('--blobid', help='Debugging: process this list of (comma-separated) blob ids.')
    parser.add_argument(
        '--blobxy', type=int, nargs=2, default=None, action='append',
        help=('Debugging: run the single blob containing pixel <bx> <by>; ' +
              'this option can be repeated to run multiple blobs.'))
    parser.add_argument(
        '--blobradec', type=float, nargs=2, default=None, action='append',
        help=('Debugging: run the single blob containing RA,Dec <ra> <dec>; ' +
              'this option can be repeated to run multiple blobs.'))
    parser.add_argument('--max-blobsize', type=int,
                        help='Skip blobs containing more than the given number of pixels.')

    # --- Existence checks / skipping ---
    parser.add_argument(
        '--check-done', default=False, action='store_true',
        help='Just check for existence of output files for this brick?')
    parser.add_argument('--skip', default=False, action='store_true',
                        help='Quit if the output catalog already exists.')
    parser.add_argument('--skip-coadd', default=False, action='store_true',
                        help='Quit if the output coadd jpeg already exists.')
    parser.add_argument(
        '--skip-calibs', dest='do_calibs', default=True, action='store_false',
        help='Do not run the calibration steps')
    parser.add_argument(
        '--old-calibs-ok', dest='old_calibs_ok', default=False, action='store_true',
        help='Allow old calibration files (where the data validation does not necessarily pass).')
    parser.add_argument('--skip-metrics', dest='write_metrics', default=True,
                        action='store_false',
                        help='Do not generate the metrics directory and files')

    # --- Source detection ---
    parser.add_argument('--nsigma', type=float, default=6.0,
                        help='Set N sigma source detection thresh')
    parser.add_argument('--saddle-fraction', type=float, default=0.1,
                        help='Fraction of the peak height for selecting new sources.')
    parser.add_argument('--saddle-min', type=float, default=2.0,
                        help='Saddle-point depth from existing sources down to new sources (sigma).')
    parser.add_argument('--blob-dilate', type=int, default=None,
                        help='How many pixels to dilate detection pixels (default: 8)')
    parser.add_argument(
        '--reoptimize', action='store_true', default=False,
        help='Do a second round of model fitting after all model selections')
    parser.add_argument(
        '--no-iterative', dest='iterative', action='store_false', default=True,
        help='Turn off iterative source detection?')

    # --- WISE / GALEX forced photometry ---
    parser.add_argument('--no-wise', dest='wise', default=True,
                        action='store_false',
                        help='Skip unWISE forced photometry')
    parser.add_argument(
        '--unwise-dir', default=None,
        help='Base directory for unWISE coadds; may be a colon-separated list')
    parser.add_argument(
        '--unwise-tr-dir', default=None,
        help='Base directory for unWISE time-resolved coadds; may be a colon-separated list')
    parser.add_argument('--galex', dest='galex', default=False,
                        action='store_true',
                        help='Perform GALEX forced photometry')
    parser.add_argument(
        '--galex-dir', default=None,
        help='Base directory for GALEX coadds')

    # --- Coadd products ---
    parser.add_argument('--blob-image', action='store_true', default=False,
                        help='Create "imageblob" image?')
    parser.add_argument('--blob-mask', action='store_true', default=False,
                        help='With --stage image_coadds, also run the "blobmask" stage?')
    parser.add_argument('--minimal-coadds', action='store_true', default=False,
                        help='Only create image and invvar coadds in image_coadds stage')
    parser.add_argument(
        '--no-lanczos', dest='lanczos', action='store_false', default=True,
        help='Do nearest-neighbour rather than Lanczos-3 coadds')

    # --- PSF modeling ---
    parser.add_argument('--gpsf', action='store_true', default=False,
                        help='Use a fixed single-Gaussian PSF')
    parser.add_argument('--no-hybrid-psf', dest='hybridPsf', default=True,
                        action='store_false',
                        help="Don't use a hybrid pixelized/Gaussian PSF model")
    # FIX: help text previously said "unix flux"; the PSF is normalized to
    # unit flux.
    parser.add_argument('--no-normalize-psf', dest='normalizePsf', default=True,
                        action='store_false',
                        help='Do not normalize the PSF model to unit flux')
    parser.add_argument('--apodize', default=False, action='store_true',
                        help='Apodize image edges for prettier pictures?')
    parser.add_argument(
        '--coadd-bw', action='store_true', default=False,
        help='Create grayscale coadds if only one band is available?')
    parser.add_argument('--bands', default=None,
                        help='Set the list of bands (filters) that are included in processing: comma-separated list, default "g,r,z"')

    # --- Reference catalogs ---
    parser.add_argument('--no-tycho', dest='tycho_stars', default=True,
                        action='store_false',
                        help="Don't use Tycho-2 sources as fixed stars")
    parser.add_argument('--no-gaia', dest='gaia_stars', default=True,
                        action='store_false',
                        help="Don't use Gaia sources as fixed stars")
    parser.add_argument('--no-large-galaxies', dest='large_galaxies', default=True,
                        action='store_false', help="Don't seed (or mask in and around) large galaxies.")
    parser.add_argument('--min-mjd', type=float,
                        help='Only keep images taken after the given MJD')
    parser.add_argument('--max-mjd', type=float,
                        help='Only keep images taken before the given MJD')

    # --- Sky subtraction / outlier masking ---
    parser.add_argument('--no-splinesky', dest='splinesky', default=True,
                        action='store_false', help='Use constant sky rather than spline.')
    parser.add_argument('--no-subsky', dest='subsky', default=True,
                        action='store_false', help='Do not subtract the sky background.')
    parser.add_argument('--no-unwise-coadds', dest='unwise_coadds', default=True,
                        action='store_false', help='Turn off writing FITS and JPEG unWISE coadds?')
    parser.add_argument('--no-outliers', dest='outliers', default=True,
                        action='store_false', help='Do not compute or apply outlier masks')
    parser.add_argument('--cache-outliers', default=False,
                        action='store_true', help='Use outlier-mask file if it exists?')
    parser.add_argument('--bail-out', default=False, action='store_true',
                        help='Bail out of "fitblobs" processing, writing all blobs from the checkpoint and skipping any remaining ones.')

    # --- Fit-on-coadds mode (e.g. large galaxies) ---
    parser.add_argument('--fit-on-coadds', default=False, action='store_true',
                        help='Fit to coadds rather than individual CCDs (e.g., large galaxies).')
    # FIX: help text previously said "FWHW"; the tiering is by FWHM.
    parser.add_argument('--coadd-tiers', default=None, type=int,
                        help='Split images into this many tiers of coadds (per band) by FWHM')
    parser.add_argument('--nsatur', default=None, type=int,
                        help='Demand that >= nsatur images per band are saturated before using saturated logic (eg, 2).')
    parser.add_argument('--no-ivar-reweighting', dest='fitoncoadds_reweight_ivar',
                        default=True, action='store_false',
                        help='Reweight the inverse variance when fitting on coadds.')
    parser.add_argument('--no-galaxy-forcepsf', dest='large_galaxies_force_pointsource',
                        default=True, action='store_false',
                        help='Do not force PSFs within galaxy mask.')
    parser.add_argument('--less-masking', default=False, action='store_true',
                        help='Turn off background fitting within MEDIUM mask.')
    parser.add_argument('--ubercal-sky', dest='ubercal_sky', default=False,
                        action='store_true', help='Use the ubercal sky-subtraction (only used with --fit-on-coadds and --no-subsky).')
    parser.add_argument('--subsky-radii', type=float, nargs='*', default=None,
                        help="""Sky-subtraction radii: rin, rout [arcsec] (only used with --fit-on-coadds and --no-subsky).
Image pixels r<rmask are fully masked and the pedestal sky background is estimated from an annulus
rin<r<rout on each CCD centered on the targetwcs.crval coordinates.""")

    # --- Resources ---
    parser.add_argument('--read-serial', dest='read_parallel', default=True,
                        action='store_false', help='Read images in series, not in parallel?')
    parser.add_argument('--max-memory-gb', type=float, default=None,
                        help='Maximum (estimated) memory to allow for tim pixels, in GB')
    parser.add_argument('--rgb-stretch', type=float, help='Stretch RGB jpeg plots by this factor.')
    return parser
def get_runbrick_kwargs(survey=None,
                        brick=None,
                        radec=None,
                        run=None,
                        survey_dir=None,
                        output_dir=None,
                        cache_dir=None,
                        prime_cache=False,
                        check_done=False,
                        skip=False,
                        skip_coadd=False,
                        stage=None,
                        unwise_dir=None,
                        unwise_tr_dir=None,
                        unwise_modelsky_dir=None,
                        galex_dir=None,
                        write_stage=None,
                        write=True,
                        gpsf=False,
                        bands=None,
                        allbands=None,
                        coadd_bw=None,
                        **opt):
    """Convert parsed command-line options into run_brick() keyword args.

    Returns (survey, kwargs) on success.  For the early-exit paths the
    second element is an int instead of a dict: 0 when outputs already
    exist (--skip / --skip-coadd / --check-done success) and -1 on error,
    so callers can use it directly as a process exit status.
    Raises RuntimeError if $UNWISE_MODEL_SKY_DIR points at a missing dir.
    """
    if stage is None:
        stage = []
    if brick is not None and radec is not None:
        print('Only ONE of --brick and --radec may be specified.')
        return None, -1
    opt.update(radec=radec)
    if bands is None:
        bands = ['g', 'r', 'z']
    else:
        bands = bands.split(',')
    opt.update(bands=bands, coadd_bw=coadd_bw)
    if allbands is None:
        allbands = bands
    if survey is None:
        from legacypipe.runs import get_survey
        survey = get_survey(run,
                            survey_dir=survey_dir,
                            output_dir=output_dir,
                            cache_dir=cache_dir,
                            prime_cache=prime_cache,
                            allbands=allbands,
                            coadd_bw=coadd_bw)
        info(survey)
    blobdir = opt.pop('blob_mask_dir', None)
    if blobdir is not None:
        from legacypipe.survey import LegacySurveyData
        opt.update(survey_blob_mask=LegacySurveyData(blobdir))
    if check_done or skip or skip_coadd:
        # Decide which output file's existence to test.
        if skip_coadd:
            fn = survey.find_file('image-jpeg', output=True, brick=brick)
        else:
            fn = survey.find_file('tractor', output=True, brick=brick)
        info('Checking for', fn)
        exists = os.path.exists(fn)
        if skip_coadd and exists:
            return survey, 0
        if exists:
            # Verify the catalog is actually readable, not just present.
            try:
                T = fits_table(fn)
                info('Read', len(T), 'sources from', fn)
            # FIX: was a bare "except:", which would also swallow
            # KeyboardInterrupt / SystemExit.
            except Exception:
                print('Failed to read file', fn)
                import traceback
                traceback.print_exc()
                exists = False
        if skip:
            if exists:
                return survey, 0
        elif check_done:
            if not exists:
                print('Does not exist:', fn)
                return survey, -1
            info('Found:', fn)
            return survey, 0
    # Default to running the full pipeline (through 'writecat').
    if len(stage) == 0:
        stage.append('writecat')
    opt.update(stages=stage)
    # Remove opt values that are None, so run_brick's own defaults apply.
    toremove = [k for k, v in opt.items() if v is None]
    for k in toremove:
        del opt[k]
    # Fall back to environment variables for the WISE/GALEX data dirs.
    if unwise_dir is None:
        unwise_dir = os.environ.get('UNWISE_COADDS_DIR', None)
    if unwise_tr_dir is None:
        unwise_tr_dir = os.environ.get('UNWISE_COADDS_TIMERESOLVED_DIR', None)
    if unwise_modelsky_dir is None:
        unwise_modelsky_dir = os.environ.get('UNWISE_MODEL_SKY_DIR', None)
    if unwise_modelsky_dir is not None and not os.path.exists(unwise_modelsky_dir):
        raise RuntimeError('The directory specified in $UNWISE_MODEL_SKY_DIR does not exist!')
    if galex_dir is None:
        galex_dir = os.environ.get('GALEX_DIR', None)
    opt.update(unwise_dir=unwise_dir, unwise_tr_dir=unwise_tr_dir,
               unwise_modelsky_dir=unwise_modelsky_dir, galex_dir=galex_dir)
    # write_pickles is a list of stage names if -w / --write-stage is given;
    # False if --no-write given; True by default.
    if write_stage is not None:
        write_pickles = write_stage
    else:
        write_pickles = write
    opt.update(write_pickles=write_pickles)
    # --gpsf selects a pure-Gaussian PSF instead of the pixelized model.
    opt.update(gaussPsf=gpsf,
               pixPsf=not gpsf)
    return survey, opt
def main(args=None):
    # Command-line entry point: parse options, configure logging/plotting,
    # optionally start a "ps" resource-monitoring thread, and run the brick.
    # Returns 0 on success (or nothing-to-do), -1 on error.
    import datetime
    from legacypipe.survey import get_git_version
    print()
    print('runbrick.py starting at', datetime.datetime.now().isoformat())
    print('legacypipe git version:', get_git_version())
    if args is None:
        # Echo the exact invocation for reproducibility in logs.
        print('Command-line args:', sys.argv)
        cmd = 'python'
        for vv in sys.argv:
            cmd += ' {}'.format(vv)
        print(cmd)
    else:
        print('Args:', args)
    print()
    parser = get_parser()
    # Extra options only available via this entry point (not get_parser()).
    parser.add_argument(
        '--ps', help='Run "ps" and write results to given filename?')
    parser.add_argument(
        '--ps-t0', type=int, default=0, help='Unix-time start for "--ps"')
    opt = parser.parse_args(args=args)

    if opt.brick is None and opt.radec is None:
        parser.print_help()
        return -1

    optdict = vars(opt)
    # Pop options handled here so they are not forwarded to run_brick().
    ps_file = optdict.pop('ps', None)
    ps_t0 = optdict.pop('ps_t0', 0)
    verbose = optdict.pop('verbose')
    rgb_stretch = optdict.pop('rgb_stretch', None)

    survey, kwargs = get_runbrick_kwargs(**optdict)
    # get_runbrick_kwargs returns an int (exit status) for early-exit paths.
    if kwargs in [-1, 0]:
        return kwargs
    kwargs.update(command_line=' '.join(sys.argv))

    if verbose == 0:
        lvl = logging.INFO
    else:
        lvl = logging.DEBUG
    logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
    # tractor logging is *soooo* chatty
    logging.getLogger('tractor.engine').setLevel(lvl + 10)
    # silence "findfont: score(<Font 'DejaVu Sans Mono' ...)" messages
    logging.getLogger('matplotlib.font_manager').disabled = True
    # route warnings through the logging system
    logging.captureWarnings(True)

    if opt.plots:
        # Headless backend + shared figure layout for all per-blob plots.
        import matplotlib
        matplotlib.use('Agg')
        import pylab as plt
        plt.figure(figsize=(12,9))
        plt.subplots_adjust(left=0.07, right=0.99, bottom=0.07, top=0.93,
                            hspace=0.2, wspace=0.05)

    if ps_file is not None:
        # Start a daemon thread that periodically runs "ps" to record
        # resource usage; events are timestamped via record_event().
        import threading
        from collections import deque
        from legacypipe.utils import run_ps_thread
        ps_shutdown = threading.Event()
        ps_queue = deque()
        def record_event(msg):
            from time import time
            ps_queue.append((time(), msg))
        kwargs.update(record_event=record_event)
        if ps_t0 > 0:
            record_event('start')
        ps_thread = threading.Thread(
            target=run_ps_thread,
            args=(os.getpid(), os.getppid(), ps_file, ps_shutdown, ps_queue),
            name='run_ps')
        ps_thread.daemon = True
        info('Starting thread to run "ps"')
        ps_thread.start()

    if rgb_stretch is not None:
        # Module-level knob read when rendering RGB jpegs.
        import legacypipe.survey
        legacypipe.survey.rgb_stretch_factor = rgb_stretch

    debug('kwargs:', kwargs)

    rtn = -1
    try:
        run_brick(opt.brick, survey, **kwargs)
        rtn = 0
    except NothingToDoError as e:
        # Benign: e.g. no overlapping images; treated as success.
        print()
        if hasattr(e, 'message'):
            print(e.message)
        else:
            print(e)
        print()
        rtn = 0
    except RunbrickError as e:
        print()
        if hasattr(e, 'message'):
            print(e.message)
        else:
            print(e)
        print()
        rtn = -1

    if ps_file is not None:
        # Try to shut down ps thread gracefully
        ps_shutdown.set()
        info('Attempting to join the ps thread...')
        ps_thread.join(1.0)
        if ps_thread.is_alive():
            info('ps thread is still alive.')

    return rtn
if __name__ == '__main__':
    from astrometry.util.ttime import MemMeas
    # Track memory use alongside CPU time in the per-stage resource reports.
    Time.add_measurement(MemMeas)
    sys.exit(main())
# Test bricks & areas
# A single, fairly bright star
# python -u legacypipe/runbrick.py -b 1498p017 -P 'pickles/runbrick-z-%(brick)s-%%(stage)s.pickle' --zoom 1900 2000 2700 2800
# python -u legacypipe/runbrick.py -b 0001p000 -P 'pickles/runbrick-z-%(brick)s-%%(stage)s.pickle' --zoom 80 380 2970 3270
| legacysurvey/legacypipe | py/legacypipe/runbrick.py | Python | bsd-3-clause | 165,331 | [
"Galaxy",
"Gaussian"
] | 64a35c5d7a707e8f585aa8470b806be32df22ccfebdeedbb9c41aa43ffda9140 |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""Application controller for BLAT v34"""
from os import remove
from os.path import isabs
from tempfile import mkstemp
from cogent import DNA
from cogent.core.genetic_code import GeneticCodes
from cogent.parse.blast import MinimalBlatParser9
from skbio.parse.sequences import parse_fasta
from burrito.util import (CommandLineApplication, ResultPath,
ApplicationError)
from burrito.parameters import FlagParameter, ValuedParameter, FilePath
class Blat(CommandLineApplication):
    """BLAT generic application controller.

    Wraps the ``blat`` executable (v34).  Positional input is supplied via
    _input_as_list as [query_path, database_path, output_path]; note that
    on the command line blat expects "blat database query output".
    """
    _command = 'blat'
    _input_handler = "_input_as_list"
    # Allowed values for -t (database type).
    _database_types = ['dna', 'prot', 'dnax']
    # Allowed values for -q (query type).
    _query_types = ['dna', 'rna', 'prot', 'dnax', 'rnax']
    # Allowed values for -mask / -qMask / -repeats.
    _mask_types = ['lower', 'upper', 'out', 'file.out']
    # Allowed values for -out (output format).
    _out_types = ['psl', 'pslx', 'axt', 'maf', 'sim4', 'wublast', 'blast',
                  'blast8', 'blast9']
    # Valid (database, query) type pairings per the BLAT usage notes.
    _valid_combinations = [('dna', 'dna'), ('dna', 'rna'), ('prot', 'prot'),
                           ('dnax', 'prot'), ('dnax', 'dnax'),
                           ('dnax', 'rnax')]
    # Positional paths, populated by _input_as_list before each run.
    _database = None
    _query = None
    _output = None

    _parameters = {
        # database type (dna, prot, or dnax, where dnax is DNA sequence
        # translated in six frames to protein
        '-t': ValuedParameter('-', Delimiter='=', Name='t'),

        # query type (dna, rna, prot, dnax, rnax, where rnax is DNA sequence
        # translated in three frames to protein
        '-q': ValuedParameter('-', Delimiter='=', Name='q'),

        # Use overused tile file N.ooc, and N should correspond to the tileSize
        '-ooc': ValuedParameter('-', Delimiter='=', Name='ooc', IsPath=True),

        # Sets the size of at match that that triggers an alignment
        '-tileSize': ValuedParameter('-', Delimiter='=', Name='tileSize'),

        # Spacing between tiles.
        '-stepSize': ValuedParameter('-', Delimiter='=', Name='stepSize'),

        # If set to 1, allows one mismatch in the tile and still triggers
        # an alignment.
        '-oneOff': ValuedParameter('-', Delimiter='=', Name='oneOff'),

        # sets the number of tile matches
        '-minMatch': ValuedParameter('-', Delimiter='=', Name='minMatch'),

        # sets the minimum score
        '-minScore': ValuedParameter('-', Delimiter='=', Name='minScore'),

        # sets the minimum sequence identity in percent
        '-minIdentity':
        ValuedParameter('-', Delimiter='=', Name='minIdentity'),

        # sets the size o the maximum gap between tiles in a clump
        '-maxGap': ValuedParameter('-', Delimiter='=', Name='maxGap'),

        # make an overused tile file. Target needs to be complete genome.
        '-makeOoc': ValuedParameter('-', Delimiter='=', Name='makeOoc',
                                    IsPath=True),

        # sets the number of repetitions of a tile allowed before it is marked
        # as overused
        '-repMatch': ValuedParameter('-', Delimiter='=', Name='repMatch'),

        # mask out repeats. Alignments won't be started in masked region but
        # may extend through it in nucleotide searches. Masked areas are
        # ignored entirely in protein or translated searches. Types are:
        # lower, upper, out, file.out (file.out - mask database according to
        # RepeatMasker file.out
        '-mask': ValuedParameter('-', Delimiter='=', Name='mask'),

        # Mask out repeats in query sequence. similar to -mask but for query
        # rather than target sequence
        '-qMask': ValuedParameter('-', Delimiter='=', Name='qMask'),

        # repeat bases will not be masked in any way, but matches in
        # repeat areas will be reported separately from matches in other
        # areas in the pls output
        '-repeats': ValuedParameter('-', Delimiter='=', Name='repeats'),

        # minimum percent divergence of repeats to allow them to be unmasked
        '-minRepDivergence': ValuedParameter('-', Delimiter='=',
                                             Name='minRepDivergence'),

        # output dot every N sequences to show program's progress
        '-dots': ValuedParameter('-', Delimiter='=', Name='dots'),

        # controls output file format. One of:
        # psl - Default. Tab separated format, no sequence
        # pslx - Tab separated format with sequence
        # axt - blastz-associated axt format
        # maf - multiz-associated maf format
        # sim4 - similar to sim4 format
        # wublast - similar to wublast format
        # blast - similar to NCBI blast format
        # blast8- NCBI blast tabular format
        # blast9 - NCBI blast tabular format with comments
        '-out': ValuedParameter('-', Delimiter='=', Name='out'),

        # sets maximum intron size
        '-maxIntron': ValuedParameter('-', Delimiter='=', Name='maxIntron'),

        # suppress column headers in psl output
        '-noHead': FlagParameter('-', Name='noHead'),

        # trim leading poly-T
        '-trimT': FlagParameter('-', Name='trimT'),

        # do not trim trailing poly-A
        '-noTrimA': FlagParameter('-', Name='noTrimA'),

        # Remove poly-A tail from qSize as well as alignments in psl output
        '-trimHardA': FlagParameter('-', Name='trimHardA'),

        # run for fast DNA/DNA remapping - not allowing introns,
        # requiring high %ID
        '-fastMap': FlagParameter('-', Name='fastMap'),

        # for high quality mRNAs, look harder for small initial and terminal
        # exons
        '-fine': FlagParameter('-', Name='fine'),

        # Allows extension of alignment through large blocks of N's
        '-extendThroughN': FlagParameter('-', Name='extendThroughN')
    }

    def _get_result_paths(self, data):
        """Returns the file location for result output.

        data is the [query, database, output] list passed to the app; the
        output path is data[2].
        """
        return {'output': ResultPath(data[2], IsWritten=True)}

    def _get_base_command(self):
        """Gets the command that will be run when the app controller is
        called.

        Assembles: cd <WorkingDir>; blat <database> <query> <params> <output>
        """
        command_parts = []
        cd_command = ''.join(['cd ', str(self.WorkingDir), ';'])
        if self._command is None:
            raise ApplicationError('_command has not been set.')
        command = self._command
        parameters = sorted([str(x) for x in self.Parameters.values()
                             if str(x)])
        command_parts.append(cd_command)
        command_parts.append(command)
        command_parts.append(self._database)  # Positional argument
        command_parts.append(self._query)  # Positional argument
        command_parts += parameters
        if self._output:
            command_parts.append(self._output.Path)  # Positional
        return (
            self._command_delimiter.join(filter(None, command_parts)).strip()
        )

    BaseCommand = property(_get_base_command)

    def _input_as_list(self, data):
        '''Takes the positional arguments as input in a list.

        The list input here should be [query_file_path, database_file_path,
        output_file_path].  All paths must be absolute.  Also validates the
        restricted-choice parameters (-t, -q, -mask, -qMask, -repeats, -out)
        and raises ApplicationError with an informative message on failure.
        '''
        query, database, output = data
        if (not isabs(database)) \
                or (not isabs(query)) \
                or (not isabs(output)):
            raise ApplicationError("Only absolute paths allowed.\n%s" %
                                   ', '.join(data))

        self._database = FilePath(database)
        self._query = FilePath(query)
        self._output = ResultPath(output, IsWritten=True)

        # check parameters that can only take a particular set of values
        # check combination of database and query type
        if self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and \
                (self.Parameters['-t'].Value, self.Parameters['-q'].Value) not in \
                self._valid_combinations:
            # FIX: was "self.Paramters" (AttributeError in this error path).
            error_message = "Invalid combination of database and query " + \
                            "types ('%s', '%s').\n" % \
                            (self.Parameters['-t'].Value,
                             self.Parameters['-q'].Value)

            error_message += "Must be one of: %s\n" % \
                             repr(self._valid_combinations)

            raise ApplicationError(error_message)

        # check database type
        if self.Parameters['-t'].isOn() and \
                self.Parameters['-t'].Value not in self._database_types:
            error_message = "Invalid database type %s\n" % \
                            self.Parameters['-t'].Value

            error_message += "Allowed values: %s\n" % \
                             ', '.join(self._database_types)

            raise ApplicationError(error_message)

        # check query type
        if self.Parameters['-q'].isOn() and \
                self.Parameters['-q'].Value not in self._query_types:
            error_message = "Invalid query type %s\n" % \
                            self.Parameters['-q'].Value

            error_message += "Allowed values: %s\n" % \
                             ', '.join(self._query_types)

            raise ApplicationError(error_message)

        # check mask type
        if self.Parameters['-mask'].isOn() and \
                self.Parameters['-mask'].Value not in self._mask_types:
            # FIX: was missing ".Value" (printed the parameter object).
            error_message = "Invalid mask type %s\n" % \
                            self.Parameters['-mask'].Value

            error_message += "Allowed Values: %s\n" % \
                             ', '.join(self._mask_types)

            raise ApplicationError(error_message)

        # check qmask type
        if self.Parameters['-qMask'].isOn() and \
                self.Parameters['-qMask'].Value not in self._mask_types:
            error_message = "Invalid qMask type %s\n" % \
                            self.Parameters['-qMask'].Value

            error_message += "Allowed values: %s\n" % \
                             ', '.join(self._mask_types)

            raise ApplicationError(error_message)

        # check repeat type
        if self.Parameters['-repeats'].isOn() and \
                self.Parameters['-repeats'].Value not in self._mask_types:
            # FIX: was "self.Parameters['-repeat']" (KeyError: no such key).
            error_message = "Invalid repeat type %s\n" % \
                            self.Parameters['-repeats'].Value

            error_message += "Allowed values: %s\n" % \
                             ', '.join(self._mask_types)

            raise ApplicationError(error_message)

        # check output format
        if self.Parameters['-out'].isOn() and \
                self.Parameters['-out'].Value not in self._out_types:
            # FIX: was missing ".Value" (printed the parameter object).
            error_message = "Invalid output type %s\n" % \
                            self.Parameters['-out'].Value

            error_message += "Allowed values: %s\n" % \
                             ', '.join(self._out_types)

            raise ApplicationError(error_message)

        return ''
def assign_reads_to_database(query_fasta_fp, database_fasta_fp, output_fp,
                             params=None):
    """Assign a set of query sequences to a reference database.

    query_fasta_fp : absolute file path to query sequences
    database_fasta_fp : absolute file path to the reference database
    output_fp : absolute file path of the output file to write
    params : dict of BLAT specific parameters.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
    """
    run_params = {} if params is None else params
    # Default to tabular blast9 output unless the caller chose a format.
    run_params.setdefault('-out', 'blast9')
    app = Blat(params=run_params)
    return app([query_fasta_fp, database_fasta_fp, output_fp])['output']
def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp,
                                     output_fp, params=None):
    """Assign DNA reads to a database fasta of DNA sequences.

    Wraps assign_reads_to_database, forcing both the database and the query
    type to 'dna'.  All other parameters are left at their defaults unless
    params is passed.

    query_fasta_fp: absolute path to the query fasta file containing DNA
                    sequences.
    database_fasta_fp: absolute path to the database fasta file containing
                       DNA sequences.
    output_fp: absolute path where the output file will be generated.
    params: optional. dict containing parameter settings to be used
            instead of default values. Cannot change database or query
            file types from dna and dna, respectively.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
    """
    extra_params = {} if params is None else params
    # The database/query types are fixed for this convenience wrapper;
    # reject attempts to override them.
    if '-t' in extra_params or '-q' in extra_params:
        raise ApplicationError("Cannot change database or query types when "
                               "using assign_dna_reads_to_dna_database. "
                               "Use assign_reads_to_database instead.\n")
    blat_params = {'-t': 'dna', '-q': 'dna'}
    blat_params.update(extra_params)
    return assign_reads_to_database(query_fasta_fp, database_fasta_fp,
                                    output_fp, blat_params)
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp,
                                         output_fp, temp_dir="/tmp", params=None):
    """Assign DNA reads to a database fasta of protein sequences.

    Wraps assign_reads_to_database, setting database and query types. All
    parameters are set to default unless params is passed. A temporary
    file must be written containing the translated sequences from the input
    query fasta file because BLAT cannot do this automatically.

    query_fasta_fp: absolute path to the query fasta file containing DNA
                    sequences.
    database_fasta_fp: absolute path to the database fasta file containing
                       protein sequences.
    output_fp: absolute path where the output file will be generated.
    temp_dir: optional. Change the location where the translated sequences
              will be written before being used as the query. Defaults to
              /tmp.
    params: optional. dict containing parameter settings to be used
            instead of default values. Cannot change database or query
            file types from protein and dna, respectively.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
    """
    from os import fdopen
    if params is None:
        params = {}

    my_params = {'-t': 'prot', '-q': 'prot'}

    # make sure temp_dir specifies an absolute path
    if not isabs(temp_dir):
        raise ApplicationError("temp_dir must be an absolute path.")

    # if the user specified parameters other than default, then use them.
    # However, if they try to change the database or query types, raise an
    # application error.
    if '-t' in params or '-q' in params:
        # FIX: the error message previously named the wrong function
        # (assign_dna_reads_to_dna_database).
        raise ApplicationError("Cannot change database or query types "
                               "when using assign_dna_reads_to_protein_database. Use "
                               "assign_reads_to_database instead.")

    if 'genetic_code' in params:
        my_genetic_code = GeneticCodes[params['genetic_code']]
        del params['genetic_code']
    else:
        # Default to the standard genetic code.
        my_genetic_code = GeneticCodes[1]

    my_params.update(params)

    # get six-frame translation of the input DNA sequences and write them to
    # temporary file.
    # FIX: the file descriptor returned by mkstemp was previously discarded
    # (leaked); wrap it with fdopen instead of re-opening the path, and use
    # context managers so both files are always closed.
    fd, tmp = mkstemp(dir=temp_dir)
    with fdopen(fd, 'w') as tmp_out:
        with open(query_fasta_fp) as query_file:
            for label, sequence in parse_fasta(query_file):
                seq_id = label.split()[0]

                s = DNA.makeSequence(sequence)
                translations = my_genetic_code.sixframes(s)
                frames = [1, 2, 3, -1, -2, -3]
                translations = dict(zip(frames, translations))

                # Emit frames in sorted order (-3..-1, then 1..3) with the
                # frame encoded in the sequence id.
                for frame, translation in sorted(translations.items()):
                    entry = '>{seq_id}_frame_{frame}\n{trans}\n'
                    entry = entry.format(seq_id=seq_id, frame=frame,
                                         trans=translation)
                    tmp_out.write(entry)

    result = assign_reads_to_database(tmp, database_fasta_fp, output_fp,
                                      params=my_params)

    remove(tmp)
    return result
| ekopylova/burrito-fillings | bfillings/blat.py | Python | bsd-3-clause | 16,807 | [
"BLAST"
] | f5c90cbc20ce14fa057cb6b44d845ee2486ddd86f5bab331a178d7e746cb1888 |
"""Steps used throughout lettuce tests for story app"""
from datetime import datetime
from django.core.urlresolvers import reverse
from lettuce import step, world
from lettuce.django import django_url
from nose.tools import assert_equal
from storybase_story.models import Story
@step(u'Then the Story\'s title should be "([^"]*)"')
def see_title(step, title):
    """Assert that the story title text is present on the current page."""
    world.assert_text_present(title)
@step(u'Then the Story\'s summary is listed as the following:')
def see_summary(step):
    """Assert that the step's multiline text is present on the page."""
    world.assert_text_present(step.multiline)
@step(u'Then the Story\'s byline should be "([^"]*)"')
def see_byline(step, byline):
    """Assert that the story byline text is present on the current page."""
    world.assert_text_present(byline)
@step(u'Then "([^"]*)" should be listed in the Story\'s Organizations list')
def org_in_list(step, org_name):
world.assert_text_in_list('ul.organizations li', org_name)
@step(u'Then "([^"]*)" should be listed in the Story\'s Projects list')
def proj_in_list(step, proj_name):
world.assert_text_in_list('ul.projects li', proj_name)
@step(u'the following topics are listed in the Story\'s Topics list')
def topics_in_list(step):
    """Assert the topics list matches the 'name' column of the step's table."""
    topic_names = [topic_dict['name'] for topic_dict in step.hashes]
    world.assert_list_equals('ul.topics li', topic_names)
@step(u'Then the Story\'s published date should be set the current date')
def published_today(step):
    """Assert the displayed published date equals today's date."""
    date_el = world.browser.find_by_css('time.published').first
    # NOTE(review): naive local time; may flake if the server renders dates
    # in a different timezone around midnight -- confirm
    now = datetime.now()
    assert_equal(date_el.text, now.strftime("%B %d, %Y"))
@step(u'Then the Story\'s last edited date should be set to the current date')
def last_edited_today(step):
    """Assert the displayed last-edited date equals today's date."""
    date_el = world.browser.find_by_css('time.last-edited').first
    # NOTE(review): naive local time; same midnight/timezone caveat as
    # published_today -- confirm
    now = datetime.now()
    assert_equal(date_el.text, now.strftime("%B %d, %Y"))
@step(u'Then the Story\'s contributor is "([^"]*)"')
def see_contributor(step, contributor):
    """Assert the first .contributor element shows the given name."""
    contributor_el = world.browser.find_by_css('.contributor').first
    assert_equal(contributor_el.text, contributor)
@step(u'Given the user navigates to the story detail page for the story "([^"]*)"')
def visit_story_detail(step, title):
    """Look up the story by its translated title and browse to its detail page."""
    story = Story.objects.get(storytranslation__title=title)
    path = reverse('story_detail', kwargs={'slug': story.slug})
    world.browser.visit(django_url(path))
@step(u'the title input is present')
def see_title_input(step):
    """Assert the story title input element exists on the page."""
    assert world.browser.is_element_present_by_css('.story-title')
@step(u'the byline input is present')
def see_byline_input(step):
    """Assert the byline input element exists on the page."""
    assert world.browser.is_element_present_by_css('.byline')
@step(u'the section list is not present')
def not_see_section_list(step):
    """Assert the section list element does NOT exist on the page."""
    assert world.browser.is_element_present_by_css('.section-list') is False
| denverfoundation/storybase | apps/storybase_story/features/steps.py | Python | mit | 2,647 | [
"VisIt"
] | 0ec16046c3776d5674c34d6e8a34ece21e9518f5703424fba1e1d59916849ced |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Interface for manipulating Device Constants"""
import re
import gtk
from kiwi.decorators import signal_block
from kiwi.python import Settable
from kiwi.ui.objectlist import Column, ObjectList
from stoqdrivers.enum import TaxType
from stoqlib.gui.base.dialogs import BasicDialog, run_dialog
from stoqlib.gui.base.lists import AdditionListSlave
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.lib.defaults import UNKNOWN_CHARACTER
from stoqlib.lib.translation import stoqlib_gettext
from ecf.ecfdomain import ECFPrinter, DeviceConstant
_ = stoqlib_gettext
_HEX_REGEXP = re.compile("[0-9a-fA-F]{1,2}")
def dec2hex(dec):
    """Return the hexadecimal encoding of each character of `dec`.

    Python 2 only: relies on str.encode('hex'), which was removed in
    Python 3 (use binascii/codecs there).
    """
    return "".join([data.encode("hex") for data in dec])
def hex2dec(hex):
    """Decode a string of hex digit groups back into raw characters.

    Groups that decode to a non-printable character are replaced by
    UNKNOWN_CHARACTER.  Python 2 only: relies on str.decode('hex').
    """
    # pylint: disable=W0402
    import string
    dec = ""
    for data in _HEX_REGEXP.findall(hex):
        # zfill pads a lone digit, e.g. 'a' -> '0a', before decoding
        data = data.zfill(2).decode("hex")
        if not data in string.printable:
            data = UNKNOWN_CHARACTER
        dec += data
    # pylint: enable=W0402
    return dec
class _DeviceConstantEditor(BaseEditor):
    """Editor dialog for a single fiscal printer DeviceConstant.

    The decimal and hexadecimal views of the device value are kept in
    sync through a pair of content-changed callbacks.
    """
    gladefile = 'DeviceConstantEditor'
    model_type = DeviceConstant
    model_name = _('Device constant')
    proxy_widgets = ('constant_name',
                     'constant_value',
                     'constant_type_description',
                     'device_value',
                     'device_value_hex',
                     )
    def __init__(self, store, model=None, printer=None, constant_type=None):
        # printer is required despite the keyword default; fail early
        if not isinstance(printer, ECFPrinter):
            raise TypeError("printer should be a ECFPrinter, not %s" % printer)
        self.printer = printer
        self.constant_type = constant_type
        BaseEditor.__init__(self, store, model)
        # Hide value label/entry for non tax types
        if constant_type != DeviceConstant.TYPE_TAX:
            self.label_value.hide()
            self.constant_value.hide()
    # signal_block stops the entry's own content-changed handler from
    # firing while we mirror text into it, avoiding a feedback loop.
    @signal_block('device_value.content_changed')
    def _update_dec(self, value):
        self.device_value.set_text(value)
    @signal_block('device_value_hex.content_changed')
    def _update_hex(self, value):
        self.device_value_hex.set_text(value)
    def create_model(self, store):
        # Default new constants to a custom tax entry with no value yet
        return DeviceConstant(store=store,
                              printer=self.printer,
                              constant_type=self.constant_type,
                              constant_value=None,
                              constant_name=u"Unnamed",
                              constant_enum=int(TaxType.CUSTOM),
                              device_value=None)
    def setup_proxies(self):
        self.proxy = self.add_proxy(self.model,
                                    _DeviceConstantEditor.proxy_widgets)
        self.proxy.update('device_value')
    #
    # Callbacks
    #
    def on_device_value_hex__content_changed(self, entry):
        # User edited the hex view: mirror the decoded text into the
        # decimal view
        self._update_dec(hex2dec(entry.get_text()))
    def on_device_value__content_changed(self, entry):
        # User edited the decimal view: mirror the encoded text into the
        # hex view
        self._update_hex(dec2hex(entry.get_text()))
class _DeviceConstantsList(AdditionListSlave):
    """List slave displaying one printer's DeviceConstants of a given type."""
    def __init__(self, store, printer):
        self._printer = printer
        # Currently displayed constant type; set via switch()
        self._constant_type = None
        AdditionListSlave.__init__(self, store,
                                   self._get_columns())
        self.connect('on-add-item', self._on_list_slave__add_item)
        self.connect('before-delete-items',
                     self._on_list_slave__before_delete_items)
    def _get_columns(self):
        return [Column('constant_name', _('Name'), expand=True),
                Column('device_value', _('Value'), data_type=str,
                       width=120, format_func=lambda x: repr(x)[1:-1])]
    # NOTE(review): this method appears to be dead code -- the
    # 'before-delete-items' signal above is connected to
    # _on_list_slave__before_delete_items instead. Confirm before removing.
    def _before_delete_items(self, list_slave, items):
        self.store.commit()
        self._refresh()
    def _refresh(self):
        # Re-populate the list with the constants of the current type
        self.klist.clear()
        self.klist.extend(self._printer.get_constants_by_type(
            self._constant_type))
    #
    # AdditionListSlave
    #
    def run_editor(self, model):
        return run_dialog(_DeviceConstantEditor, store=self.store,
                          model=model,
                          printer=self._printer,
                          constant_type=self._constant_type)
    #
    # Public API
    #
    def switch(self, constant_type):
        # Display the constants of the given type
        self._constant_type = constant_type
        self._refresh()
    #
    # Callbacks
    #
    def _on_list_slave__add_item(self, slave, item):
        self._refresh()
    def _on_list_slave__before_delete_items(self, slave, items):
        for item in items:
            DeviceConstant.delete(item.id, store=self.store)
class DeviceConstantsDialog(BasicDialog):
    """Dialog for browsing and editing a printer's device constants.

    The left pane selects a constant type (units/tax/payments); the right
    pane is a _DeviceConstantsList showing constants of that type.
    """
    size = (500, 300)
    def __init__(self, store, printer):
        self._constant_slave = None
        self.store = store
        self.printer = printer
        BasicDialog.__init__(self, hide_footer=False, title='edit',
                             size=self.size)
        self.main.set_border_width(6)
        self._create_ui()
    def _create_ui(self):
        # Two-pane layout: type list on the left, constants list on the right
        hbox = gtk.HBox()
        self.klist = ObjectList([Column('name')])
        self.klist.set_size_request(150, -1)
        self.klist.get_treeview().set_headers_visible(False)
        self.klist.connect('selection-changed',
                           self._on_klist__selection_changed)
        hbox.pack_start(self.klist)
        hbox.show()
        for name, ctype in [(_(u'Units'), DeviceConstant.TYPE_UNIT),
                            (_(u'Tax'), DeviceConstant.TYPE_TAX),
                            (_(u'Payments'), DeviceConstant.TYPE_PAYMENT)]:
            self.klist.append(Settable(name=name, type=ctype))
        self.klist.show()
        self._constant_slave = _DeviceConstantsList(self.store, self.printer)
        self._constant_slave.switch(DeviceConstant.TYPE_UNIT)
        hbox.pack_start(self._constant_slave.get_toplevel())
        # FIXME: redesign BasicDialog
        self.main.remove(self.main_label)
        self.main.add(hbox)
        hbox.show_all()
    def _on_klist__selection_changed(self, klist, selected):
        # Show the constants matching the newly selected type
        self._constant_slave.switch(selected.type)
| tiagocardosos/stoq | plugins/ecf/deviceconstanteditor.py | Python | gpl-2.0 | 7,045 | [
"VisIt"
] | c4f65ed69979635453e175d22b768dc579a80892ded94c7c4845b11168869d37 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
from stoqlib.reporting.report import ObjectListReport
from stoqlib.lib.translation import stoqlib_gettext as _
class ClientsWithSaleReport(ObjectListReport):
    """Report listing clients that have sales, with per-client totals.

    Summarizes the 'n_sales' and 'total_amount' columns.
    """
    title = _("Clients With Sale")
    main_object_name = (_("client"), _("clients"))
    summary = ['n_sales', 'total_amount']
| tiagocardosos/stoq | stoqlib/reporting/person.py | Python | gpl-2.0 | 1,262 | [
"VisIt"
] | 270bba5dd23f3bff4735a6b214063b20230b14e6a19635f47dd52a5906bb94dd |
"""
Support for the Amazon Polly text to speech service.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts.amazon_polly/
"""
import logging
import voluptuous as vol
from homeassistant.components.tts import Provider, PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['boto3==1.4.3']
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
CONF_PROFILE_NAME = 'profile_name'
ATTR_CREDENTIALS = 'credentials'
DEFAULT_REGION = 'us-east-1'
SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-2', 'eu-west-1']
CONF_VOICE = 'voice'
CONF_OUTPUT_FORMAT = 'output_format'
CONF_SAMPLE_RATE = 'sample_rate'
CONF_TEXT_TYPE = 'text_type'
SUPPORTED_VOICES = ['Geraint', 'Gwyneth', 'Mads', 'Naja', 'Hans', 'Marlene',
'Nicole', 'Russell', 'Amy', 'Brian', 'Emma', 'Raveena',
'Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly',
'Salli', 'Conchita', 'Enrique', 'Miguel', 'Penelope',
'Chantal', 'Celine', 'Mathieu', 'Dora', 'Karl', 'Carla',
'Giorgio', 'Mizuki', 'Liv', 'Lotte', 'Ruben', 'Ewa',
'Jacek', 'Jan', 'Maja', 'Ricardo', 'Vitoria', 'Cristiano',
'Ines', 'Carmen', 'Maxim', 'Tatyana', 'Astrid', 'Filiz']
SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm']
SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050']
SUPPORTED_SAMPLE_RATES_MAP = {
'mp3': ['8000', '16000', '22050'],
'ogg_vorbis': ['8000', '16000', '22050'],
'pcm': ['8000', '16000']
}
SUPPORTED_TEXT_TYPES = ['text', 'ssml']
CONTENT_TYPE_EXTENSIONS = {
'audio/mpeg': 'mp3',
'audio/ogg': 'ogg',
'audio/pcm': 'pcm'
}
DEFAULT_VOICE = 'Joanna'
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'
DEFAULT_SAMPLE_RATES = {
'mp3': '22050',
'ogg_vorbis': '22050',
'pcm': '16000'
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_REGION, default=DEFAULT_REGION):
vol.In(SUPPORTED_REGIONS),
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT):
vol.In(SUPPORTED_OUTPUT_FORMATS),
vol.Optional(CONF_SAMPLE_RATE): vol.All(cv.string,
vol.In(SUPPORTED_SAMPLE_RATES)),
vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE):
vol.In(SUPPORTED_TEXT_TYPES),
})
def get_engine(hass, config):
    """Set up Amazon Polly speech component.

    Validates the configured sample rate against the output format,
    creates a boto3 Polly client and discovers the available voices and
    languages.  Returns an AmazonPollyProvider, or None if the sample
    rate is invalid for the chosen output format.
    """
    # pylint: disable=import-error
    output_format = config.get(CONF_OUTPUT_FORMAT)
    # Fall back to the per-format default rate if none was configured
    sample_rate = config.get(CONF_SAMPLE_RATE,
                             DEFAULT_SAMPLE_RATES[output_format])
    if sample_rate not in SUPPORTED_SAMPLE_RATES_MAP.get(output_format):
        _LOGGER.error("%s is not a valid sample rate for %s",
                      sample_rate, output_format)
        return None
    # NOTE(review): the caller's config dict is mutated below (resolved
    # sample rate written in, AWS credential keys deleted) -- confirm
    # callers do not reuse the dict
    config[CONF_SAMPLE_RATE] = sample_rate
    import boto3
    profile = config.get(CONF_PROFILE_NAME)
    if profile is not None:
        boto3.setup_default_session(profile_name=profile)
    aws_config = {
        CONF_REGION: config.get(CONF_REGION),
        CONF_ACCESS_KEY_ID: config.get(CONF_ACCESS_KEY_ID),
        CONF_SECRET_ACCESS_KEY: config.get(CONF_SECRET_ACCESS_KEY),
    }
    # Strip AWS-only keys so the remaining config holds synthesis options
    del config[CONF_REGION]
    del config[CONF_ACCESS_KEY_ID]
    del config[CONF_SECRET_ACCESS_KEY]
    polly_client = boto3.client('polly', **aws_config)
    # Discover every available voice and the set of language codes
    supported_languages = []
    all_voices = {}
    all_voices_req = polly_client.describe_voices()
    for voice in all_voices_req.get('Voices'):
        all_voices[voice.get('Id')] = voice
        if voice.get('LanguageCode') not in supported_languages:
            supported_languages.append(voice.get('LanguageCode'))
    return AmazonPollyProvider(polly_client, config, supported_languages,
                               all_voices)
class AmazonPollyProvider(Provider):
    """Amazon Polly speech api provider."""
    def __init__(self, polly_client, config, supported_languages,
                 all_voices):
        """Initialize Amazon Polly provider for TTS.

        polly_client: boto3 Polly client created by get_engine.
        config: validated synthesis options (output format, sample rate,
        text type, default voice).
        all_voices: mapping of voice id -> voice description dict as
        returned by describe_voices().
        """
        self.client = polly_client
        self.config = config
        self.supported_langs = supported_languages
        self.all_voices = all_voices
        self.default_voice = self.config.get(CONF_VOICE)
        self.name = 'Amazon Polly'
    @property
    def supported_languages(self):
        """Return a list of supported languages."""
        return self.supported_langs
    @property
    def default_language(self):
        """Return the default language (language of the default voice)."""
        return self.all_voices.get(self.default_voice).get('LanguageCode')
    @property
    def default_options(self):
        """Return dict include default options."""
        return {CONF_VOICE: self.default_voice}
    @property
    def supported_options(self):
        """Return a list of supported options."""
        return [CONF_VOICE]
    def get_tts_audio(self, message, language=None, options=None):
        """Request TTS file from Polly.

        Returns a (file extension, audio bytes) tuple, or (None, None)
        when the requested voice does not match the requested language.
        NOTE(review): assumes `options` is a dict (the TTS framework merges
        in default_options before calling) -- a bare None would raise here.
        """
        voice_id = options.get(CONF_VOICE, self.default_voice)
        voice_in_dict = self.all_voices.get(voice_id)
        if language != voice_in_dict.get('LanguageCode'):
            _LOGGER.error("%s does not support the %s language",
                          voice_id, language)
            return (None, None)
        resp = self.client.synthesize_speech(
            OutputFormat=self.config[CONF_OUTPUT_FORMAT],
            SampleRate=self.config[CONF_SAMPLE_RATE],
            Text=message,
            TextType=self.config[CONF_TEXT_TYPE],
            VoiceId=voice_id
        )
        # Map the response content type to a file extension for the caller
        return (CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')],
                resp.get('AudioStream').read())
| MungoRae/home-assistant | homeassistant/components/tts/amazon_polly.py | Python | apache-2.0 | 6,145 | [
"Brian"
] | 80c21d0e26641ef88971990430c6b0c2d0dcfbfdd45dfa2f1cb300e4c5224be7 |
# Copyright (C) 2016 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module contains functions for calculating coincident ranking statistic
values.
"""
import logging
import numpy
from . import ranking
from . import coinc_rate
class Stat(object):
    """Base class which should be extended to provide a coincident statistic"""
    def __init__(self, files=None, ifos=None, **kwargs):
        """Create a statistic class instance

        Parameters
        ----------
        files: list of strs
            A list containing the filenames of hdf format files used to help
            construct the coincident statistics. The files must have a 'stat'
            attribute which is used to associate them with the appropriate
            statistic class.
        ifos: list of detector names, optional
        """
        import h5py
        # Map each file's 'stat' attribute to an *open* h5py.File; the
        # handles stay open for the lifetime of this object.
        self.files = {}
        files = files or []
        for filename in files:
            f = h5py.File(filename, 'r')
            # h5py attrs may be bytes; normalise to str
            stat = (f.attrs['stat']).decode()
            if stat in self.files:
                raise RuntimeError("We already have one file with stat attr ="
                                   " %s. Can't provide more than one!" % stat)
            logging.info("Found file %s for stat %s", filename, stat)
            self.files[stat] = f
        # Provide the dtype of the single detector method's output
        # This is used by background estimation codes that need to maintain
        # a buffer of such values.
        self.single_dtype = numpy.float32
        # True if a larger single detector statistic will produce a larger
        # coincident statistic
        self.single_increasing = True
        self.ifos = ifos or []
class NewSNRStatistic(Stat):
    """Rank coincidences by the quadrature sum of single-ifo newSNR values"""

    def single(self, trigs):
        """Compute the newSNR single-detector ranking statistic.

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        numpy.ndarray
            Single-detector statistic values.
        """
        return ranking.get_newsnr(trigs)

    def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
        """Combine two single-detector statistics in quadrature.

        Parameters
        ----------
        s0: numpy.ndarray
            Ranking statistic values from the first detector.
        s1: numpy.ndarray
            Ranking statistic values from the second detector.
        slide: (unused in this statistic)
        step: (unused in this statistic)

        Returns
        -------
        numpy.ndarray
            Coincident ranking statistic values.
        """
        return numpy.sqrt(s0 ** 2. + s1 ** 2.)

    def coinc_lim_for_thresh(self, s0, thresh):
        """Give the second-detector statistic needed to reach `thresh`.

        Parameters
        ----------
        s0: numpy.ndarray
            Ranking statistic values from the first detector.
        thresh: float
            Target value of the coincident statistic.

        Returns
        -------
        numpy.ndarray
            Minimum second-detector statistic per trigger; zero where s0
            alone already exceeds the threshold.
        """
        required = thresh ** 2. - s0 ** 2.
        # A negative remainder means any second-ifo value suffices
        return numpy.sqrt(numpy.clip(required, 0, None))

    def coinc_multiifo(self, s, slide, step, to_shift,
                       **kwargs):  # pylint:disable=unused-argument
        """Combine any number of single-detector statistics in quadrature.

        Parameters
        ----------
        s: list
            (ifo, single-detector statistic) tuples.
        slide: (unused in this statistic)
        step: (unused in this statistic)
        to_shift: list
            Multiples of the time shift to apply (unused in this statistic).

        Returns
        -------
        numpy.ndarray
            Coincident ranking statistic values.
        """
        total = sum(single[1] ** 2. for single in s)
        return total ** 0.5

    def coinc_multiifo_lim_for_thresh(self, s, thresh, limifo,
                                      **kwargs):  # pylint:disable=unused-argument
        """Give the limifo statistic needed to reach `thresh`.

        Parameters
        ----------
        s: list
            (ifo, single-detector statistic) tuples for all detectors
            except limifo.
        thresh: float
            Target value of the coincident statistic.
        limifo: string
            The ifo for which the limit is to be found.

        Returns
        -------
        numpy.ndarray
            Minimum limifo statistic per trigger; zero where the other
            detectors alone already exceed the threshold.
        """
        remainder = thresh ** 2. - sum(single[1] ** 2. for single in s)
        return numpy.clip(remainder, 0, None) ** 0.5
class NewSNRSGStatistic(NewSNRStatistic):
    """Calculate the NewSNRSG coincident detection statistic"""
    def single(self, trigs):
        """Calculate the single detector statistic, here equal to newsnr_sgveto
        (newsnr incorporating the sine-Gaussian veto).

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        numpy.ndarray
            The array of single detector values
        """
        return ranking.get_newsnr_sgveto(trigs)
class NewSNRSGPSDStatistic(NewSNRSGStatistic):
    """Calculate the NewSNRSGPSD coincident detection statistic"""
    def single(self, trigs):
        """Calculate the single detector statistic, here equal to newsnr
        combined with sgveto and psdvar statistic

        Parameters
        ----------
        trigs: dict of numpy.ndarrays
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        numpy.ndarray
            The array of single detector values
        """
        return ranking.get_newsnr_sgveto_psdvar(trigs)
class NewSNRSGPSDScaledStatistic(NewSNRSGStatistic):
    """Calculate the NewSNRSGPSDScaled coincident detection statistic"""
    def single(self, trigs):
        """Calculate the single detector statistic, here equal to newsnr
        combined with sgveto and the *scaled* psdvar statistic

        Parameters
        ----------
        trigs: dict of numpy.ndarrays
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        numpy.ndarray
            The array of single detector values
        """
        return ranking.get_newsnr_sgveto_psdvar_scaled(trigs)
class NewSNRSGPSDScaledThresholdStatistic(NewSNRSGStatistic):
    """Calculate the NewSNRSGPSDScaledThreshold coincident detection
    statistic"""
    def single(self, trigs):
        """Calculate the single detector statistic, here equal to newsnr
        combined with sgveto and the scaled, thresholded psdvar statistic

        Parameters
        ----------
        trigs: dict of numpy.ndarrays
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        numpy.ndarray
            The array of single detector values
        """
        return ranking.get_newsnr_sgveto_psdvar_scaled_threshold(trigs)
class NetworkSNRStatistic(NewSNRStatistic):
    """Like the NewSNR statistic, but uses raw matched-filter SNR with no
    signal-consistency reweighting; coincident ranking (inherited) is the
    quadrature sum of SNRs"""
    def single(self, trigs):
        # Single-detector statistic is just the SNR column
        return trigs['snr']
class NewSNRCutStatistic(NewSNRStatistic):
    """Same as the NewSNR statistic, but demonstrates a cut of the triggers"""
    def single(self, trigs):
        """Calculate the single detector statistic.

        Triggers failing the chisq cut are flagged with the sentinel value
        -1 so that `coinc` can veto any coincidence containing them.

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
            Dictionary-like object holding single detector trigger information.

        Returns
        -------
        newsnr: numpy.ndarray
            Array of single detector values
        """
        newsnr = ranking.get_newsnr(trigs)
        rchisq = trigs['chisq'][:] / (2. * trigs['chisq_dof'][:] - 2.)
        # Veto moderate-SNR triggers with poor reduced chisq
        newsnr[numpy.logical_and(newsnr < 10, rchisq > 2)] = -1
        return newsnr
    def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
        """Calculate the coincident detection statistic.

        Coincidences containing a vetoed (-1 sentinel) trigger in either
        detector are assigned a statistic of zero.

        Parameters
        ----------
        s0: numpy.ndarray
            Single detector ranking statistic for the first detector.
        s1: numpy.ndarray
            Single detector ranking statistic for the second detector.
        slide: (unused in this statistic)
        step: (unused in this statistic)

        Returns
        -------
        cstat: numpy.ndarray
            Array of coincident ranking statistic values
        """
        cstat = (s0 ** 2. + s1 ** 2.) ** 0.5
        # Zero out coincidences involving a vetoed trigger
        cstat[s0 == -1] = 0
        cstat[s1 == -1] = 0
        return cstat
    def coinc_lim_for_thresh(self, s0, thresh):
        """Calculate the required single detector statistic to exceed
        the threshold for each of the input triggers.

        Vetoed first-detector triggers (-1 sentinel) get an infinite
        limit: no second-detector value can form a passing coincidence.

        Parameters
        ----------
        s0: numpy.ndarray
            Single detector ranking statistic for the first detector.
        thresh: float
            The threshold on the coincident statistic.

        Returns
        -------
        numpy.ndarray
            Array of limits on the second detector single statistic to
            exceed thresh.
        """
        s1 = thresh ** 2. - s0 ** 2.
        s1[s0 == -1] = numpy.inf
        s1[s1 < 0] = 0
        return s1 ** 0.5
class PhaseTDNewStatistic(NewSNRStatistic):
    """Statistic that re-weights combined newsnr using coinc parameters.

    The weighting is based on the PDF of time delays, phase differences and
    amplitude ratios between triggers in different ifos.
    """
    def __init__(self, files=None, ifos=None, **kwargs):
        NewSNRStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # Single-detector buffer carries phase/time/sigma/snr alongside the
        # ranking statistic itself, as all are needed for the signal PDF
        self.single_dtype = [('snglstat', numpy.float32),
                             ('coa_phase', numpy.float32),
                             ('end_time', numpy.float64),
                             ('sigmasq', numpy.float32),
                             ('snr', numpy.float32)
                             ]
        # Assign attribute so that it can be replaced with other functions
        self.get_newsnr = ranking.get_newsnr
        self.has_hist = False
        self.hist_ifos = None
        # Reference SNR used to normalise the signal-rate scaling
        self.ref_snr = 5.0
        self.relsense = {}
        self.swidth = self.pwidth = self.twidth = None
        self.srbmin = self.srbmax = None
        self.max_penalty = None
        self.pdtype = []
        self.weights = {}
        self.param_bin = {}
        # With exactly 2 detectors a dense lookup table is affordable
        # (see get_hist for the RAM/CPU tradeoff)
        self.two_det_flag = (len(ifos) == 2)
        self.two_det_weights = {}
    def get_hist(self, ifos=None):
        """Read in a signal density file for the ifo combination"""
        ifos = ifos or self.ifos
        selected = None
        for name in self.files:
            # Pick out the statistic files that provide phase / time/ amp
            # relationships and match to the ifos in use
            if 'phasetd_newsnr' in name:
                ifokey = name.split('_')[2]
                # floor division: the key is a concatenation of 2-char
                # ifo names (avoids a float under Python 3)
                num = len(ifokey) // 2
                if num != len(ifos):
                    continue
                match = [ifo in name for ifo in ifos]
                if False in match:
                    continue
                else:
                    selected = name
                    break
        if selected is None:
            raise RuntimeError("Couldn't figure out which stat file to use")
        logging.info("Using signal histogram %s for ifos %s", selected, ifos)
        histfile = self.files[selected]
        self.hist_ifos = histfile.attrs['ifos']
        n_ifos = len(self.hist_ifos)
        # Histogram bin attributes
        self.twidth = histfile.attrs['twidth']
        self.pwidth = histfile.attrs['pwidth']
        self.swidth = histfile.attrs['swidth']
        self.srbmin = histfile.attrs['srbmin']
        self.srbmax = histfile.attrs['srbmax']
        bin_volume = (self.twidth * self.pwidth * self.swidth) ** (n_ifos - 1)
        self.hist_max = - 1. * numpy.inf
        # Read histogram for each ifo, to use if that ifo has smallest SNR in
        # the coinc
        for ifo in self.hist_ifos:
            weights = histfile[ifo]['weights'][:]
            # renormalise to PDF
            self.weights[ifo] = weights / (weights.sum() * bin_volume)
            param = histfile[ifo]['param_bin'][:]
            if param.dtype == numpy.int8:
                # Older style, incorrectly sorted histogram file
                ncol = param.shape[1]
                self.pdtype = [('c%s' % i, param.dtype) for i in range(ncol)]
                self.param_bin[ifo] = numpy.zeros(len(self.weights[ifo]),
                                                  dtype=self.pdtype)
                for i in range(ncol):
                    self.param_bin[ifo]['c%s' % i] = param[:, i]
                lsort = self.param_bin[ifo].argsort()
                self.param_bin[ifo] = self.param_bin[ifo][lsort]
                self.weights[ifo] = self.weights[ifo][lsort]
            else:
                # New style, efficient histogram file
                # param bin and weights have already been sorted
                self.param_bin[ifo] = param
                self.pdtype = self.param_bin[ifo].dtype
            # Max_penalty is a small number to assigned to any bins without
            # histogram entries. All histograms in a given file have the same
            # min entry by design, so use the min of the last one read in.
            self.max_penalty = self.weights[ifo].min()
            self.hist_max = max(self.hist_max, self.weights[ifo].max())
            if self.two_det_flag:
                # The density of signals is computed as a function of 3 binned
                # parameters: time difference (t), phase difference (p) and
                # SNR ratio (s). These are computed for each combination of
                # detectors, so for detectors 6 differences are needed. However
                # many combinations of these parameters are highly unlikely and
                # no instances of these combinations occurred when generating
                # the statistic files. Rather than storing a bunch of 0s, these
                # values are just not stored at all. This reduces the size of
                # the statistic file, but means we have to identify the correct
                # value to read for every trigger. For 2 detectors we can
                # expand the weights lookup table here, basically adding in all
                # the "0" values. This makes looking up a value in the
                # "weights" table a O(N) rather than O(NlogN) operation. It
                # sacrifices RAM to do this, so is a good tradeoff for 2
                # detectors, but not for 3!
                if not hasattr(self, 'c0_size'):
                    self.c0_size = {}
                    self.c1_size = {}
                    self.c2_size = {}
                self.c0_size[ifo] = 2 * (abs(self.param_bin[ifo]['c0']).max() + 1)
                self.c1_size[ifo] = 2 * (abs(self.param_bin[ifo]['c1']).max() + 1)
                self.c2_size[ifo] = 2 * (abs(self.param_bin[ifo]['c2']).max() + 1)
                array_size = [self.c0_size[ifo], self.c1_size[ifo],
                              self.c2_size[ifo]]
                dtypec = self.weights[ifo].dtype
                self.two_det_weights[ifo] = \
                    numpy.zeros(array_size, dtype=dtypec) + self.max_penalty
                id0 = self.param_bin[ifo]['c0'].astype(numpy.int32) + self.c0_size[ifo] // 2
                id1 = self.param_bin[ifo]['c1'].astype(numpy.int32) + self.c1_size[ifo] // 2
                id2 = self.param_bin[ifo]['c2'].astype(numpy.int32) + self.c2_size[ifo] // 2
                self.two_det_weights[ifo][id0, id1, id2] = self.weights[ifo]
        relfac = histfile.attrs['sensitivity_ratios']
        for ifo, sense in zip(self.hist_ifos, relfac):
            self.relsense[ifo] = sense
        self.has_hist = True
    def single(self, trigs):
        """Calculate the single detector statistic & assemble other parameters

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
            Object holding single detector trigger information. 'snr', 'chisq',
            'chisq_dof', 'coa_phase', 'end_time', and 'sigmasq' are required keys.

        Returns
        -------
        numpy.ndarray
            Array of single detector parameter values
        """
        sngl_stat = self.get_newsnr(trigs)
        singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
        singles['snglstat'] = sngl_stat
        singles['coa_phase'] = trigs['coa_phase'][:]
        singles['end_time'] = trigs['end_time'][:]
        singles['sigmasq'] = trigs['sigmasq'][:]
        singles['snr'] = trigs['snr'][:]
        return numpy.array(singles, ndmin=1)
    def logsignalrate(self, s0, s1, shift):
        """Two-detector convenience wrapper around
        logsignalrate_multiifo; the first ifo is shifted by -1 times the
        slide vector, the second is unshifted."""
        to_shift = [-1, 0]
        stats = {self.ifos[0]: s0, self.ifos[1]: s1}
        return self.logsignalrate_multiifo(stats, shift, to_shift)
    def logsignalrate_multiifo(self, stats, shift, to_shift):
        """Calculate the normalized log rate density of signals via lookup

        Parameters
        ----------
        stats: list of dicts giving single-ifo quantities, ordered as
            self.ifos
        shift: numpy array of float, size of the time shift vector for each
            coinc to be ranked
        to_shift: list of int, multiple of the time shift to apply ordered
            as self.ifos

        Returns
        -------
        value: log of coinc signal rate density for the given single-ifo
            triggers and time shifts
        """
        # Convert time shift vector to dict, as hist ifos and self.ifos may
        # not be in same order
        to_shift = {ifo: s for ifo, s in zip(self.ifos, to_shift)}
        if not self.has_hist:
            self.get_hist()
        # Figure out which ifo of the contributing ifos has the smallest SNR,
        # to use as reference for choosing the signal histogram.
        snrs = numpy.array([numpy.array(stats[ifo]['snr'], ndmin=1)
                            for ifo in self.ifos])
        smin = numpy.argmin(snrs, axis=0)
        # Store a list of the triggers using each ifo as reference
        rtypes = {ifo: numpy.where(smin == j)[0]
                  for j, ifo in enumerate(self.ifos)}
        # Get reference ifo information
        rate = numpy.zeros(len(shift), dtype=numpy.float32)
        for ref_ifo in self.ifos:
            rtype = rtypes[ref_ifo]
            ref = stats[ref_ifo]
            pref = numpy.array(ref['coa_phase'], ndmin=1)[rtype]
            tref = numpy.array(ref['end_time'], ndmin=1)[rtype]
            sref = numpy.array(ref['snr'], ndmin=1)[rtype]
            sigref = numpy.array(ref['sigmasq'], ndmin=1) ** 0.5
            sigref = sigref[rtype]
            senseref = self.relsense[self.hist_ifos[0]]
            binned = []
            other_ifos = [ifo for ifo in self.ifos if ifo != ref_ifo]
            for ifo in other_ifos:
                sc = stats[ifo]
                p = numpy.array(sc['coa_phase'], ndmin=1)[rtype]
                t = numpy.array(sc['end_time'], ndmin=1)[rtype]
                s = numpy.array(sc['snr'], ndmin=1)[rtype]
                sense = self.relsense[ifo]
                sig = numpy.array(sc['sigmasq'], ndmin=1) ** 0.5
                sig = sig[rtype]
                # Calculate differences
                pdif = (pref - p) % (numpy.pi * 2.0)
                tdif = shift[rtype] * to_shift[ref_ifo] + \
                    tref - shift[rtype] * to_shift[ifo] - t
                sdif = s / sref * sense / senseref * sigref / sig
                # Put into bins
                # Use builtin int: the numpy.int alias was removed in
                # NumPy 1.24 and was always identical to the builtin,
                # so this is behavior-preserving on older NumPy too
                tbin = (tdif / self.twidth).astype(int)
                pbin = (pdif / self.pwidth).astype(int)
                sbin = (sdif / self.swidth).astype(int)
                binned += [tbin, pbin, sbin]
            # Convert binned to same dtype as stored in hist
            nbinned = numpy.zeros(len(pbin), dtype=self.pdtype)
            for i, b in enumerate(binned):
                nbinned['c%s' % i] = b
            # Read signal weight from precalculated histogram
            if self.two_det_flag:
                # High-RAM, low-CPU option for two-det
                rate[rtype] = numpy.zeros(len(nbinned)) + self.max_penalty
                id0 = nbinned['c0'].astype(numpy.int32) + self.c0_size[ref_ifo] // 2
                id1 = nbinned['c1'].astype(numpy.int32) + self.c1_size[ref_ifo] // 2
                id2 = nbinned['c2'].astype(numpy.int32) + self.c2_size[ref_ifo] // 2
                # look up keys which are within boundaries
                within = (id0 > 0) & (id0 < self.c0_size[ref_ifo])
                within = within & (id1 > 0) & (id1 < self.c1_size[ref_ifo])
                within = within & (id2 > 0) & (id2 < self.c2_size[ref_ifo])
                within = numpy.where(within)[0]
                rate[rtype[within]] = self.two_det_weights[ref_ifo][id0[within], id1[within], id2[within]]
            else:
                # Low[er]-RAM, high[er]-CPU option for >two det
                loc = numpy.searchsorted(self.param_bin[ref_ifo], nbinned)
                loc[loc == len(self.weights[ref_ifo])] = 0
                rate[rtype] = self.weights[ref_ifo][loc]
                # These weren't in our histogram so give them max penalty
                # instead of random value
                missed = numpy.where(
                    self.param_bin[ref_ifo][loc] != nbinned
                )[0]
                rate[rtype[missed]] = self.max_penalty
            # Scale by signal population SNR
            rate[rtype] *= (sref / self.ref_snr) ** -4.0
        return numpy.log(rate)
class PhaseTDStatistic(NewSNRStatistic):
    """Statistic that re-weights combined newsnr using coinc parameters.

    The weighting is based on the PDF of time delays, phase differences and
    amplitude ratios between triggers in different ifos.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        NewSNRStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # Per-trigger single-detector quantities needed by the
        # phase/time/amplitude consistency test
        self.single_dtype = [('snglstat', numpy.float32),
                             ('coa_phase', numpy.float32),
                             ('end_time', numpy.float64),
                             ('sigmasq', numpy.float32),
                             ('snr', numpy.float32)
                             ]
        # Assign attribute so that it can be replaced with other functions
        self.get_newsnr = ranking.get_newsnr
        # Signal-parameter histogram and its bin edges; loaded lazily by
        # get_hist()
        self.hist = None
        self.bins = {}
        self.hist_ifos = []

    def get_hist(self, ifos=None, norm='max'):
        """Read in a signal density file for the ifo combination

        Parameters
        ----------
        ifos: list of strings, optional
            Pair of ifos whose histogram file should be used; if None, the
            instance's ifo list is used.
        norm: string
            Histogram normalization convention; only 'max' (peak scaled to
            unity) is implemented.
        """
        # default name for old 2-ifo workflow
        if 'phasetd_newsnr' in self.files:
            histfile = self.files['phasetd_newsnr']
        else:
            ifos = ifos or self.ifos  # if None, use the instance attribute
            if len(ifos) != 2:
                # BUGFIX: str(ifos) - concatenating a list to str raised
                # TypeError and masked this intended error message
                raise RuntimeError("Need exactly 2 ifos for the p/t/a "
                                   "statistic! Ifos given were " + str(ifos))
            matching = [k for k in self.files.keys() if \
                        'phasetd' in k and (ifos[0] in k and ifos[1] in k)]
            if len(matching) == 1:
                histfile = self.files[matching[0]]
            else:
                raise RuntimeError(
                    "%i statistic files had an attribute matching phasetd*%s%s !"
                    "Should be exactly 1" % (len(matching), ifos[0], ifos[1]))
            logging.info("Using signal histogram %s for ifos %s", matching,
                         ifos)
        self.hist = histfile['map'][:]
        self.hist_ifos = ifos
        if norm == 'max':
            # Normalize so that peak of hist is equal to unity
            self.hist = self.hist / float(self.hist.max())
            self.hist = numpy.log(self.hist)
        else:
            raise NotImplementedError("Sorry, we have no other normalizations")
        # Bin boundaries are stored in the hdf file
        self.bins['dt'] = histfile['tbins'][:]
        self.bins['dphi'] = histfile['pbins'][:]
        self.bins['snr'] = histfile['sbins'][:]
        self.bins['sigma_ratio'] = histfile['rbins'][:]
        # Largest log-histogram value; used by subclasses as the best-case
        # signal rate
        self.hist_max = self.hist.max()

    def single(self, trigs):
        """Calculate the single detector statistic & assemble other parameters

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group or similar dict-like object
            Object holding single detector trigger information. 'snr', 'chisq',
            'chisq_dof', 'coa_phase', 'end_time', and 'sigmasq' are required keys.

        Returns
        -------
        numpy.ndarray
            Array of single detector parameter values
        """
        sngl_stat = self.get_newsnr(trigs)
        singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
        singles['snglstat'] = sngl_stat
        singles['coa_phase'] = trigs['coa_phase'][:]
        singles['end_time'] = trigs['end_time'][:]
        singles['sigmasq'] = trigs['sigmasq'][:]
        singles['snr'] = trigs['snr'][:]
        return numpy.array(singles, ndmin=1)

    def signal_hist(self, td, pd, sn0, sn1, rd):
        """Look up the log signal density for given coinc parameter values.

        NB `rd` is modified in place so that the sigma ratio is always <= 1;
        the snr arrays are copied before any swap.
        """
        assert self.hist is not None
        # enforce that sigma ratio is < 1 by swapping values
        snr0 = sn0 * 1
        snr1 = sn1 * 1
        snr0[rd > 1] = sn1[rd > 1]
        snr1[rd > 1] = sn0[rd > 1]
        rd[rd > 1] = 1. / rd[rd > 1]
        # Find which bin each coinc falls into
        tv = numpy.searchsorted(self.bins['dt'], td) - 1
        pv = numpy.searchsorted(self.bins['dphi'], pd) - 1
        s0v = numpy.searchsorted(self.bins['snr'], snr0) - 1
        s1v = numpy.searchsorted(self.bins['snr'], snr1) - 1
        rv = numpy.searchsorted(self.bins['sigma_ratio'], rd) - 1
        # Enforce that points fit into the bin boundaries: if a point lies
        # outside the boundaries it is pushed back to the nearest bin.
        for binnum, axis in zip([tv, pv, rv, s0v, s1v],
                                ['dt', 'dphi', 'sigma_ratio', 'snr', 'snr']):
            binend = len(self.bins[axis])
            binnum[binnum < 0] = 0
            binnum[binnum >= binend - 1] = binend - 2
        return self.hist[tv, pv, s0v, s1v, rv]

    def slide_dt(self, singles, shift, slide_vec):
        """Return end-time differences after applying time slides."""
        # Apply time shifts in the multiples specified by slide_vec
        # and return resulting time difference
        assert len(singles) == 2
        assert len(slide_vec) == 2
        dt = singles[0]['end_time'] + shift * slide_vec[0] -\
            (singles[1]['end_time'] + shift * slide_vec[1])
        return dt

    def logsignalrate(self, s0, s1, shift):
        """Calculate the normalized log rate density of signals via lookup"""
        # does not require ifos to be specified, only 1 p/t/a file
        if self.hist is None:
            self.get_hist()
        # for 2-ifo pipeline, add time shift to 2nd ifo ('s1')
        slidevec = [0, 1]
        td = numpy.array(self.slide_dt([s0, s1], shift, slidevec),
                         ndmin=1)
        if numpy.any(td > 1.):
            raise RuntimeError(
                "Time difference bigger than 1 second after applying any time "
                "shifts! This should not happen")
        pd = numpy.array((s0['coa_phase'] - s1['coa_phase']) % \
                         (2. * numpy.pi), ndmin=1)
        sn0 = numpy.array(s0['snr'], ndmin=1)
        sn1 = numpy.array(s1['snr'], ndmin=1)
        rd = numpy.array((s0['sigmasq'] / s1['sigmasq']) ** 0.5, ndmin=1)
        return self.signal_hist(td, pd, sn0, sn1, rd)

    def logsignalrate_multiifo(self, s, shift, to_shift):
        """
        Parameters
        ----------
        s: list, length 2
            List of sets of single-ifo trigger parameter values
        shift: numpy.ndarray
            Array of floats giving the time shifts to be applied with
            multiples given by to_shift
        to_shift: list, length 2
            List of time shift multiples
        """
        assert len(s) == 2
        assert len(to_shift) == 2
        # At present for triples use the H/L signal histogram
        hist_ifos = self.ifos if len(self.ifos) == 2 else ['H1', 'L1']
        if self.hist is None:
            self.get_hist(hist_ifos)
        else:
            assert self.hist_ifos == hist_ifos
            logging.info("Using pre-set signal histogram for %s",
                         self.hist_ifos)
        td = self.slide_dt(s, shift, to_shift)
        if numpy.any(td > 1.):
            raise RuntimeError(
                "Time difference bigger than 1 second after applying any time "
                "shifts! This should not happen")
        pd = numpy.array((s[0]['coa_phase'] - s[1]['coa_phase']) % \
                         (2. * numpy.pi), ndmin=1)
        sn0 = numpy.array(s[0]['snr'], ndmin=1)
        sn1 = numpy.array(s[1]['snr'], ndmin=1)
        rd = numpy.array((s[0]['sigmasq'] / s[1]['sigmasq']) ** 0.5, ndmin=1)
        return self.signal_hist(td, pd, sn0, sn1, rd)

    def coinc(self, s0, s1, slide, step):
        """Calculate the coincident detection statistic.

        Parameters
        ----------
        s0: numpy.ndarray
            Single detector ranking statistic for the first detector.
        s1: numpy.ndarray
            Single detector ranking statistic for the second detector.
        slide: numpy.ndarray
            Array of ints. These represent the multiple of the timeslide
            interval to bring a pair of single detector triggers into coincidence.
        step: float
            The timeslide interval in seconds.

        Returns
        -------
        coinc_stat: numpy.ndarray
            An array of the coincident ranking statistic values
        """
        rstat = s0['snglstat'] ** 2. + s1['snglstat'] ** 2.
        cstat = rstat + 2. * self.logsignalrate(s0, s1, slide * step)
        # clip at zero so the sqrt below is always defined
        cstat[cstat < 0] = 0
        return cstat ** 0.5

    def coinc_lim_for_thresh(self, s0, thresh):
        """Calculate the required single detector statistic to exceed
        the threshold for each of the input triggers.

        Parameters
        ----------
        s0: numpy.ndarray
            Single detector ranking statistic for the first detector.
        thresh: float
            The threshold on the coincident statistic.

        Returns
        -------
        numpy.ndarray
            Array of limits on the second detector single statistic to
            exceed thresh.
        """
        if self.hist is None:
            self.get_hist()
        s1 = thresh ** 2. - s0['snglstat'] ** 2.
        # Assume best case scenario and use maximum signal rate
        s1 -= 2. * self.hist_max
        s1[s1 < 0] = 0
        return s1 ** 0.5
class PhaseTDSGStatistic(PhaseTDStatistic):
    """PhaseTDStatistic with the sine-Gaussian veto folded into the
    single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        PhaseTDStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # Swap the plain newsnr ranking for its sine-Gaussian-vetoed variant
        self.get_newsnr = ranking.get_newsnr_sgveto
class ExpFitStatistic(NewSNRStatistic):
    """Detection statistic using an exponential falloff noise model.

    Statistic approximates the negative log noise coinc rate density per
    template over single-ifo newsnr values.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        # BUGFIX: truthiness test also covers the default files=None, which
        # previously raised TypeError (len(None)) instead of this message
        if not files:
            raise RuntimeError("Can't find any statistic files !")
        NewSNRStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # the stat file attributes are hard-coded as '%{ifo}-fit_coeffs'
        parsed_attrs = [f.split('-') for f in self.files.keys()]
        self.bg_ifos = [at[0] for at in parsed_attrs if
                        (len(at) == 2 and at[1] == 'fit_coeffs')]
        if not len(self.bg_ifos):
            raise RuntimeError("None of the statistic files has the required "
                               "attribute called {ifo}-fit_coeffs !")
        # Per-ifo fit coefficients and their maxima, in template_id order
        self.fits_by_tid = {}
        self.alphamax = {}
        for i in self.bg_ifos:
            self.fits_by_tid[i] = self.assign_fits(i)
            self.get_ref_vals(i)
        # Assign attribute so that it can be replaced with other functions
        self.get_newsnr = ranking.get_newsnr
        self.single_increasing = False

    def assign_fits(self, ifo):
        """Extract fit coefficients for one ifo, re-sorted by template id.

        Returns a dict with 'alpha' (fit slope), 'rate' (trigger count above
        threshold) arrays and the scalar 'thresh' (fit threshold).
        """
        coeff_file = self.files[ifo+'-fit_coeffs']
        template_id = coeff_file['template_id'][:]
        alphas = coeff_file['fit_coeff'][:]
        rates = coeff_file['count_above_thresh'][:]
        # the template_ids and fit coeffs are stored in an arbitrary order
        # create new arrays in template_id order for easier recall
        tid_sort = numpy.argsort(template_id)
        return {'alpha': alphas[tid_sort],
                'rate': rates[tid_sort],
                'thresh': coeff_file.attrs['stat_threshold']
                }

    def get_ref_vals(self, ifo):
        """Record the maximum fitted slope (alpha) over all templates."""
        self.alphamax[ifo] = self.fits_by_tid[ifo]['alpha'].max()

    def find_fits(self, trigs):
        """Get fit coeffs for a specific ifo and template id(s)"""
        try:
            tnum = trigs.template_num  # exists if accessed via coinc_findtrigs
            ifo = trigs.ifo
        except AttributeError:
            tnum = trigs['template_id']  # exists for SingleDetTriggers
            assert len(self.ifos) == 1
            # Should be exactly one ifo provided
            ifo = self.ifos[0]
        # fits_by_tid is a dictionary of dictionaries of arrays
        # indexed by ifo / coefficient name / template_id
        alphai = self.fits_by_tid[ifo]['alpha'][tnum]
        ratei = self.fits_by_tid[ifo]['rate'][tnum]
        thresh = self.fits_by_tid[ifo]['thresh']
        return alphai, ratei, thresh

    def lognoiserate(self, trigs):
        """Calculate the log noise rate density over single-ifo newsnr

        Read in single trigger information, make the newsnr statistic
        and rescale by the fitted coefficients alpha and rate
        """
        alphai, ratei, thresh = self.find_fits(trigs)
        newsnr = self.get_newsnr(trigs)
        # alphai is constant of proportionality between single-ifo newsnr and
        # negative log noise likelihood in given template
        # ratei is rate of trigs in given template compared to average
        # thresh is stat threshold used in given ifo
        lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \
            numpy.log(ratei)
        return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)

    def single(self, trigs):
        """Single-detector statistic, here just equal to the log noise rate"""
        return self.lognoiserate(trigs)

    def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
        """Calculate the final coinc ranking statistic"""
        # Approximate log likelihood ratio by summing single-ifo negative
        # log noise likelihoods
        loglr = - s0 - s1
        # add squares of threshold stat values via idealized Gaussian formula
        threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos]
        loglr += sum([t**2. / 2. for t in threshes])
        # convert back to a coinc-SNR-like statistic
        # via log likelihood ratio \propto rho_c^2 / 2
        return (2. * loglr) ** 0.5

    def coinc_lim_for_thresh(self, s0, thresh):
        """Calculate the required single detector statistic to exceed
        the threshold for each of the input triggers.

        Parameters
        ----------
        s0: numpy.ndarray
            Single detector ranking statistic for the first detector.
        thresh: float
            The threshold on the coincident statistic.

        Returns
        -------
        numpy.ndarray
            Array of limits on the second detector single statistic to
            exceed thresh.
        """
        # invert coinc: loglr = rho_c^2 / 2, with the same threshold terms
        s1 = - (thresh ** 2.) / 2. - s0
        threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos]
        s1 += sum([t**2. / 2. for t in threshes])
        return s1
class ExpFitCombinedSNR(ExpFitStatistic):
    """Reworking of ExpFitStatistic designed to resemble network SNR

    Use a monotonic function of the negative log noise rate density which
    approximates combined (new)snr for coincs with similar newsnr in each ifo
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        ExpFitStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # reference exponential slope; ~6 for low-mass templates
        self.alpharef = 6.
        self.single_increasing = True

    def use_alphamax(self):
        """Set the reference slope to the harmonic mean of the per-ifo
        maximum fitted slopes."""
        recip = [1. / self.alphamax[det] for det in self.bg_ifos]
        self.alpharef = 1. / (sum(recip) / len(recip))

    def single(self, trigs):
        """Map the log noise rate onto a (new)snr-like scale."""
        log_rate = self.lognoiserate(trigs)
        _, _, fit_thresh = self.find_fits(trigs)
        # shift by log of reference slope alpha
        log_rate += -1. * numpy.log(self.alpharef)
        # add threshold and rescale by reference slope
        rescaled = fit_thresh - (log_rate / self.alpharef)
        return numpy.array(rescaled, ndmin=1, dtype=numpy.float32)

    def single_multiifo(self, s):
        """Single-ifo stat for the multi-ifo pipeline; sign depends on
        whether the statistic increases with significance."""
        stat = s[1]['snglstat']
        return stat if self.single_increasing else -1.0 * stat

    def coinc(self, s0, s1, slide, step):  # pylint:disable=unused-argument
        """Sum of singles, scaled by 1/sqrt(2) to resemble network SNR."""
        root_two = 2.**0.5
        return (s0 + s1) / root_two

    def coinc_lim_for_thresh(self, s0, thresh):
        """Second-ifo single stat needed to reach the coinc threshold."""
        return thresh * (2. ** 0.5) - s0

    def coinc_multiifo(self, s, slide, step, to_shift,
                       **kwargs):  # pylint:disable=unused-argument
        """Sum of singles scaled by 1/sqrt(n_ifos) to resemble network SNR."""
        total = sum(sngl[1] for sngl in s)
        return total / len(s)**0.5

    def coinc_multiifo_lim_for_thresh(self, s, thresh,
                                      limifo, **kwargs):  # pylint:disable=unused-argument
        """Limit on the left-out ifo's single stat to reach thresh."""
        # total ifo count includes the left-out (limifo) detector
        return thresh * ((len(s) + 1) ** 0.5) - sum(sngl[1] for sngl in s)
class ExpFitSGCombinedSNR(ExpFitCombinedSNR):
    """ExpFitCombinedSNR with the sine-Gaussian veto included in the
    single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        ExpFitCombinedSNR.__init__(self, files=files, ifos=ifos, **kwargs)
        # sine-Gaussian-vetoed newsnr for single-detector triggers
        self.get_newsnr = ranking.get_newsnr_sgveto
class ExpFitSGPSDCombinedSNR(ExpFitCombinedSNR):
    """ExpFitCombinedSNR with both the sine-Gaussian veto and the PSD
    variation statistic included in the single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        ExpFitCombinedSNR.__init__(self, files=files, ifos=ifos, **kwargs)
        # newsnr variant with sg veto and psd variation reweighting
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar
class PhaseTDExpFitStatistic(PhaseTDStatistic, ExpFitCombinedSNR):
    """Statistic combining exponential noise model with signal histogram PDF"""

    # default is 2-ifo operation with exactly 1 'phasetd' file
    def __init__(self, files=None, ifos=None, **kwargs):
        # read in both foreground PDF and background fit info
        ExpFitCombinedSNR.__init__(self, files=files, ifos=ifos, **kwargs)
        # need the self.single_dtype value from PhaseTDStatistic
        # NOTE(review): this second init also re-assigns get_newsnr and the
        # hist attributes set by the first init - appears intended, confirm
        PhaseTDStatistic.__init__(self, files=files, ifos=ifos, **kwargs)

    def single(self, trigs):
        """Assemble single-detector stat plus p/t/a consistency parameters."""
        # same single-ifo stat as ExpFitCombinedSNR
        sngl_stat = ExpFitCombinedSNR.single(self, trigs)
        singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
        singles['snglstat'] = sngl_stat
        singles['coa_phase'] = trigs['coa_phase'][:]
        singles['end_time'] = trigs['end_time'][:]
        singles['sigmasq'] = trigs['sigmasq'][:]
        singles['snr'] = trigs['snr'][:]
        return numpy.array(singles, ndmin=1)

    def coinc(self, s0, s1, slide, step):
        """Coinc stat: singles plus signal-histogram weight, scaled to
        resemble network SNR."""
        # logsignalrate function inherited from PhaseTDStatistic
        logr_s = self.logsignalrate(s0, s1, slide * step)
        # rescale by ExpFitCombinedSNR reference slope as for sngl stat
        cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef
        # cut off underflowing and very small values
        cstat[cstat < 8.] = 8.
        # scale to resemble network SNR
        return cstat / (2.**0.5)

    def coinc_lim_for_thresh(self, s0, thresh):
        """Second-ifo single stat needed to reach the coinc threshold,
        assuming the best-case (maximum) signal-histogram weight."""
        # if the threshold is below this value all triggers will
        # pass because of rounding in the coinc method
        if thresh <= (8. / (2.**0.5)):
            return -1. * numpy.ones(len(s0['snglstat'])) * numpy.inf
        if self.hist is None:
            self.get_hist()
        # Assume best case scenario and use maximum signal rate
        logr_s = self.hist_max
        s1 = (2. ** 0.5) * thresh - s0['snglstat'] - logr_s / self.alpharef
        return s1
class PhaseTDNewExpFitStatistic(PhaseTDNewStatistic, ExpFitCombinedSNR):
    """Statistic combining exponential noise model with signal histogram PDF"""

    # default is 2-ifo operation with exactly 1 'phasetd' file
    def __init__(self, files=None, ifos=None, **kwargs):
        # read in both foreground PDF and background fit info
        ExpFitCombinedSNR.__init__(self, files=files, ifos=ifos, **kwargs)
        # need the self.single_dtype value from PhaseTDStatistic
        PhaseTDNewStatistic.__init__(self, files=files, ifos=ifos, **kwargs)

    def single(self, trigs):
        """Assemble single-detector stat plus p/t/a consistency parameters."""
        # same single-ifo stat as ExpFitCombinedSNR
        sngl_stat = ExpFitCombinedSNR.single(self, trigs)
        singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
        singles['snglstat'] = sngl_stat
        singles['coa_phase'] = trigs['coa_phase'][:]
        singles['end_time'] = trigs['end_time'][:]
        singles['sigmasq'] = trigs['sigmasq'][:]
        singles['snr'] = trigs['snr'][:]
        return numpy.array(singles, ndmin=1)

    def coinc(self, s0, s1, slide, step):
        """Coinc stat: singles plus signal-histogram weight, scaled to
        resemble network SNR."""
        # logsignalrate function inherited from PhaseTDStatistic
        logr_s = self.logsignalrate(s0, s1, slide * step)
        # rescale by ExpFitCombinedSNR reference slope as for sngl stat
        cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef
        # cut off underflowing and very small values
        cstat[cstat < 8.] = 8.
        # scale to resemble network SNR
        return cstat / (2.**0.5)

    def coinc_lim_for_thresh(self, s0, thresh):
        """Second-ifo single stat needed to reach the coinc threshold,
        assuming the best-case (maximum) signal-histogram weight."""
        # if the threshold is below this value all triggers will
        # pass because of rounding in the coinc method
        if thresh <= (8. / (2.**0.5)):
            return -1. * numpy.ones(len(s0['snglstat'])) * numpy.inf
        # has_hist / hist_max come from PhaseTDNewStatistic (defined
        # elsewhere in this file)
        if not self.has_hist:
            self.get_hist()
        # Assume best case scenario and use maximum signal rate
        logr_s = self.hist_max
        s1 = (2 ** 0.5) * thresh - s0['snglstat'] - logr_s / self.alpharef
        return s1
class PhaseTDExpFitSGStatistic(PhaseTDExpFitStatistic):
    """PhaseTDExpFitStatistic with the sine-Gaussian veto added to the
    single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        PhaseTDExpFitStatistic.__init__(self, files=files, ifos=ifos, **kwargs)
        # sine-Gaussian-vetoed newsnr for single-detector triggers
        self.get_newsnr = ranking.get_newsnr_sgveto
class PhaseTDNewExpFitSGStatistic(PhaseTDNewExpFitStatistic):
    """PhaseTDNewExpFitStatistic with the sine-Gaussian veto added to the
    single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        PhaseTDNewExpFitStatistic.__init__(self, files=files, ifos=ifos,
                                           **kwargs)
        # sine-Gaussian-vetoed newsnr for single-detector triggers
        self.get_newsnr = ranking.get_newsnr_sgveto
class PhaseTDExpFitSGPSDStatistic(PhaseTDExpFitSGStatistic):
    """PhaseTDExpFitSGStatistic with the PSD variation statistic also
    applied to the single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        PhaseTDExpFitSGStatistic.__init__(self, files=files, ifos=ifos,
                                          **kwargs)
        # newsnr variant with sg veto and psd variation reweighting
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar
class PhaseTDExpFitSGPSDScaledStatistic(PhaseTDExpFitSGStatistic):
    """PhaseTDExpFitSGStatistic using the *scaled* PSD variation statistic
    in the single-detector ranking.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        PhaseTDExpFitSGStatistic.__init__(self, files=files, ifos=ifos,
                                          **kwargs)
        # scaled psd-variation flavour of the sg-vetoed newsnr
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar_scaled
class MaxContTradNewSNRStatistic(NewSNRStatistic):
    """Combination of NewSNR with the power chisq and auto chisq"""

    def single(self, trigs):
        """Calculate the single detector statistic.

        Parameters
        ----------
        trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object)
            Dictionary-like object holding single detector trigger information.
            'snr', 'cont_chisq', 'cont_chisq_dof', 'chisq_dof' and 'chisq'
            are required keys for this statistic.

        Returns
        -------
        stat: numpy.ndarray
            The array of single detector values
        """
        # newsnr computed from the power chisq
        newsnr_power = ranking.get_newsnr(trigs)
        # newsnr recomputed from the reduced autocorrelation (continuous) chisq
        reduced_auto = trigs['cont_chisq'][:] / trigs['cont_chisq_dof'][:]
        newsnr_auto = ranking.newsnr(trigs['snr'][:], reduced_auto)
        # take the element-wise minimum of the two re-weighted SNRs
        return numpy.array(numpy.minimum(newsnr_power, newsnr_auto,
                                         dtype=numpy.float32),
                           ndmin=1, copy=False)
class ExpFitSGBgRateStatistic(ExpFitStatistic):
    """Detection statistic using an exponential falloff noise model.

    Statistic calculates the log noise coinc rate for each
    template over single-ifo newsnr values.
    """

    def __init__(self, files=None, ifos=None, benchmark_lograte=-14.6,
                 **kwargs):
        # benchmark_lograte is log of a representative noise trigger rate
        # This comes from H1L1 (O2) and is 4.5e-7 Hz
        super(ExpFitSGBgRateStatistic, self).__init__(files=files, ifos=ifos,
                                                      **kwargs)
        self.benchmark_lograte = benchmark_lograte
        # sine-Gaussian-vetoed newsnr for single-detector triggers
        self.get_newsnr = ranking.get_newsnr_sgveto
        # Reassign the rate to be number per time rather than an arbitrarily
        # normalised number
        for ifo in self.bg_ifos:
            self.reassign_rate(ifo)

    def reassign_rate(self, ifo):
        """Convert the stored above-threshold count for one ifo into a rate
        by dividing by the analysis time recorded in the fit file."""
        coeff_file = self.files[ifo+'-fit_coeffs']
        template_id = coeff_file['template_id'][:]
        # create arrays in template_id order for easier recall
        tid_sort = numpy.argsort(template_id)
        self.fits_by_tid[ifo]['rate'] = \
            coeff_file['count_above_thresh'][:][tid_sort] / \
            float(coeff_file.attrs['analysis_time'])

    def coinc_multiifo(self, s, slide, step, to_shift,
                       **kwargs):  # pylint:disable=unused-argument
        """Multi-ifo coinc stat: -ln(noise rate density) + normalization."""
        # ranking statistic is -ln(expected rate density of noise triggers)
        # plus normalization constant
        sngl_dict = {sngl[0]: sngl[1] for sngl in s}
        ln_noise_rate = coinc_rate.combination_noise_lograte(
            sngl_dict, kwargs['time_addition'])
        loglr = - ln_noise_rate + self.benchmark_lograte
        return loglr

    def coinc_multiifo_lim_for_thresh(self, s, thresh, limifo, **kwargs):
        """Limit on the left-out ifo's single stat needed to reach thresh."""
        sngl_dict = {sngl[0]: sngl[1] for sngl in s}
        # include limifo (with zero stat) so the coincidence window / overlap
        # time is computed for the full detector combination
        sngl_dict[limifo] = numpy.zeros(len(s[0][1]))
        ln_noise_rate = coinc_rate.combination_noise_lograte(
            sngl_dict, kwargs['time_addition'])
        loglr = - thresh - ln_noise_rate + self.benchmark_lograte
        return loglr
class ExpFitSGFgBgRateStatistic(PhaseTDStatistic, ExpFitSGBgRateStatistic):
    """Statistic combining the exponential noise rate model with the
    phase/time/amplitude signal histogram and a network sensitivity
    (volume) correction.
    """

    def __init__(self, files=None, ifos=None, **kwargs):
        # read in background fit info and store it
        ExpFitSGBgRateStatistic.__init__(self, files=files, ifos=ifos,
                                         **kwargs)
        # if ifos not already set, determine via background fit info
        self.ifos = self.ifos or self.bg_ifos
        # PhaseTD statistic single_dtype plus network sensitivity benchmark
        PhaseTDStatistic.__init__(self, files=files, ifos=self.ifos, **kwargs)
        self.single_dtype.append(('benchmark_logvol', numpy.float32))
        self.get_newsnr = ranking.get_newsnr_sgveto
        for ifo in self.bg_ifos:
            self.assign_median_sigma(ifo)
        # benchmark_logvol is a benchmark sensitivity array over template id
        # NB hard-coded to the H1/L1 pair - requires both in bg_ifos
        hl_net_med_sigma = numpy.amin([self.fits_by_tid[ifo]['median_sigma']
                                       for ifo in ['H1', 'L1']], axis=0)
        self.benchmark_logvol = 3.0 * numpy.log(hl_net_med_sigma)
        self.single_increasing = False

    def assign_median_sigma(self, ifo):
        """Read median_sigma for one ifo, re-sorted into template_id order."""
        coeff_file = self.files[ifo + '-fit_coeffs']
        template_id = coeff_file['template_id'][:]
        tid_sort = numpy.argsort(template_id)
        self.fits_by_tid[ifo]['median_sigma'] = \
            coeff_file['median_sigma'][:][tid_sort]

    def single(self, trigs):
        """Single-ifo stat (log noise rate) plus p/t/a consistency fields."""
        # single-ifo stat = log of noise rate
        sngl_stat = self.lognoiserate(trigs)
        # populate other fields to calculate phase/time/amp consistency
        # and sigma comparison
        singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
        singles['snglstat'] = sngl_stat
        singles['coa_phase'] = trigs['coa_phase'][:]
        singles['end_time'] = trigs['end_time'][:]
        singles['sigmasq'] = trigs['sigmasq'][:]
        singles['snr'] = trigs['snr'][:]
        try:
            tnum = trigs.template_num  # exists if accessed via coinc_findtrigs
        except AttributeError:
            tnum = trigs['template_id']  # exists for SingleDetTriggers
            # Should only be one ifo fit file provided
            assert len(self.ifos) == 1
        # store benchmark log volume as single-ifo information since the coinc
        # method does not have access to template id
        singles['benchmark_logvol'] = self.benchmark_logvol[tnum]
        return numpy.array(singles, ndmin=1)

    def coinc_multiifo(self, s, slide, step, to_shift,
                       **kwargs):  # pylint:disable=unused-argument
        """Log likelihood-ratio-like coinc stat: signal histogram weight
        plus network volume correction minus log noise rate."""
        sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
        ln_noise_rate = coinc_rate.combination_noise_lograte(
            sngl_rates, kwargs['time_addition'])
        ln_noise_rate -= self.benchmark_lograte
        # Network sensitivity for a given coinc type is approximately
        # determined by the least sensitive ifo
        network_sigmasq = numpy.amin([sngl[1]['sigmasq'] for sngl in s],
                                     axis=0)
        # Volume \propto sigma^3 or sigmasq^1.5
        network_logvol = 1.5 * numpy.log(network_sigmasq)
        # Get benchmark log volume as single-ifo information
        # NB benchmark logvol for a given template is not ifo-dependent
        # - choose the first ifo for convenience
        benchmark_logvol = s[0][1]['benchmark_logvol']
        network_logvol -= benchmark_logvol
        coincifos = [sngl[0] for sngl in s]
        # logsignalrate function from PhaseTDStatistic
        if ('H1' in coincifos and 'L1' in coincifos):
            # apply HL hist for HL & HLV coincs, keep only H/L info
            s_hl = [sngl[1] for sngl in s if sngl[0] in ['H1', 'L1']]
            shift_hl = [sh for sngl, sh in zip(s, to_shift) if \
                        sngl[0] in ['H1', 'L1']]
            logr_s = self.logsignalrate_multiifo(s_hl, slide * step, shift_hl)
        else:
            logr_s = self.logsignalrate_multiifo([sngl[1] for sngl in s],
                                                 slide * step, to_shift)
        loglr = logr_s + network_logvol - ln_noise_rate
        # cut off underflowing and very small values
        loglr[loglr < -30.] = -30.
        return loglr

    def coinc_multiifo_lim_for_thresh(self, s, thresh, limifo,
                                      **kwargs):  # pylint:disable=unused-argument
        """Best-case limit on the left-out ifo's stat to reach thresh."""
        if self.hist is None:
            self.get_hist()
        # if the threshold is below this value all triggers will
        # pass because of rounding in the coinc method
        if thresh <= -30.:
            return numpy.ones(len(s[0][1]['snglstat'])) * numpy.inf
        sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
        # Add limifo to singles dict so that overlap time is calculated correctly
        sngl_rates[limifo] = numpy.zeros(len(s[0][1]))
        ln_noise_rate = coinc_rate.combination_noise_lograte(
            sngl_rates, kwargs['time_addition'])
        ln_noise_rate -= self.benchmark_lograte
        # Assume best case and use the maximum sigma squared from all triggers
        network_sigmasq = numpy.ones(len(s[0][1])) * kwargs['max_sigmasq']
        # Volume \propto sigma^3 or sigmasq^1.5
        network_logvol = 1.5 * numpy.log(network_sigmasq)
        # Get benchmark log volume as single-ifo information
        # NB benchmark logvol for a given template is not ifo-dependent
        # - choose the first ifo for convenience
        benchmark_logvol = s[0][1]['benchmark_logvol']
        network_logvol -= benchmark_logvol
        loglr = - thresh + self.hist_max + network_logvol - ln_noise_rate
        return loglr
class ExpFitSGFgBgNormNewStatistic(PhaseTDNewStatistic,
ExpFitSGBgRateStatistic):
def __init__(self, files=None, ifos=None, **kwargs):
# read in background fit info and store it
ExpFitSGBgRateStatistic.__init__(self, files=files, ifos=ifos,
**kwargs)
# if ifos not already set, determine via background fit info
self.ifos = self.ifos or self.bg_ifos
# PhaseTD statistic single_dtype plus network sensitivity benchmark
PhaseTDNewStatistic.__init__(self, files=files, ifos=self.ifos,
**kwargs)
self.single_dtype.append(('benchmark_logvol', numpy.float32))
self.get_newsnr = ranking.get_newsnr_sgveto
for ifo in self.bg_ifos:
self.assign_median_sigma(ifo)
# benchmark_logvol is a benchmark sensitivity array over template id
hl_net_med_sigma = numpy.amin([self.fits_by_tid[ifo]['median_sigma']
for ifo in ['H1', 'L1']], axis=0)
self.benchmark_logvol = 3.0 * numpy.log(hl_net_med_sigma)
self.single_increasing = False
def assign_median_sigma(self, ifo):
coeff_file = self.files[ifo + '-fit_coeffs']
template_id = coeff_file['template_id'][:]
tid_sort = numpy.argsort(template_id)
self.fits_by_tid[ifo]['median_sigma'] = \
coeff_file['median_sigma'][:][tid_sort]
def lognoiserate(self, trigs, alphabelow=6):
"""Calculate the log noise rate density over single-ifo newsnr
Read in single trigger information, make the newsnr statistic
and rescale by the fitted coefficients alpha and rate
"""
alphai, ratei, thresh = self.find_fits(trigs)
newsnr = self.get_newsnr(trigs)
# Above the threshold we use the usual fit coefficient (alpha)
# below threshold use specified alphabelow
bt = newsnr < thresh
lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \
numpy.log(ratei)
lognoiselbt = - alphabelow * (newsnr - thresh) + \
numpy.log(alphabelow) + numpy.log(ratei)
lognoisel[bt] = lognoiselbt[bt]
return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32)
def single(self, trigs):
# single-ifo stat = log of noise rate
sngl_stat = self.lognoiserate(trigs)
# populate other fields to calculate phase/time/amp consistency
# and sigma comparison
singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype)
singles['snglstat'] = sngl_stat
singles['coa_phase'] = trigs['coa_phase'][:]
singles['end_time'] = trigs['end_time'][:]
singles['sigmasq'] = trigs['sigmasq'][:]
singles['snr'] = trigs['snr'][:]
try:
tnum = trigs.template_num # exists if accessed via coinc_findtrigs
except AttributeError:
tnum = trigs['template_id'] # exists for SingleDetTriggers
# Should only be one ifo fit file provided
assert len(self.ifos) == 1
# Store benchmark log volume as single-ifo information since the coinc
# method does not have access to template id
singles['benchmark_logvol'] = self.benchmark_logvol[tnum]
return numpy.array(singles, ndmin=1)
def single_multiifo(self, s):
ln_noise_rate = s[1]['snglstat']
ln_noise_rate -= self.benchmark_lograte
network_sigmasq = s[1]['sigmasq']
network_logvol = 1.5 * numpy.log(network_sigmasq)
benchmark_logvol = s[1]['benchmark_logvol']
network_logvol -= benchmark_logvol
ln_s = -4 * numpy.log(s[1]['snr'] / self.ref_snr)
loglr = network_logvol - ln_noise_rate + ln_s
# cut off underflowing and very small values
loglr[loglr < -30.] = -30.
return loglr
def coinc_multiifo(self, s, slide, step, to_shift,
**kwargs): # pylint:disable=unused-argument
sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
ln_noise_rate = coinc_rate.combination_noise_lograte(
sngl_rates, kwargs['time_addition'])
ln_noise_rate -= self.benchmark_lograte
# Network sensitivity for a given coinc type is approximately
# determined by the least sensitive ifo
network_sigmasq = numpy.amin([sngl[1]['sigmasq'] for sngl in s],
axis=0)
# Volume \propto sigma^3 or sigmasq^1.5
network_logvol = 1.5 * numpy.log(network_sigmasq)
# Get benchmark log volume as single-ifo information :
# benchmark_logvol for a given template is not ifo-dependent, so
# choose the first ifo for convenience
benchmark_logvol = s[0][1]['benchmark_logvol']
network_logvol -= benchmark_logvol
# Use prior histogram to get Bayes factor for signal vs noise
# given the time, phase and SNR differences between IFOs
# First get signal PDF logr_s
stat = {ifo: st for ifo, st in s}
logr_s = self.logsignalrate_multiifo(stat,
slide * step, to_shift)
# Find total volume of phase-time-amplitude space occupied by noise
# coincs
# Extent of time-difference space occupied
noise_twindow = coinc_rate.multiifo_noise_coincident_area(
self.hist_ifos, kwargs['time_addition'])
# Volume is the allowed time difference window, multiplied by 2pi for
# each phase difference dimension and by allowed range of SNR ratio
# for each SNR ratio dimension : there are (n_ifos - 1) dimensions
# for both phase and SNR
n_ifos = len(self.hist_ifos)
hist_vol = noise_twindow * \
(2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \
(n_ifos - 1)
# Noise PDF is 1/volume, assuming a uniform distribution of noise
# coincs
logr_n = - numpy.log(hist_vol)
# Combine to get final statistic: log of
# ((rate of signals / rate of noise) * PTA Bayes factor)
loglr = network_logvol - ln_noise_rate + logr_s - logr_n
# cut off underflowing and very small values
loglr[loglr < -30.] = -30.
return loglr
def coinc_multiifo_lim_for_thresh(self, s, thresh, limifo,
                                  **kwargs): # pylint:disable=unused-argument
    """Best-case statistic bound used to decide whether adding detector
    `limifo` to the coincidence could push it over `thresh`.

    Mirrors coinc_multiifo but substitutes optimistic values: maximum
    sigma-squared ('max_sigmasq') and maximum signal rate at the
    minimum SNR ('min_snr'), both supplied via kwargs.

    Returns
    -------
    numpy.ndarray
        Upper limit on the statistic contribution; +inf everywhere when
        `thresh` is at or below the -30 floor (every trigger passes).
    """
    if not self.has_hist:
        self.get_hist()
    # if the threshold is below this value all triggers will
    # pass because of rounding in the coinc method
    if thresh <= -30:
        return numpy.ones(len(s[0][1]['snglstat'])) * numpy.inf
    sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s}
    # Add limifo to singles dict so that overlap time is calculated correctly
    sngl_rates[limifo] = numpy.zeros(len(s[0][1]))
    ln_noise_rate = coinc_rate.combination_noise_lograte(
        sngl_rates, kwargs['time_addition'])
    ln_noise_rate -= self.benchmark_lograte
    # Assume best case and use the maximum sigma squared from all triggers
    network_sigmasq = numpy.ones(len(s[0][1])) * kwargs['max_sigmasq']
    # Volume \propto sigma^3 or sigmasq^1.5
    network_logvol = 1.5 * numpy.log(network_sigmasq)
    # Get benchmark log volume as single-ifo information :
    # benchmark_logvol for a given template is not ifo-dependent, so
    # choose the first ifo for convenience
    benchmark_logvol = s[0][1]['benchmark_logvol']
    network_logvol -= benchmark_logvol
    # Assume best case scenario and use maximum signal rate
    # (signal rate falls off as SNR^-4 relative to the reference SNR)
    logr_s = numpy.log(self.hist_max
                       * (kwargs['min_snr'] / self.ref_snr) ** -4.0)
    # Find total volume of phase-time-amplitude space occupied by noise
    # coincs
    # Extent of time-difference space occupied
    noise_twindow = coinc_rate.multiifo_noise_coincident_area(
        self.hist_ifos, kwargs['time_addition'])
    # Volume is the allowed time difference window, multiplied by 2pi for
    # each phase difference dimension and by allowed range of SNR ratio
    # for each SNR ratio dimension : there are (n_ifos - 1) dimensions
    # for both phase and SNR
    n_ifos = len(self.hist_ifos)
    hist_vol = noise_twindow * \
        (2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \
        (n_ifos - 1)
    # Noise PDF is 1/volume, assuming a uniform distribution of noise
    # coincs
    logr_n = - numpy.log(hist_vol)
    loglr = - thresh + network_logvol - ln_noise_rate + logr_s - logr_n
    return loglr
class ExpFitSGPSDFgBgNormStatistic(ExpFitSGFgBgNormNewStatistic):
    """Variant of ExpFitSGFgBgNormNewStatistic that ranks single
    triggers with the sine-Gaussian-veto + PSD-variation reweighted
    SNR."""

    def __init__(self, files=None, ifos=None, **kwargs):
        """Initialise the parent statistic, then select the
        sgveto+psdvar single-detector ranking."""
        super(ExpFitSGPSDFgBgNormStatistic, self).__init__(
            files=files, ifos=ifos, **kwargs)
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar
class ExpFitSGPSDScaledFgBgNormStatistic(ExpFitSGFgBgNormNewStatistic):
    """Variant of ExpFitSGFgBgNormNewStatistic that ranks single
    triggers with the scaled sine-Gaussian-veto + PSD-variation
    reweighted SNR."""

    def __init__(self, files=None, ifos=None, **kwargs):
        """Initialise the parent statistic, then select the scaled
        sgveto+psdvar single-detector ranking."""
        super(ExpFitSGPSDScaledFgBgNormStatistic, self).__init__(
            files=files, ifos=ifos, **kwargs)
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar_scaled
class ExpFitSGPSDFgBgNormBBHStatistic(ExpFitSGFgBgNormNewStatistic):
    """BBH-targeted variant: applies a chirp-mass-dependent weighting to
    the signal rate, optionally capped at ``max_chirp_mass``."""

    def __init__(self, files=None, ifos=None, max_chirp_mass=None, **kwargs):
        """Initialise the parent and store the (optional) chirp-mass cap.

        max_chirp_mass may be a string (e.g. from a config file); it is
        cast to float at the point of use in single().
        """
        ExpFitSGFgBgNormNewStatistic.__init__(self, files=files, ifos=ifos,
                                              **kwargs)
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar
        self.mcm = max_chirp_mass
        # Chirp mass of the template currently being processed;
        # set by single() and consumed by logsignalrate_multiifo().
        self.curr_mchirp = None

    def single(self, trigs):
        """Record the current template's chirp mass (capped at self.mcm
        when set), then defer to the parent's single-trigger statistic."""
        from pycbc.conversions import mchirp_from_mass1_mass2
        # NOTE(review): assumes trigs.param['mass1'/'mass2'] are scalar
        # per-template values (min() below would be ambiguous on arrays)
        # -- confirm against callers.
        self.curr_mchirp = mchirp_from_mass1_mass2(trigs.param['mass1'],
                                                   trigs.param['mass2'])
        if self.mcm is not None:
            # Careful - input might be a str, so cast to float
            self.curr_mchirp = min(self.curr_mchirp, float(self.mcm))
        return ExpFitSGFgBgNormNewStatistic.single(self, trigs)

    def logsignalrate_multiifo(self, stats, shift, to_shift):
        """Parent signal rate plus a chirp-mass reweighting term."""
        # model signal rate as uniform over chirp mass, background rate is
        # proportional to mchirp^(-11/3) due to density of templates
        logr_s = ExpFitSGFgBgNormNewStatistic.logsignalrate_multiifo(
            self, stats, shift, to_shift)
        logr_s += numpy.log((self.curr_mchirp / 20.0) ** (11./3.0))
        return logr_s
class ExpFitSGPSDSTFgBgNormBBHStatistic(ExpFitSGPSDFgBgNormBBHStatistic):
    """BBH-targeted statistic using the scaled-and-thresholded
    sgveto+psdvar reweighted SNR for single triggers."""

    def __init__(self, files=None, ifos=None, max_chirp_mass=None, **kwargs):
        """Initialise the parent, forwarding the chirp-mass cap, then
        select the scaled+threshold single-detector ranking.

        BUG FIX: the parent was previously called with the literal
        ``max_chirp_mass=None``, silently discarding any cap passed by
        the caller; the actual argument is now forwarded.
        """
        ExpFitSGPSDFgBgNormBBHStatistic.__init__(
            self, files=files, ifos=ifos, max_chirp_mass=max_chirp_mass,
            **kwargs)
        self.get_newsnr = ranking.get_newsnr_sgveto_psdvar_scaled_threshold
# Mapping from the command-line statistic name to the coincident-statistic
# class that implements it.  The '2ogc'/'2ogcbbh' keys are legacy aliases
# kept for backwards compatibility with older configurations.
statistic_dict = {
    'newsnr': NewSNRStatistic,
    'network_snr': NetworkSNRStatistic,
    'newsnr_cut': NewSNRCutStatistic,
    'phasetd_newsnr': PhaseTDStatistic,
    'phasetd_newsnr_sgveto': PhaseTDSGStatistic,
    'exp_fit_stat': ExpFitStatistic,
    'exp_fit_csnr': ExpFitCombinedSNR,
    'exp_fit_sg_csnr': ExpFitSGCombinedSNR,
    'exp_fit_sg_csnr_psdvar': ExpFitSGPSDCombinedSNR,
    'phasetd_exp_fit_stat': PhaseTDExpFitStatistic,
    'max_cont_trad_newsnr': MaxContTradNewSNRStatistic,
    'phasetd_exp_fit_stat_sgveto': PhaseTDExpFitSGStatistic,
    'phasetd_new_exp_fit_stat_sgveto': PhaseTDNewExpFitSGStatistic,
    'newsnr_sgveto': NewSNRSGStatistic,
    'newsnr_sgveto_psdvar': NewSNRSGPSDStatistic,
    'phasetd_exp_fit_stat_sgveto_psdvar': PhaseTDExpFitSGPSDStatistic,
    'phasetd_exp_fit_stat_sgveto_psdvar_scaled':
        PhaseTDExpFitSGPSDScaledStatistic,
    'exp_fit_sg_bg_rate': ExpFitSGBgRateStatistic,
    'exp_fit_sg_fgbg_rate': ExpFitSGFgBgRateStatistic,
    'exp_fit_sg_fgbg_norm_new': ExpFitSGFgBgNormNewStatistic,
    '2ogc': ExpFitSGPSDScaledFgBgNormStatistic, # backwards compatible
    '2ogcbbh': ExpFitSGPSDSTFgBgNormBBHStatistic, # backwards compatible
    'exp_fit_sg_fgbg_norm_psdvar': ExpFitSGPSDFgBgNormStatistic,
    'exp_fit_sg_fgbg_norm_psdvar_bbh': ExpFitSGPSDFgBgNormBBHStatistic
}
# Mapping from the command-line statistic name to the single-detector
# statistic class that implements it.
sngl_statistic_dict = {
    'newsnr': NewSNRStatistic,
    'new_snr': NewSNRStatistic, # For backwards compatibility
    'snr': NetworkSNRStatistic,
    'newsnr_cut': NewSNRCutStatistic,
    'exp_fit_csnr': ExpFitCombinedSNR,
    'exp_fit_sg_csnr': ExpFitSGCombinedSNR,
    'max_cont_trad_newsnr': MaxContTradNewSNRStatistic,
    'newsnr_sgveto': NewSNRSGStatistic,
    'newsnr_sgveto_psdvar': NewSNRSGPSDStatistic,
    'newsnr_sgveto_psdvar_scaled': NewSNRSGPSDScaledStatistic,
    'newsnr_sgveto_psdvar_scaled_threshold':
        NewSNRSGPSDScaledThresholdStatistic,
    'exp_fit_sg_csnr_psdvar': ExpFitSGPSDCombinedSNR
}
def get_statistic(stat):
    """
    Error-handling sugar around dict lookup for coincident statistics

    Parameters
    ----------
    stat : string
        Name of the coincident statistic

    Returns
    -------
    class
        Subclass of Stat base class

    Raises
    ------
    RuntimeError
        If the string is not recognized as corresponding to a Stat subclass
    """
    if stat not in statistic_dict:
        raise RuntimeError('%s is not an available detection statistic' % stat)
    return statistic_dict[stat]
def get_sngl_statistic(stat):
    """
    Error-handling sugar around dict lookup for single-detector statistics

    Parameters
    ----------
    stat : string
        Name of the single-detector statistic

    Returns
    -------
    class
        Subclass of Stat base class

    Raises
    ------
    RuntimeError
        If the string is not recognized as corresponding to a Stat subclass
    """
    if stat not in sngl_statistic_dict:
        raise RuntimeError('%s is not an available detection statistic' % stat)
    return sngl_statistic_dict[stat]
| ahnitz/pycbc | pycbc/events/stat.py | Python | gpl-3.0 | 69,021 | [
"Gaussian"
] | 9278214ad51d3c06ffd407c1017278c605d31cd2ff799ae9e058b5afd8d2f3b5 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""op_reg_gen: Generate op registration code from composite op code."""
# pylint: disable=invalid-name
# pylint: disable=missing-function-docstring
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast as ast
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct import transpiler
from tensorflow.python.framework import op_def_registry
from tensorflow.python.util import tf_inspect
_COMPOSITE_ARG_LIST = ['op_name', 'inputs', 'attrs', 'derived_attrs', 'outputs']
class OpRegGenImpl(transformer.CodeGenerator):
  """Visit the AST and generate C++ op registration functions.

  Each @Composite-decorated function yields one REGISTER_OP(...) snippet
  appended to the generator's code buffer via emit().
  """

  def __init__(self, ctx):
    super(OpRegGenImpl, self).__init__(ctx)
    self.ctx = ctx

  def visit_Name(self, node):
    # A bare name evaluates to its identifier string.
    return node.id

  def visit_Constant(self, node):
    return node.value

  def visit_keyword(self, node):
    # Keyword argument -> (name, evaluated value) pair.
    return node.arg, self.visit(node.value)

  def visit_List(self, node):
    return [self.visit(cst) for cst in node.elts]

  def visit_arguments(self, node):
    return [self.visit(arg) for arg in node.args]

  def visit_FunctionDef(self, node):
    """Emit a REGISTER_OP block for a @Composite-decorated function.

    Raises KeyError for malformed decoration / mismatched arguments and
    ValueError when the op is already registered but an op def is given.
    """
    # TODO(fengliuai): create one utility method to match different apis and
    # shared it with the tfr_gen.py module.
    # Collect Composite(...) decorators, whether referenced as a bare
    # name or as an attribute (e.g. module.Composite).
    compose_dec = []
    for dec in node.decorator_list:
      if isinstance(dec, ast.Call):
        if isinstance(dec.func, ast.Attribute) and dec.func.attr == 'Composite':
          compose_dec.append(dec)
        if isinstance(dec.func, ast.Name) and dec.func.id == 'Composite':
          compose_dec.append(dec)

    if not compose_dec:
      # skip a non-composition function
      return
    elif len(compose_dec) > 1:
      raise KeyError('More than one TF ops decomposes for.')

    # Evaluate positional decorator arguments against the fixed
    # _COMPOSITE_ARG_LIST order, then merge keyword arguments.
    all_dec_args = {}
    for arg_name, arg_value in zip(_COMPOSITE_ARG_LIST, compose_dec[0].args):
      all_dec_args[arg_name] = self.visit(arg_value)

    kw_dec_args = dict([self.visit(kw) for kw in compose_dec[0].keywords])

    # An argument given both positionally and by keyword is an error.
    if all_dec_args.keys() & kw_dec_args.keys():
      raise KeyError('More arguments than expected.')

    all_dec_args.update(kw_dec_args)

    op_name = all_dec_args['op_name']
    op_def = op_def_registry.get(op_name)
    if op_def:
      if len(all_dec_args) > 1:
        # Op has been registered, so it is a user error to specify op def.
        raise ValueError('Op has been registered: ' + op_name)
      else:
        # Op has been registered, then we don't need to generate register code.
        return

    # Validates the function inputs match what are in the decorator.
    inputs = all_dec_args.get('inputs', [])
    attrs = all_dec_args.get('attrs', [])
    expected_args = [arg.split(':')[0] for arg in inputs + attrs]
    all_func_args = self.visit(node.args)

    if len(expected_args) != len(all_func_args):
      raise KeyError('Composition arguments do not match the registration.')

    # Assemble the C++ registration snippet.
    cxx_reg_code = '\nREGISTER_OP("{0}")'.format(op_name)
    for input_ in inputs:
      cxx_reg_code += '\n .Input("{0}")'.format(input_)
    for attr in attrs:
      # C++ source uses single quotes inside attr strings.
      py_str = attr.replace('"', '\'')
      cxx_reg_code += '\n .Attr("{0}")'.format(py_str)
    for attr in all_dec_args.get('derived_attrs', []):
      py_str = attr.replace('"', '\'')
      cxx_reg_code += '\n .Attr("{0}")'.format(py_str)
    for output_ in all_dec_args.get('outputs', []):
      cxx_reg_code += '\n .Output("{0}")'.format(output_)
    cxx_reg_code += ';\n'
    self.emit(cxx_reg_code)
class OpRegGen(transpiler.GenericTranspiler):
  """Transpiler driver lowering decorated Python functions into C++
  op-registration source."""

  def transform_ast(self, node, ctx):
    """Run the registration generator over `node`; return its buffer."""
    generator = OpRegGenImpl(ctx)
    generator.visit(node)
    return generator.code_buffer
def op_reg_gen(func):
  """Parse a function and emit the TFR functions."""
  generated_code, _unused_module = OpRegGen().transform(func, None)
  return generated_code
def gen_register_op(source, method_prefix=None):
  """Parse a python code and emit the TFR functions from a target class."""
  snippets = []
  for name, func in tf_inspect.getmembers(source, tf_inspect.isfunction):
    # Only keep functions matching the optional name prefix.
    if method_prefix and not name.startswith(method_prefix):
      continue
    snippets.append(op_reg_gen(func))
  headers = r"""
#include "third_party/tensorflow/core/framework/node_def_builder.h"
#include "third_party/tensorflow/core/framework/op.h"
namespace tensorflow {
"""
  body = '\n'.join(snippets)
  return headers + body + '} // namespace tensorflow\n'
| karllessard/tensorflow | tensorflow/compiler/mlir/tfr/python/op_reg_gen.py | Python | apache-2.0 | 5,172 | [
"VisIt"
] | 568dc2e06873f1da92d4d9507edd571ceaee797bedcd888affd6053fb8982178 |
import sys, os
from getIQU import IQU
from subprocess import call
import sys, os
import numpy as np
import glob
import matplotlib.pyplot as plt
from astropy.convolution import convolve_fft, Gaussian2DKernel
from astropy.visualization import AsinhStretch
from astropy.io import fits
from astropy.wcs import WCS
import scipy.ndimage as ndimage
from skimage import filters
import aplpy
from astropy.visualization import (MinMaxInterval, SqrtStretch, ImageNormalize, PowerStretch)
plt.ion()

# Destination directory for dissertation figures.
save_files_here = "/home/wizwit/SESE_dissertation/figures/chapter6"

stokes = ['I', 'Q', 'U']
planck_dir = './carinaData/planckData'
filename = os.path.join(planck_dir, "planck_353_carinaneb_pol.fits")
hdulist = fits.open(filename)

# Extract the Stokes I, Q, U maps from consecutive HDUs (s+1 because
# HDU 0 is the primary header).  Exact zeros mark unobserved pixels and
# are replaced with NaN so they drop out of downstream arithmetic.
for s, param in enumerate(stokes):
    if (stokes[s] == 'I'):
        Ivals = hdulist[s+1].data
        Ivals[Ivals == 0.0] = np.nan
        #wcs = WCS(hdulist[s+1].header)
for s, param in enumerate(stokes):
    if (stokes[s] == 'Q'):
        Qvals = hdulist[s+1].data
        Qvals[Qvals == 0.0] = np.nan
        #wcs = WCS(hdulist[s+1].header)
for s, param in enumerate(stokes):
    if (stokes[s] == 'U'):
        Uvals = hdulist[s+1].data
        Uvals[Uvals == 0.0] = np.nan
        wcs = WCS(hdulist[s+1].header)

# Shift the WCS reference pixel to match the crop applied just below.
wcs.wcs.crpix[0] -= 260
wcs.wcs.crpix[1] -= 30

# Crop the maps to the region of interest (rows 30:-30, cols 260:-260).
I = Ivals[30:-30,260:-260]
Q = Qvals[30:-30,260:-260]
U = Uvals[30:-30,260:-260]

# Polarized intensity, polarization fraction and angle.
Pvals = np.sqrt(Q**2 + U**2)
pvals = Pvals/I

phi = 0.5*np.arctan2(U,Q)

# Polarization pseudo-vector components and magnitude.
dx = pvals*np.cos(phi)
dy = pvals*np.sin(phi)
mag = np.sqrt(dx**2 + dy**2)

# Pixel-coordinate grid used by the (disabled) quiver plot below.
X = np.linspace(0, I.shape[1], I.shape[1])
Y = np.linspace(0, I.shape[0], I.shape[0])
xs, ys = np.meshgrid(X,Y)
"""
plt.figure()
nskip = 2
skip = (slice(None, None, nskip), slice(None, None, nskip))
#f = aplpy.FITSFigure(I, figsize = (10.24,7.68), dpi = 100)
ax = plt.gca()
ax.imshow(I, cmap = "gist_heat")
#f.tick_labels.set_font(size='small')
#f.show_colorscale(cmap='gist_heat')
# Add polarization vectors
ax.quiver(xs[skip],ys[skip],(dx/mag)[skip],(dy/mag)[skip], color = "white", angles = 'xy', units = 'xy', scale_units = 'xy', scale = 0.3)
#f.show_vectors(pvals, phi, color = 'white', rotate = 90., scale = 50, step = 10)
ax.set_facecolor('black')
plt.tight_layout()
"""
def _write_matrix(path, rows):
    """Write a 2-D array as whitespace-separated values, one matrix row
    per output line (the format consumed by the external planck_lic
    binary)."""
    # BUG FIX: the original used the Python-2-only file() builtin,
    # which was removed in Python 3; open() works in both.
    with open(path, 'w') as outfile:
        for row in rows:
            np.savetxt(outfile, row, newline=" ")
            outfile.write('\n')

xsize, ysize = len(X), len(Y)
vectors = np.array([dx, dy])

# White-noise texture for the line-integral convolution, lightly
# smoothed so the LIC streaks are not single-pixel speckle.
# white = np.random.rand(xsize, ysize)
# white = np.random.uniform(low=0., high=1., size=(xsize, ysize))
# NOTE(review): texture shape is (xsize, ysize) = (ncols, nrows), the
# transpose of I -- presumably compensated by the transpose of the LIC
# output below; confirm against the planck_lic binary's conventions.
white = np.random.normal(0., 1., size=(xsize, ysize))
sigma = 1.2
white = ndimage.gaussian_filter(white, sigma)

_write_matrix('texture_planck.dat', white)
_write_matrix('dx_planck.dat', dx)
_write_matrix('dy_planck.dat', dy)

# The LIC itself is computed by an external binary; read its output back.
# command = ["./planck_lic", str(xsize), str(ysize)]
# call(command)
lic = np.loadtxt("./lic_planck.dat")
lic = np.transpose(lic)
# lic += np.abs(np.nanmin(lic))

# LIC texture modulated by total intensity.
mult = lic * I
# Figure 1: total intensity with the LIC texture overlaid in gray.
# A zero-filled HDU carrying the cropped WCS provides the sky axes.
hdu2 = fits.PrimaryHDU(data=np.zeros_like(I), header=wcs.to_header())
f2 = aplpy.FITSFigure(hdu2, figsize = (10,10))
f2.set_theme('publication')

ax = plt.gca()
ax.set_facecolor("k")

# Scalebar: 15 arcmin corresponds to 10 pc at the adopted distance.
f2.add_scalebar(15/60.) # arcmin
f2.scalebar.set_color('white')
f2.scalebar.set_corner('bottom right')
f2.scalebar.set_label('10 pc')
f2.tick_labels.set_yformat('dd.dd')
f2.tick_labels.set_xformat('dd.dd')
f2.axis_labels.set_font(size=16)
f2.tick_labels.set_font(size=14)

plt.imshow(I, origin = 'lower', cmap = "inferno", alpha = 1)
#plt.imshow(lic, vmin = -0.07, vmax = 0.3, origin = 'lower', cmap = "gray", alpha = 0.4, interpolation = "bilinear")
plt.imshow(lic, vmin = -0.05, vmax = 0.2, origin = 'lower', cmap = "gray", alpha = 0.5, interpolation = "bilinear")
plt.tight_layout()
#plt.savefig(os.path.join(save_files_here, 'planck_han_51.eps'), format='eps', dpi=1000, bbox_inches = 'tight')
plt.savefig(os.path.join(save_files_here, 'planck_han_51.png'), format='png', bbox_inches = 'tight')

# Figure 2: intensity-modulated LIC (mult = lic * I) on the same WCS.
hdu3 = fits.PrimaryHDU(data=np.zeros_like(I), header=wcs.to_header())
f3 = aplpy.FITSFigure(hdu3, figsize = (10,10))
f3.set_theme('publication')
# scalebar
ax = plt.gca()
ax.set_facecolor("k")
f3.add_scalebar(15/60.) # arcmin
f3.scalebar.set_color('white')
f3.scalebar.set_corner('bottom right')
f3.scalebar.set_label('10 pc')
f3.tick_labels.set_yformat('dd.dd')
f3.tick_labels.set_xformat('dd.dd')
f3.axis_labels.set_font(size=16)
f3.tick_labels.set_font(size=14)

# Display limits; only index 0 is used below.
vmin = [-0.00007, 0.4, 0.5]
vmax = [0.02, 0.4, 0.5]
plt.imshow(mult, origin = 'lower',\
            cmap = "inferno", vmin = vmin[0],\
            vmax = vmax[0], interpolation = 'bilinear')
#plt.tight_layout()
#plt.savefig(os.path.join(save_files_here, 'planck2_han51.eps'), format='eps', dpi=500, bbox_inches = 'tight')
plt.savefig(os.path.join(save_files_here, 'planck2_han51.png'), bbox_inches = 'tight')
"""
#stretch = SqrtStretch()
#lic_stretch = stretch(lic)
hdu1 = fits.PrimaryHDU(data=lic, header=wcs.to_header())
f1 = aplpy.FITSFigure(hdu1)
f1.set_theme('publication')
f1.show_colorscale(stretch='linear', cmap='inferno', smooth = None, kernel='gauss', aspect='equal', interpolation='hamming')
ax = plt.gca()
ax.set_facecolor("k")
f1.axis_labels.set_font(size=16)
f1.tick_labels.set_font(size=14)
# scalebar
f1.add_scalebar(15/60.) # arcmin
f1.scalebar.set_label('10 pc')
f1.scalebar.set_color('white')
f1.scalebar.set_corner('bottom right')
f1.scalebar.set_label('10 pc')
f1.scalebar.set_linewidth(2)
f1.scalebar.set_font_size(size = 'large')
#f1.add_grid()
#f1.grid.set_color('yellow')
#f1.grid.set_alpha(0.3)
plt.tight_layout()
hdu2 = fits.PrimaryHDU(data=I, header=wcs.to_header())
f2 = aplpy.FITSFigure(hdu2)
f2.set_theme('publication')
ax = plt.gca()
ax.set_facecolor("k")
f2.show_colorscale(cmap = 'inferno')
#f2.show_colorscale(stretch='linear', cmap='inferno', aspect='equal', interpolation='hamming', vmin=0., vmax=0.0018)
f2.axis_labels.set_font(size=16)
f2.tick_labels.set_font(size = 14)
# scalebar
f2.add_scalebar(15/60.) # arcmin
f2.scalebar.set_label('10 pc')
f2.scalebar.set_color('white')
f2.scalebar.set_corner('bottom right')
f2.scalebar.set_label('10 pc')
f2.scalebar.set_linewidth(2)
f2.scalebar.set_font_size(size = 'large')
#f2.add_grid()
#f2.grid.set_color('yellow')
#f2.grid.set_alpha(0.2)
norm = ImageNormalize(lic, interval=MinMaxInterval(), stretch=PowerStretch(1.05))
plt.imshow(lic, alpha = 0.4, origin = 'lower', interpolation = 'hamming', cmap = 'gray')
#plt.savefig('./lic_overplot.png', dpi = 100, bbox_inches = 'tight')
plt.tight_layout()
hdu3 = fits.PrimaryHDU(data=mult, header=wcs.to_header())
f3 = aplpy.FITSFigure(hdu3)
#plt.imshow(mult, origin = 'lower', interpolation = 'gaussian', cmap = 'inferno')
f3.set_theme('publication')
ax = plt.gca()
ax.set_facecolor("k")
#f3.show_colorscale(cmap = 'inferno')
f3.show_colorscale(vmin = 0., vmax = 0.6, stretch='linear', cmap='inferno',smooth = None, kernel='gauss', aspect='equal', interpolation='hamming')
#f3.show_colorscale(vmin=0., vmax=100, cmap='inferno',smooth = None, kernel='gauss', aspect='equal', interpolation='hamming')
f3.axis_labels.set_font(size=16)
f3.tick_labels.set_font(size = 14)
# scalebar
f3.add_scalebar(15/60.) # arcmin
f3.scalebar.set_label('10 pc')
f3.scalebar.set_color('white')
f3.scalebar.set_corner('bottom right')
f3.scalebar.set_label('10 pc')
f3.scalebar.set_linewidth(2)
f3.scalebar.set_font_size(size = 'large')
#f3.add_grid()
#f3.grid.set_color('yellow')
#f3.grid.set_alpha(0.2)
#fits.writeto('./mult2.fits', mult, header=wcs.to_header())
#plt.savefig('./mult.png', dpi = 100, bbox_inches = 'tight')
plt.tight_layout()
#im = convolve_fft(lic, I, fft_pad=True, psf_pad=True)
#im = ndimage.sobel(mult)
#plt.figure()
#plt.imshow(im, origin = 'lower', cmap = 'inferno')
"""
| sbg2133/miscellaneous_projects | carina/planck_lic_aplpy.py | Python | gpl-3.0 | 7,820 | [
"Gaussian"
] | cf0f06ed2d8bdb1b6f6fa0630fef0fdca2ab5fc5ee5eb449d6e3cc7ad7845074 |
"""
Distributions
-------------
A widget for plotting attribute distributions.
"""
from math import sqrt
import sys
import collections
from xml.sax.saxutils import escape
from AnyQt.QtWidgets import QSizePolicy, QLabel, QListView,QToolTip
from AnyQt.QtGui import QColor, QPen, QBrush, QPainter, QPicture, QPalette
from AnyQt.QtCore import Qt, QRectF
import numpy
import pyqtgraph as pg
import Orange.data
from Orange.statistics import distribution, contingency
from Orange.widgets import widget, gui, settings
from Orange.widgets.utils import itemmodels
from Orange.widgets.widget import InputSignal
from Orange.widgets.visualize.owlinearprojection import LegendItem, ScatterPlotItem
from Orange.widgets.io import FileFormat
from Orange.widgets.visualize.owscatterplotgraph import HelpEventDelegate
def selected_index(view):
    """Return the row of the single selected index in `view`, or -1 if
    nothing is selected.

    `view` must be in single selection mode.
    """
    chosen = view.selectedIndexes()
    assert len(chosen) < 2, "View must be in single selection mode"
    return chosen[0].row() if chosen else -1
class DistributionBarItem(pg.GraphicsObject):
    """A single stacked bar: `dist` gives the per-segment heights as
    fractions of `geometry`'s height, painted in the matching `colors`."""

    def __init__(self, geometry, dist, colors):
        super().__init__()
        self.geometry = geometry
        self.dist = dist
        self.colors = colors
        # Lazily-built QPicture cache; rendered on first paint().
        self.__picture = None

    def paint(self, painter, options, widget):
        if self.__picture is None:
            self.__paint()
        painter.drawPicture(0, 0, self.__picture)

    def boundingRect(self):
        return self.geometry

    def __paint(self):
        # Record the bar once into a QPicture so repaints are cheap.
        picture = QPicture()
        painter = QPainter(picture)
        pen = QPen(QBrush(Qt.white), 0.5)
        pen.setCosmetic(True)
        painter.setPen(pen)

        geom = self.geometry
        x, y = geom.x(), geom.y()
        w, h = geom.width(), geom.height()
        # Equal-width sub-bars, one per distribution segment.
        wsingle = w / len(self.dist)
        for d, c in zip(self.dist, self.colors):
            painter.setBrush(QBrush(c))
            painter.drawRect(QRectF(x, y, wsingle, d * h))
            x += wsingle
        painter.end()

        self.__picture = picture
class OWDistributions(widget.OWWidget):
    # Widget metadata used by the Orange canvas.
    name = "Distributions"
    description = "Display value distributions of a data feature in a graph."
    icon = "icons/Distribution.svg"
    priority = 120

    inputs = [InputSignal("Data", Orange.data.Table, "set_data",
                          doc="Set the input data set")]

    # Context settings are restored per input-domain.
    settingsHandler = settings.DomainContextHandler(
        match_values=settings.DomainContextHandler.MATCH_VALUES_ALL)
    #: Selected variable index
    variable_idx = settings.ContextSetting(-1)
    #: Selected group variable
    groupvar_idx = settings.ContextSetting(0)

    # Display toggles: relative frequencies and binning of continuous vars.
    relative_freq = settings.Setting(False)
    disc_cont = settings.Setting(False)

    # Index into smoothing_facs / bins; which list applies depends on
    # whether continuous variables are binned (see _setup_smoothing).
    smoothing_index = settings.Setting(5)
    show_prob = settings.ContextSetting(0)

    graph_name = "plot"

    # Number of histogram bins used by the ASH density estimate.
    ASH_HIST = 50

    # Bin counts offered by the precision slider (binned mode) and
    # smoothing factors (continuous mode; reversed so the slider's
    # "Precise" end means less smoothing).
    bins = [ 2, 3, 4, 5, 8, 10, 12, 15, 20, 30, 50 ]
    smoothing_facs = list(reversed([ 0.1, 0.2, 0.4, 0.6, 0.8, 1, 1.5, 2, 4, 6, 10 ]))
def __init__(self):
    """Build the control panel (variable list, precision slider,
    group-by selector) and the pyqtgraph plot with its secondary
    probability axis."""
    super().__init__()
    # Input data and cached statistics, populated by set_data()/_setup().
    self.data = None

    self.distributions = None
    self.contingencies = None
    self.var = self.cvar = None
    varbox = gui.vBox(self.controlArea, "Variable")

    self.varmodel = itemmodels.VariableListModel()
    self.groupvarmodel = []

    self.varview = QListView(
        selectionMode=QListView.SingleSelection)
    self.varview.setSizePolicy(
        QSizePolicy.Minimum, QSizePolicy.Expanding)
    self.varview.setModel(self.varmodel)
    self.varview.setSelectionModel(
        itemmodels.ListSingleSelectionModel(self.varmodel))
    self.varview.selectionModel().selectionChanged.connect(
        self._on_variable_idx_changed)
    varbox.layout().addWidget(self.varview)

    box = gui.vBox(self.controlArea, "Precision")

    gui.separator(self.controlArea, 4, 4)
    box2 = gui.hBox(box)
    self.l_smoothing_l = gui.widgetLabel(box2, "Smooth")
    gui.hSlider(box2, self, "smoothing_index",
                minValue=0, maxValue=len(self.smoothing_facs) - 1,
                callback=self._on_set_smoothing, createLabel=False)
    self.l_smoothing_r = gui.widgetLabel(box2, "Precise")

    self.cb_disc_cont = gui.checkBox(
        gui.indentedBox(box, sep=4),
        self, "disc_cont", "Bin continuous variables",
        callback=self._on_groupvar_idx_changed,
        tooltip="Show continuous variables as discrete.")

    box = gui.vBox(self.controlArea, "Group by")
    self.icons = gui.attributeIconDict
    self.groupvarview = gui.comboBox(box, self, "groupvar_idx",
        callback=self._on_groupvar_idx_changed, valueType=str,
        contentsLength=12)
    box2 = gui.indentedBox(box, sep=4)
    self.cb_rel_freq = gui.checkBox(
        box2, self, "relative_freq", "Show relative frequencies",
        callback=self._on_relative_freq_changed,
        tooltip="Normalize probabilities so that probabilities for each group-by value sum to 1.")
    gui.separator(box2)
    self.cb_prob = gui.comboBox(
        box2, self, "show_prob", label="Show probabilities:",
        orientation=Qt.Horizontal,
        callback=self._on_relative_freq_changed,
        tooltip="Show probabilities for a chosen group-by value (at each point probabilities for all group-by values sum to 1).")

    # Main plot area: a PlotItem for densities/bars plus a linked
    # ViewBox (plot_prob) drawn over it for the probability curves.
    self.plotview = pg.PlotWidget(background=None)
    self.plotview.setRenderHint(QPainter.Antialiasing)
    self.mainArea.layout().addWidget(self.plotview)
    w = QLabel()
    w.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
    self.mainArea.layout().addWidget(w, Qt.AlignCenter)
    self.ploti = pg.PlotItem()
    self.plot = self.ploti.vb
    self.ploti.hideButtons()
    self.plotview.setCentralItem(self.ploti)

    self.plot_prob = pg.ViewBox()
    self.ploti.hideAxis('right')
    self.ploti.scene().addItem(self.plot_prob)
    self.ploti.getAxis("right").linkToView(self.plot_prob)
    self.ploti.getAxis("right").setLabel("Probability")
    self.plot_prob.setZValue(10)
    self.plot_prob.setXLink(self.ploti)
    self.update_views()
    self.ploti.vb.sigResized.connect(self.update_views)
    # Probabilities live in [0, 1] by definition.
    self.plot_prob.setRange(yRange=[0,1])

    def disable_mouse(plot):
        # Plot is display-only: no pan/zoom or context menu.
        plot.setMouseEnabled(False, False)
        plot.setMenuEnabled(False)

    disable_mouse(self.plot)
    disable_mouse(self.plot_prob)

    # (viewbox, item) pairs examined by help_event() for tooltips.
    self.tooltip_items = []
    self.plot.scene().installEventFilter(
        HelpEventDelegate(self.help_event, self))

    pen = QPen(self.palette().color(QPalette.Text))
    for axis in ("left", "bottom"):
        self.ploti.getAxis(axis).setPen(pen)

    self._legend = LegendItem()
    self._legend.setParentItem(self.plot)
    self._legend.hide()
    self._legend.anchor((1, 0), (1, 0))
def update_views(self):
    """Keep the overlay probability ViewBox geometry and x-range in
    sync with the main plot (called on every resize)."""
    self.plot_prob.setGeometry(self.plot.sceneBoundingRect())
    self.plot_prob.linkedViewChanged(self.plot, self.plot_prob.XAxis)
def set_data(self, data):
    """Input signal handler: store `data`, rebuild the variable and
    group-by models, restore context settings and redraw."""
    self.closeContext()
    self.clear()
    self.warning()
    self.data = data
    if self.data is not None:
        if not self.data:
            self.warning("Empty input data cannot be visualized")
            return
        domain = self.data.domain
        # Offer all domain variables plus continuous/discrete metas.
        self.varmodel[:] = list(domain) + \
                           [meta for meta in domain.metas
                            if meta.is_continuous or meta.is_discrete]
        self.groupvarview.clear()
        # Group-by candidates: "(None)" plus every discrete variable.
        self.groupvarmodel = \
            ["(None)"] + [var for var in domain if var.is_discrete] + \
            [meta for meta in domain.metas if meta.is_discrete]
        self.groupvarview.addItem("(None)")
        for var in self.groupvarmodel[1:]:
            self.groupvarview.addItem(self.icons[var], var.name)
        if domain.has_discrete_class:
            # Default grouping: the (discrete) class variable.
            self.groupvar_idx = \
                self.groupvarmodel[1:].index(domain.class_var) + 1
        self.openContext(domain)
        # Clamp restored indices to the rebuilt models' bounds.
        self.variable_idx = min(max(self.variable_idx, 0),
                                len(self.varmodel) - 1)
        self.groupvar_idx = min(max(self.groupvar_idx, 0),
                                len(self.groupvarmodel) - 1)
        itemmodels.select_row(self.varview, self.variable_idx)
        self._setup()
def clear(self):
    """Reset plots, models, selections and the legend to their empty
    state (used before loading new data)."""
    self.plot.clear()
    self.plot_prob.clear()
    self.varmodel[:] = []
    self.groupvarmodel = []
    self.variable_idx = -1
    self.groupvar_idx = 0
    self._legend.clear()
    self._legend.hide()
    self.groupvarview.clear()
    self.cb_prob.clear()
def _setup_smoothing(self):
    """Relabel the precision slider and binning checkbox to match the
    current mode: smoothing factors for continuous display, bin counts
    when binning is active (or the variable is not continuous)."""
    if not self.disc_cont and self.var and self.var.is_continuous:
        self.cb_disc_cont.setText("Bin continuous variables")
        self.l_smoothing_l.setText("Smooth")
        self.l_smoothing_r.setText("Precise")
    else:
        self.cb_disc_cont.setText("Bin continuous variables into {} bins".
                                  format(self.bins[self.smoothing_index]))
        self.l_smoothing_l.setText(" " + str(self.bins[0]))
        self.l_smoothing_r.setText(" " + str(self.bins[-1]))
def _setup(self):
    """Recompute everything for the current variable / group-by choice
    and redraw either the plain distribution or the contingency plot."""
    self.plot.clear()
    self.plot_prob.clear()
    self._legend.clear()
    self._legend.hide()

    varidx = self.variable_idx
    self.var = self.cvar = None
    if varidx >= 0:
        self.var = self.varmodel[varidx]
    if self.groupvar_idx > 0:
        self.cvar = self.groupvarmodel[self.groupvar_idx]
        # Probability combo: "(None)", one entry per group value, "(All)".
        self.cb_prob.clear()
        self.cb_prob.addItem("(None)")
        self.cb_prob.addItems(self.cvar.values)
        self.cb_prob.addItem("(All)")
        self.show_prob = min(max(self.show_prob, 0),
                             len(self.cvar.values) + 1)
    data = self.data
    self._setup_smoothing()
    if self.var is None:
        return
    if self.disc_cont:
        # Binned mode: discretize the (possibly continuous) variable
        # into the slider-selected number of equal-width bins.
        data = self.data[:, (self.var, self.cvar) if self.cvar else self.var ]
        disc = Orange.preprocess.discretize.EqualWidth(n=self.bins[self.smoothing_index])
        data = Orange.preprocess.Discretize(method=disc, remove_const=False)(data)
        self.var = (list(data.domain) + list(data.domain.metas))[0]
    self.set_left_axis_name()
    self.enable_disable_rel_freq()
    if self.cvar:
        self.contingencies = \
            contingency.get_contingency(data, self.var, self.cvar)
        self.display_contingency()
    else:
        self.distributions = \
            distribution.get_distribution(data, self.var)
        self.display_distribution()
    self.plot.autoRange()
def help_event(self, ev):
    """Scene help-event handler: show a combined tooltip for every
    registered curve/bar under the cursor.  Returns True if a tooltip
    was shown."""
    in_graph_coor = self.plot.mapSceneToView(ev.scenePos())
    ctooltip = []
    for vb, item in self.tooltip_items:
        # Curves are hit-tested via their mouse shape, bars via their
        # bounding rectangle, each in its own viewbox coordinates.
        if isinstance(item, pg.PlotCurveItem) and item.mouseShape().contains(vb.mapSceneToView(ev.scenePos())):
            ctooltip.append(item.tooltip)
        elif isinstance(item, DistributionBarItem) and item.boundingRect().contains(vb.mapSceneToView(ev.scenePos())):
            ctooltip.append(item.tooltip)
    if ctooltip:
        QToolTip.showText(ev.screenPos(), "\n\n".join(ctooltip), widget=self.plotview)
        return True
    return False
def display_distribution(self):
    """Draw the ungrouped distribution: an ASH density curve for a
    continuous variable, gray frequency bars for a discrete one."""
    dist = self.distributions
    var = self.var
    assert len(dist) > 0
    self.plot.clear()
    self.plot_prob.clear()
    # No grouping, hence no probability axis.
    self.ploti.hideAxis('right')
    self.tooltip_items = []

    bottomaxis = self.ploti.getAxis("bottom")
    bottomaxis.setLabel(var.name)
    bottomaxis.resizeEvent()

    self.set_left_axis_name()
    if var and var.is_continuous:
        bottomaxis.setTicks(None)
        if not len(dist[0]):
            return
        # Average-shifted-histogram density; recentre bin edges so the
        # curve is plotted at bin midpoints.
        edges, curve = ash_curve(dist, None, m=OWDistributions.ASH_HIST,
                                 smoothing_factor=self.smoothing_facs[self.smoothing_index])
        edges = edges + (edges[1] - edges[0])/2
        edges = edges[:-1]
        item = pg.PlotCurveItem()
        pen = QPen(QBrush(Qt.white), 3)
        pen.setCosmetic(True)
        item.setData(edges, curve, antialias=True, stepMode=False,
                     fillLevel=0, brush=QBrush(Qt.gray), pen=pen)
        self.plot.addItem(item)
        item.tooltip = "Density"
        self.tooltip_items.append((self.plot, item))
    else:
        bottomaxis.setTicks([list(enumerate(var.values))])
        for i, w in enumerate(dist):
            # One fixed-width gray bar per discrete value.
            geom = QRectF(i - 0.33, 0, 0.66, w)
            item = DistributionBarItem(geom, [1.0],
                                       [QColor(128, 128, 128)])
            self.plot.addItem(item)
            item.tooltip = "Frequency for %s: %r" % (var.values[i], w)
            self.tooltip_items.append((self.plot, item))
def _on_relative_freq_changed(self):
    """Redraw after a display-option change (relative frequencies or
    shown probabilities), keeping the axis label in sync."""
    self.set_left_axis_name()
    grouped = self.cvar and self.cvar.is_discrete
    if not grouped:
        self.display_distribution()
    else:
        self.display_contingency()
    self.plot.autoRange()
def display_contingency(self):
"""
Set the contingency to display.
"""
cont = self.contingencies
var, cvar = self.var, self.cvar
assert len(cont) > 0
self.plot.clear()
self.plot_prob.clear()
self._legend.clear()
self.tooltip_items = []
if self.show_prob:
self.ploti.showAxis('right')
else:
self.ploti.hideAxis('right')
bottomaxis = self.ploti.getAxis("bottom")
bottomaxis.setLabel(var.name)
bottomaxis.resizeEvent()
cvar_values = cvar.values
colors = [QColor(*col) for col in cvar.colors]
if var and var.is_continuous:
bottomaxis.setTicks(None)
weights, cols, cvar_values, curves = [], [], [], []
for i, dist in enumerate(cont):
v, W = dist
if len(v):
weights.append(numpy.sum(W))
cols.append(colors[i])
cvar_values.append(cvar.values[i])
curves.append(ash_curve(dist, cont, m=OWDistributions.ASH_HIST,
smoothing_factor=self.smoothing_facs[self.smoothing_index]))
weights = numpy.array(weights)
sumw = numpy.sum(weights)
weights /= sumw
colors = cols
curves = [(X, Y * w) for (X, Y), w in zip(curves, weights)]
ncval = len(cvar_values)
curvesline = [] #from histograms to lines
for (X,Y) in curves:
X = X + (X[1] - X[0])/2
X = X[:-1]
X = numpy.array(X)
Y = numpy.array(Y)
curvesline.append((X,Y))
for t in [ "fill", "line" ]:
for (X, Y), color, w, cval in reversed(list(zip(curvesline, colors, weights, cvar_values))):
item = pg.PlotCurveItem()
pen = QPen(QBrush(color), 3)
pen.setCosmetic(True)
color = QColor(color)
color.setAlphaF(0.2)
item.setData(X, Y/(w if self.relative_freq else 1), antialias=True, stepMode=False,
fillLevel=0 if t == "fill" else None,
brush=QBrush(color), pen=pen)
self.plot.addItem(item)
if t == "line":
item.tooltip = ("Normalized density " if self.relative_freq else "Density ") \
+ "\n"+ cvar.name + "=" + cval
self.tooltip_items.append((self.plot, item))
if self.show_prob:
M_EST = 5 #for M estimate
all_X = numpy.array(numpy.unique(numpy.hstack([X for X,_ in curvesline])))
inter_X = numpy.array(numpy.linspace(all_X[0], all_X[-1], len(all_X)*2))
curvesinterp = [ numpy.interp(inter_X, X, Y) for (X,Y) in curvesline ]
sumprob = numpy.sum(curvesinterp, axis=0)
# allcorrection = M_EST/sumw*numpy.sum(sumprob)/len(inter_X)
legal = sumprob > 0.05 * numpy.max(sumprob)
i = len(curvesinterp) + 1
show_all = self.show_prob == i
for Y, color, cval in reversed(list(zip(curvesinterp, colors, cvar_values))):
i -= 1
if show_all or self.show_prob == i:
item = pg.PlotCurveItem()
pen = QPen(QBrush(color), 3, style=Qt.DotLine)
pen.setCosmetic(True)
#prob = (Y+allcorrection/ncval)/(sumprob+allcorrection)
prob = Y[legal] / sumprob[legal]
item.setData(inter_X[legal], prob, antialias=True, stepMode=False,
fillLevel=None, brush=None, pen=pen)
self.plot_prob.addItem(item)
item.tooltip = "Probability that \n" + cvar.name + "=" + cval
self.tooltip_items.append((self.plot_prob, item))
elif var and var.is_discrete:
bottomaxis.setTicks([list(enumerate(var.values))])
cont = numpy.array(cont)
ncval = len(cvar_values)
maxh = 0 #maximal column height
maxrh = 0 #maximal relative column height
scvar = cont.sum(axis=1)
#a cvar with sum=0 with allways have distribution counts 0,
#therefore we can divide it by anything
scvar[scvar==0] = 1
for i, (value, dist) in enumerate(zip(var.values, cont.T)):
maxh = max(maxh, max(dist))
maxrh = max(maxrh, max(dist/scvar))
for i, (value, dist) in enumerate(zip(var.values, cont.T)):
dsum = sum(dist)
geom = QRectF(i - 0.333, 0, 0.666, maxrh
if self.relative_freq else maxh)
if self.show_prob:
prob = dist / dsum
ci = 1.96 * numpy.sqrt(prob * (1 - prob) / dsum)
else:
ci = None
item = DistributionBarItem(geom, dist/scvar/maxrh
if self.relative_freq
else dist/maxh, colors)
self.plot.addItem(item)
tooltip = "\n".join("%s: %.*f" % (n, 3 if self.relative_freq else 1, v)
for n,v in zip(cvar_values, dist/scvar if self.relative_freq else dist ))
item.tooltip = ("Normalized frequency " if self.relative_freq else "Frequency ") \
+ "(" + cvar.name + "=" + value + "):" \
+ "\n" + tooltip
self.tooltip_items.append((self.plot, item))
if self.show_prob:
item.tooltip += "\n\nProbabilities:"
for ic, a in enumerate(dist):
if self.show_prob - 1 != ic and \
self.show_prob - 1 != len(dist):
continue
position = -0.333 + ((ic+0.5)*0.666/len(dist))
if dsum < 1e-6:
continue
prob = a / dsum
if not 1e-6 < prob < 1 - 1e-6:
continue
ci = 1.96 * sqrt(prob * (1 - prob) / dsum)
item.tooltip += "\n%s: %.3f ± %.3f" % (cvar_values[ic], prob, ci)
mark = pg.ScatterPlotItem()
bar = pg.ErrorBarItem()
pen = QPen(QBrush(QColor(0)), 1)
pen.setCosmetic(True)
bar.setData(x=[i+position], y=[prob],
bottom=min(numpy.array([ci]), prob),
top=min(numpy.array([ci]), 1 - prob),
beam=numpy.array([0.05]),
brush=QColor(1), pen=pen)
mark.setData([i+position], [prob], antialias=True, symbol="o",
fillLevel=None, pxMode=True, size=10,
brush=QColor(colors[ic]), pen=pen)
self.plot_prob.addItem(bar)
self.plot_prob.addItem(mark)
for color, name in zip(colors, cvar_values):
self._legend.addItem(
ScatterPlotItem(pen=color, brush=color, size=10, shape="s"),
escape(name)
)
self._legend.show()
def set_left_axis_name(self):
    """Update the left-axis label to match the current display mode
    (density for continuous variables, frequency otherwise; the
    'relative' variant when grouped and relative_freq is on)."""
    axis = self.ploti.getAxis("left")
    relative = self.cvar is not None and self.relative_freq
    if self.var and self.var.is_continuous:
        label = "Relative density" if relative else "Density"
    else:
        label = "Relative frequency" if relative else "Frequency"
    axis.setLabel(label)
    axis.resizeEvent()
def enable_disable_rel_freq(self):
    """Enable the probability and relative-frequency checkboxes only
    when both a variable and a grouping variable are selected."""
    ungrouped = self.var is None or self.cvar is None
    self.cb_prob.setDisabled(ungrouped)
    self.cb_rel_freq.setDisabled(ungrouped)
def _on_variable_idx_changed(self):
    # The displayed variable changed: sync the index from the view,
    # then rebuild the whole plot.
    self.variable_idx = selected_index(self.varview)
    self._setup()

def _on_groupvar_idx_changed(self):
    # Grouping variable changed: rebuild the plot.
    self._setup()

def _on_set_smoothing(self):
    # Smoothing setting changed: rebuild the plot.
    self._setup()
def onDeleteWidget(self):
    """Clear all plot items before the widget is torn down."""
    self.plot.clear()
    super().onDeleteWidget()
def get_widget_name_extension(self):
    # Window-title suffix: the selected variable's name, or None
    # (implicit) when nothing is selected.
    if self.variable_idx >= 0:
        return self.varmodel[self.variable_idx]
def send_report(self):
    """Compose the widget report: the distribution plot plus a caption
    describing the displayed variable, the grouping variable and, when
    a single group value's probability is shown, which one."""
    if self.variable_idx < 0:
        return
    self.report_plot()
    text = "Distribution of '{}'".format(
        self.varmodel[self.variable_idx])
    if self.groupvar_idx:
        group_var = self.groupvarmodel[self.groupvar_idx]
        prob = self.cb_prob
        # An index strictly between the first ("none") and last ("all")
        # entries means a single group value's probability is displayed.
        indiv_probs = 0 < prob.currentIndex() < prob.count() - 1
        if not indiv_probs or self.relative_freq:
            text += " grouped by '{}'".format(group_var)
            if self.relative_freq:
                text += " (relative frequencies)"
        if indiv_probs:
            # Fixed typo in the user-facing caption:
            # "probabilites" -> "probabilities".
            text += "; probabilities for '{}={}'".format(
                group_var, prob.currentText())
    self.report_caption(text)
def dist_sum(D1, D2):
    """Sum two continuous weighted distributions.

    Each distribution is a (values, weights) pair. The supports are
    merged; weights of coinciding values are added together. Returns
    the unique, sorted values and their summed weights.
    """
    xs = numpy.r_[D1[0], D2[0]]
    ws = numpy.r_[D1[1], D2[1]]
    order = numpy.argsort(xs)
    xs, ws = xs[order], ws[order]
    unique, first_idx = numpy.unique(xs, return_index=True)
    # Length of each run of equal values in the sorted array.
    run_lengths = numpy.diff(numpy.r_[first_idx, len(xs)])
    summed = numpy.array([ws[start:start + span].sum()
                          for start, span in zip(first_idx, run_lengths)])
    assert summed.shape[0] == unique.shape[0]
    return unique, summed
def ash_curve(dist, cont=None, bandwidth=None, m=3, smoothing_factor=1):
    """Average-shifted-histogram density curve for a weighted distribution.

    dist is a (values, weights) pair. When *bandwidth* is None it is
    derived from the weighted standard deviation with a Scott-like rule
    (3.5 * std * size**(-1/3)); degenerate cases fall back to the full
    contingency *cont*, and finally to a constant std of 0.1.
    Returns (edges, histogram).
    """
    dist = numpy.asarray(dist)
    X, W = dist
    if bandwidth is None:
        std = weighted_std(X, weights=W)
        size = X.size
        # if only one sample in the class
        if std == 0 and cont is not None:
            std = weighted_std(cont.values, weights=numpy.sum(cont.counts, axis=0))
            size = cont.values.size
        # if attr is constant or contingencies is None (no class variable)
        if std == 0:
            std = 0.1
            size = X.size
        bandwidth = 3.5 * std * (size ** (-1 / 3))
    hist, edges = average_shifted_histogram(X, bandwidth, m, weights=W,
                                            smoothing=smoothing_factor)
    return edges, hist
def average_shifted_histogram(a, h, m=3, weights=None, smoothing=1):
    """
    Compute the average shifted histogram.

    Parameters
    ----------
    a : array-like
        Input data.
    h : float
        Base bin width.
    m : int
        Number of shifted histograms.
    weights : array-like
        An array of weights of the same shape as `a`.
    smoothing : float
        Multiplier on the effective bin width.

    Returns
    -------
    (ash, edges)
        Normalized density values and the corresponding bin edges.
    """
    a = numpy.asarray(a)

    if weights is not None:
        weights = numpy.asarray(weights)
        if weights.shape != a.shape:
            raise ValueError("weights should have the same shape as a")
        weights = weights.ravel()

    a = a.ravel()

    amin, amax = a.min(), a.max()
    h = h * 0.5 * smoothing
    delta = h / m
    wfac = 4  # extended windows for gaussian smoothing
    offset = (wfac * m - 1) * delta
    # numpy.ceil returns a float but numpy.linspace requires an integer
    # sample count (floats raise TypeError on modern numpy), so cast.
    nbins = int(max(numpy.ceil((amax - amin + 2 * offset) / delta),
                    2 * m * wfac - 1))
    bins = numpy.linspace(amin - offset, amax + offset, nbins + 1,
                          endpoint=True)
    hist, edges = numpy.histogram(a, bins, weights=weights, density=True)

    # Gaussian smoothing kernel spanning the extended window, normalized
    # to unit sum so the convolution preserves total mass.
    kernel = gaussian_kernel((numpy.arange(2 * wfac * m - 1) - (wfac * m - 1)) / (wfac * m), wfac)
    kernel = kernel / numpy.sum(kernel)
    ash = numpy.convolve(hist, kernel, mode="same")

    # Renormalize to a proper density over the bin widths.
    ash = ash / numpy.diff(edges) / ash.sum()
    return ash, edges
def triangular_kernel(x):
    """Triangular kernel: 1 - |x| on [-1, 1] and zero outside.

    The original called numpy.clip(1, 0, 1 - |x|), which clips the
    constant 1 and returns *negative* values for |x| > 1; the argument
    order is fixed so the value being clipped is 1 - |x|.
    """
    return numpy.clip(1 - numpy.abs(x), 0, 1)
def gaussian_kernel(x, k):
    """Gaussian kernel scaled so that k standard deviations fit into
    the interval [-1, 1] (no normalization by bandwidth)."""
    z = x * k
    return numpy.exp(-0.5 * z * z) / numpy.sqrt(2 * numpy.pi)
def weighted_std(a, axis=None, weights=None, ddof=0):
    """Weighted standard deviation of *a*, optionally along *axis*,
    with a ddof correction applied to the weighted variance."""
    avg = numpy.average(a, axis=axis, weights=weights)
    if axis is not None:
        # Reshape the mean so it broadcasts against `a` along `axis`.
        avg = avg.reshape(shape_reduce_keep_dims(a.shape, axis))
    var, wsum = numpy.average(
        (a - avg) ** 2, axis=axis, weights=weights, returned=True
    )
    if ddof != 0:
        var *= wsum / (wsum - ddof)
    return numpy.sqrt(var)
def weighted_quantiles(a, prob=(0.25, 0.5, 0.75), alphap=0.4, betap=0.4,
                       axis=None, weights=None):
    """Weighted sample quantiles of *a* at probabilities *prob*.

    Uses the alphap/betap plotting-position parameterization (compare
    scipy.stats.mstats.mquantiles) and linear interpolation between the
    weighted positions, clamping to the extreme values.

    The default for *prob* is now an immutable tuple instead of a list
    (mutable default argument); the values are unchanged.

    NOTE(review): the fancy indexing ``a[sort_ind]`` is only correct for
    axis=None / 1-D input — confirm callers never pass an axis with
    multi-dimensional data.
    """
    a = numpy.asarray(a)
    prob = numpy.asarray(prob)
    sort_ind = numpy.argsort(a, axis)
    a = a[sort_ind]
    if weights is None:
        weights = numpy.ones_like(a)
    else:
        weights = numpy.asarray(weights)
        weights = weights[sort_ind]
    n = numpy.sum(weights)
    k = numpy.cumsum(weights, axis)
    # Plotting positions for the known n knots.
    pk = (k - alphap * weights) / (n + 1 - alphap * weights - betap * weights)
    # m = alphap + prob * (1 - alphap - betap)
    return numpy.interp(prob, pk, a, left=a[0], right=a[-1])
def shape_reduce_keep_dims(shape, axis):
    """Return *shape* with the extent along *axis* replaced by 1,
    keeping the number of dimensions (like numpy's keepdims).

    axis may be a single int or a sequence of ints; a None shape yields
    an empty tuple.
    """
    if shape is None:
        return ()
    shape = list(shape)
    # collections.Sequence was removed in Python 3.10; the ABC now
    # lives in collections.abc.
    if isinstance(axis, collections.abc.Sequence):
        for ax in axis:
            shape[ax] = 1
    else:
        shape[axis] = 1
    return tuple(shape)
def main(argv=None):
    """Run the OWDistributions widget standalone.

    The first command-line argument names the dataset to load
    (defaults to "heart_disease"). Returns the Qt exit code.
    """
    from AnyQt.QtWidgets import QApplication
    import gc
    if argv is None:
        argv = sys.argv
    argv = list(argv)
    app = QApplication(argv)
    w = OWDistributions()
    w.show()
    if len(argv) > 1:
        filename = argv[1]
    else:
        filename = "heart_disease"
    data = Orange.data.Table(filename)
    w.set_data(data)
    w.handleNewSignals()
    rval = app.exec_()
    # Detach the data and tear the widget down cleanly before exiting.
    w.set_data(None)
    w.handleNewSignals()
    w.deleteLater()
    del w
    app.processEvents()
    gc.collect()
    return rval
# Allow running this module directly; exit with the Qt return code.
if __name__ == "__main__":
    sys.exit(main())
| cheral/orange3 | Orange/widgets/visualize/owdistributions.py | Python | bsd-2-clause | 28,245 | [
"Gaussian"
] | d1203974fc004cf1d266054eeb1786087b1ebc50be3726ac6657a3d05165f2fe |
def main():
    """Generate spike-extraction test data.

    Simulates a single Izhikevich neuron with Brian2, stores the
    membrane potential via neo's PyNNNumpyIO, reloads it, and plots
    both the original and the reloaded trace — the two figures should
    match.
    """
    from brian2 import start_scope,mvolt,ms,NeuronGroup,StateMonitor,run
    import matplotlib.pyplot as plt
    import neo
    import quantities as pq
    start_scope()
    # Izhikevich neuron parameters.
    a = 0.02/ms
    b = 0.2/ms
    c = -65*mvolt
    d = 6*mvolt/ms
    I = 4*mvolt/ms
    # Standard Izhikevich neuron equations.
    eqs = '''
    dv/dt = 0.04*v**2/(ms*mvolt) + (5/ms)*v + 140*mvolt/ms - u + I : volt
    du/dt = a*((b*v) - u) : volt/second
    '''
    # NOTE(review): this `reset` string appears unused — the NeuronGroup
    # below passes a literal reset expression instead. Confirm intent.
    reset = '''
    v = c
    u += d
    '''
    # Setup and run simulation.
    G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt')
    G.v = -65*mvolt
    G.u = b*G.v
    M = StateMonitor(G, 'v', record=True)
    run(300*ms)
    # Store results in neo format.
    vm = neo.core.AnalogSignal(M.v[0], units=pq.V, sampling_period=0.1*pq.ms)
    # Plot results.
    plt.figure()
    plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.
    plt.xlabel('Time (ms)')
    plt.ylabel('mv')
    # Save results.
    iom = neo.io.PyNNNumpyIO('spike_extraction_test_data')
    block = neo.core.Block()
    segment = neo.core.Segment()
    segment.analogsignals.append(vm)
    block.segments.append(segment)
    iom.write(block)
    # Load results.
    iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz')
    data = iom2.read()
    vm = data[0].segments[0].analogsignals[0]
    # Plot results.
    # The two figures should match.
    plt.figure()
    plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.
    plt.xlabel('Time (ms)')
    plt.ylabel('mv')

if __name__ == '__main__':
    main()
| etorre/elephant | elephant/test/make_spike_extraction_test_data.py | Python | bsd-3-clause | 1,574 | [
"NEURON"
] | 3264abe39962dd796ba4c4882083cc1b3d1df235ec57b4cf52d1cbaf664df8fe |
from asyncio import gather, Lock, Semaphore, sleep, CancelledError
from collections import deque
from time import time, monotonic
from queue import Empty
from itertools import cycle
from sys import exit
from distutils.version import StrictVersion
from aiopogo import PGoApi, HashServer, json_loads, exceptions as ex
from aiopogo.auth_ptc import AuthPtc
from cyrandom import choice, randint, uniform
from pogeo import get_distance
from .db import POKESTOP_CACHE, GYM_CACHE, MYSTERY_CACHE, SIGHTING_CACHE, RAID_CACHE, WEATHER_CACHE
from .utils import round_coords, load_pickle, get_device_info, get_start_coords, Units, randomize_point
from .shared import get_logger, LOOP, SessionManager, run_threaded, ACCOUNTS
from . import altitudes, avatar, bounds, db_proc, spawns, sanitized as conf
if conf.NOTIFY or conf.NOTIFY_RAIDS:
from .notification import Notifier
if conf.CACHE_CELLS:
from array import typecodes
if 'Q' in typecodes:
from pogeo import get_cell_ids_compact as _pogeo_cell_ids
else:
from pogeo import get_cell_ids as _pogeo_cell_ids
else:
from pogeo import get_cell_ids as _pogeo_cell_ids
# Resolve the configured speed unit once at import time.
_unit = getattr(Units, conf.SPEED_UNIT.lower())
if conf.SPIN_POKESTOPS:
    # Maximum travel speed at which spinning Pokéstops is still allowed,
    # expressed in the configured unit, with its display string.
    if _unit is Units.miles:
        SPINNING_SPEED_LIMIT = 21
        UNIT_STRING = "MPH"
    elif _unit is Units.kilometers:
        SPINNING_SPEED_LIMIT = 34
        UNIT_STRING = "KMH"
    elif _unit is Units.meters:
        SPINNING_SPEED_LIMIT = 34000
        UNIT_STRING = "m/h"
UNIT = _unit.value
# Avoid leaking the temporary into the module namespace.
del _unit
class Worker:
    """Single worker walking on the map"""

    # Shared state across all workers (class attributes).
    download_hash = ''
    # Enforce a minimum scan delay of 10 seconds.
    scan_delay = conf.SCAN_DELAY if conf.SCAN_DELAY >= 10 else 10
    # Global counters shared by all workers.
    g = {'seen': 0, 'captchas': 0}

    if conf.CACHE_CELLS:
        # Cache of S2 cell ids keyed by rounded coordinates, persisted
        # via pickle between runs.
        cells = load_pickle('cells') or {}

        @classmethod
        def get_cell_ids(cls, point):
            # Return cached cell ids for this (rounded) point, computing
            # and caching them on first use.
            rounded = round_coords(point, 4)
            try:
                return cls.cells[rounded]
            except KeyError:
                cells = _pogeo_cell_ids(rounded)
                cls.cells[rounded] = cells
                return cells
    else:
        get_cell_ids = _pogeo_cell_ids

    # Throttle concurrent logins / app simulations across all workers.
    login_semaphore = Semaphore(conf.SIMULTANEOUS_LOGINS, loop=LOOP)
    sim_semaphore = Semaphore(conf.SIMULTANEOUS_SIMULATION, loop=LOOP)

    # Proxy rotations (None when not configured).
    multiproxy = False
    if conf.PROXIES:
        if len(conf.PROXIES) > 1:
            multiproxy = True
        proxies = cycle(conf.PROXIES)
    else:
        proxies = None
    if conf.PTC_PROXIES:
        ptc_proxies = cycle(conf.PTC_PROXIES)
    else:
        ptc_proxies = None

    if conf.NOTIFY or conf.NOTIFY_RAIDS:
        notifier = Notifier()
def __init__(self, worker_no):
    """Create worker *worker_no*, claiming an account from the shared
    queues and initializing its API client and bookkeeping state."""
    self.worker_no = worker_no
    self.log = get_logger('worker-{}'.format(worker_no))
    # account information: prefer a fresh account, fall back to one
    # waiting on a CAPTCHA
    try:
        self.account = self.extra_queue.get_nowait()
    except Empty as e:
        try:
            self.account = self.captcha_queue.get_nowait()
        except Empty as e:
            raise ValueError("You don't have enough accounts for the number of workers specified in GRID.") from e
    self.username = self.account['username']
    try:
        self.location = self.account['location'][:2]
    except KeyError:
        self.location = get_start_coords(worker_no)
    self.altitude = None
    # last time of any request
    self.last_request = self.account.get('time', 0)
    # last time of a request that requires user interaction in the game
    self.last_action = self.last_request
    # last time of a GetMapObjects request
    self.last_gmo = self.last_request
    try:
        self.items = self.account['items']
        self.bag_items = sum(self.items.values())
    except KeyError:
        # NOTE(review): on this path self.bag_items is never assigned —
        # confirm nothing reads it before the first inventory update.
        self.account['items'] = {}
        self.items = self.account['items']
    self.inventory_timestamp = self.account.get('inventory_timestamp', 0) if self.items else 0
    self.player_level = self.account.get('level')
    self.num_captchas = 0
    self.eggs = {}
    self.unused_incubators = deque()
    self.initialize_api()
    # State variables
    self.busy = Lock(loop=LOOP)
    # Other variables
    self.after_spawn = 0
    self.speed = 0
    self.total_seen = 0
    self.error_code = 'INIT'
    self.item_capacity = 350
    self.visits = 0
    self.pokestops = conf.SPIN_POKESTOPS
    self.next_spin = 0
    self.handle = HandleStub()
def initialize_api(self):
    """Create a fresh PGoApi client for this worker's account,
    restoring a cached PTC access token when one is still valid so a
    full login can be skipped."""
    device_info = get_device_info(self.account)
    self.empty_visits = 0
    self.api = PGoApi(device_info=device_info)
    self.api.set_position(*self.location, self.altitude)
    if self.proxies:
        self.api.proxy = next(self.proxies)
    if self.ptc_proxies:
        self.api.ptc_proxy = next(self.ptc_proxies)
    try:
        if self.account['provider'] == 'ptc' and 'auth' in self.account:
            # Reuse the stored access token/expiry; mark the provider
            # authenticated only if the token still checks out.
            self.api.auth_provider = AuthPtc(username=self.username, password=self.account['password'], timeout=conf.LOGIN_TIMEOUT)
            self.api.auth_provider._access_token = self.account['auth']
            self.api.auth_provider._access_token_expiry = self.account['expiry']
            if self.api.auth_provider.check_access_token():
                self.api.auth_provider.authenticated = True
    except KeyError:
        pass
def swap_proxy(self):
    """Rotate the API onto a different proxy from the pool."""
    current = self.api.proxy
    while True:
        self.api.proxy = next(self.proxies)
        if self.api.proxy != current:
            break
def swap_ptc_proxy(self):
    """Rotate the PTC login proxy; return False when fewer than two
    PTC proxies are configured, True after a successful swap."""
    if len(conf.PTC_PROXIES) < 2:
        return False
    current = self.api.ptc_proxy
    while True:
        self.api.ptc_proxy = next(self.ptc_proxies)
        if self.api.ptc_proxy != current:
            return True
async def login(self, reauth=False):
    """Logs worker in and prepares for scanning.

    With reauth=True only re-authenticates and returns True/False;
    otherwise also runs the post-login app simulation (or the minimal
    remote-config download) and raises on persistent auth errors.
    """
    self.log.info('Trying to log in')
    for attempt in range(-1, conf.MAX_RETRIES):
        try:
            self.error_code = '»'
            # Limit simultaneous logins across all workers.
            async with self.login_semaphore:
                self.error_code = 'LOGIN'
                await self.api.set_authentication(
                    username=self.username,
                    password=self.account['password'],
                    provider=self.account.get('provider') or 'ptc',
                    timeout=conf.LOGIN_TIMEOUT
                )
        except ex.UnexpectedAuthError as e:
            await self.swap_account('unexpected auth error')
        except ex.AuthException as e:
            err = e
            await sleep(2, loop=LOOP)
        else:
            err = None
            break
    if reauth:
        if err:
            self.error_code = 'NOT AUTHENTICATED'
            self.log.info('Re-auth error on {}: {}', self.username, err)
            return False
        self.error_code = None
        return True
    if err:
        raise err
    self.error_code = '°'
    version = 9100
    # Limit simultaneous app simulations across all workers.
    async with self.sim_semaphore:
        self.error_code = 'APP SIMULATION'
        if conf.APP_SIMULATION:
            await self.app_simulation_login(version)
        else:
            await self.download_remote_config(version)
    self.error_code = None
    return True
async def get_player(self):
    """Request player data.

    Raises WarnAccountException/BannedAccountException when the account
    is flagged, updates item capacity and creation time, and returns
    the tutorial state (or None when the response is unusable).
    """
    request = self.api.create_request()
    request.get_player(player_locale=conf.PLAYER_LOCALE)
    responses = await self.call(request, chain=False)
    tutorial_state = None
    try:
        get_player = responses['GET_PLAYER']
        if get_player.warn:
            raise ex.WarnAccountException
        if get_player.banned:
            raise ex.BannedAccountException
        player_data = get_player.player_data
        tutorial_state = player_data.tutorial_state
        # API can return 0 as capacity.
        if player_data.max_item_storage != 0:
            self.item_capacity = player_data.max_item_storage
        if 'created' not in self.account:
            self.account['created'] = player_data.creation_timestamp_ms / 1000
    except (KeyError, TypeError, AttributeError):
        pass
    return tutorial_state
async def download_remote_config(self, version):
    """Fetch the remote config version for *version*.

    Also opportunistically reads the player level from the chained
    inventory response. Returns (asset_digest_time, item_template_time)
    in seconds, or (0.0, 0.0) when the response is missing.
    """
    request = self.api.create_request()
    request.download_remote_config_version(platform=1, device_model=self.account['model'], app_version=version)
    responses = await self.call(request, buddy=False, inbox=False, dl_hash=False)
    try:
        inventory_items = responses['GET_INVENTORY'].inventory_delta.inventory_items
        for item in inventory_items:
            level = item.inventory_item_data.player_stats.level
            if level:
                self.player_level = level
                break
    except KeyError:
        pass
    await self.random_sleep(.78, 1.05)
    try:
        remote_config = responses['DOWNLOAD_REMOTE_CONFIG_VERSION']
        # NOTE(review): asset timestamp is divided by 1e6 while the
        # template timestamp is divided by 1e3 — confirm the intended units.
        return (
            remote_config.asset_digest_timestamp_ms / 1000000,
            remote_config.item_templates_timestamp_ms / 1000)
    except KeyError:
        return 0.0, 0.0
async def set_avatar(self, tutorial=False):
    """Pick a random avatar and submit it; when called during the
    tutorial, also mark tutorial step 1 complete.
    Ends by fetching the player profile."""
    # NOTE(review): 'plater_avatar' looks like a typo for 'player_avatar'.
    plater_avatar = avatar.new()
    request = self.api.create_request()
    request.list_avatar_customizations(
        avatar_type=plater_avatar['avatar'],
        slot=tuple(),
        filters=(2,)
    )
    await self.call(request, buddy=not tutorial, inbox=False, action=5)
    await self.random_sleep(7, 14)
    request = self.api.create_request()
    request.set_avatar(player_avatar=plater_avatar)
    await self.call(request, buddy=not tutorial, inbox=False, action=2)
    if tutorial:
        await self.random_sleep(.5, 4)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=(1,))
        await self.call(request, buddy=False, inbox=False)
        await self.random_sleep(.5, 1)
    request = self.api.create_request()
    request.get_player_profile()
    await self.call(request, inbox=False, action=1)
async def app_simulation_login(self, version):
    """Replicate the RPC sequence the official iOS app performs on
    login: player data, remote config, paginated asset-digest and
    item-template downloads (only when stale), optional tutorial,
    player profile, level-up rewards, store items and news."""
    self.log.info('Starting RPC login sequence (iOS app simulation)')
    # empty request
    # request = self.api.create_request()
    # await self.call(request, chain=False)
    # await self.random_sleep(.43, .97)
    # request 1: get_player
    tutorial_state = await self.get_player()
    await self.random_sleep(.53, 1.1)
    # request 2: download_remote_config_version
    asset_time, template_time = await self.download_remote_config(version)
    if asset_time > self.account.get('asset_time', 0.0):
        # request 3: get_asset_digest (paginated; result == 2 means
        # another page follows)
        i = randint(0, 3)
        result = 2
        page_offset = 0
        page_timestamp = 0
        while result == 2:
            request = self.api.create_request()
            request.get_asset_digest(
                platform=1,
                app_version=version,
                paginate=True,
                page_offset=page_offset,
                page_timestamp=page_timestamp)
            responses = await self.call(request, buddy=False, inbox=False)
            # Occasionally pause longer between pages.
            if i > 2:
                await sleep(1.45)
                i = 0
            else:
                i += 1
                await sleep(.2)
            try:
                response = responses['GET_ASSET_DIGEST']
            except KeyError:
                break
            result = response.result
            page_offset = response.page_offset
            page_timestamp = response.timestamp_ms
        self.account['asset_time'] = asset_time
    if template_time > self.account.get('template_time', 0.0):
        # request 4: download_item_templates (same pagination scheme)
        i = randint(0, 3)
        result = 2
        page_offset = 0
        page_timestamp = 0
        while result == 2:
            request = self.api.create_request()
            request.download_item_templates(
                paginate=True,
                page_offset=page_offset,
                page_timestamp=page_timestamp)
            responses = await self.call(request, buddy=False, inbox=False)
            if i > 2:
                await sleep(1.5)
                i = 0
            else:
                i += 1
                await sleep(.25)
            try:
                response = responses['DOWNLOAD_ITEM_TEMPLATES']
            except KeyError:
                break
            result = response.result
            page_offset = response.page_offset
            page_timestamp = response.timestamp_ms
        self.account['template_time'] = template_time
    if (conf.COMPLETE_TUTORIAL and
            tutorial_state is not None and
            not all(x in tutorial_state for x in (0, 1, 3, 4, 7))):
        self.log.warning('{} is starting tutorial', self.username)
        await self.complete_tutorial(tutorial_state)
    else:
        # request 5: get_player_profile
        request = self.api.create_request()
        request.get_player_profile()
        await self.call(request, inbox=False)
        await self.random_sleep(.2, .3)
    if self.player_level:
        # request 6: level_up_rewards
        request = self.api.create_request()
        request.level_up_rewards(level=self.player_level)
        await self.call(request)
        await self.random_sleep(.45, .7)
    else:
        self.log.warning('No player level')
    # request 7: get_store_items
    request = self.api.create_request()
    request.get_store_items()
    await self.call(request, chain=False)
    await self.random_sleep(.43, .97)
    # request 8: fetch_all_news
    request = self.api.create_request()
    request.fetch_all_news()
    await self.call(request)
    await self.random_sleep(.45, .7)
    self.log.info('Finished RPC login sequence (iOS app simulation)')
    await self.random_sleep(.5, 1.3)
    self.error_code = None
    return True
async def complete_tutorial(self, tutorial_state):
    """Complete any tutorial steps missing from *tutorial_state*
    (0 legal screen, 1 avatar, 3 starter encounter, 4 codename,
    7 first-time experience), with human-like delays between requests.
    """
    self.error_code = 'TUTORIAL'
    if 0 not in tutorial_state:
        # legal screen
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=(0,))
        await self.call(request, buddy=False, inbox=False)
        await self.random_sleep(.35, .525)
        request = self.api.create_request()
        request.get_player(player_locale=conf.PLAYER_LOCALE)
        await self.call(request, buddy=False, inbox=False)
        await sleep(1)
    if 1 not in tutorial_state:
        # avatar selection
        await self.set_avatar(tutorial=True)
    starter_id = None
    if 3 not in tutorial_state:
        # encounter tutorial
        await self.random_sleep(.7, .9)
        request = self.api.create_request()
        request.get_download_urls(asset_id=
            ('1a3c2816-65fa-4b97-90eb-0b301c064b7a/1487275569649000',
             'aa8f7687-a022-4773-b900-3a8c170e9aea/1487275581132582',
             'e89109b0-9a54-40fe-8431-12f7826c8194/1487275593635524'))
        await self.call(request, inbox=False)
        await self.random_sleep(7, 10.3)
        request = self.api.create_request()
        starter = choice((1, 4, 7))
        request.encounter_tutorial_complete(pokemon_id=starter)
        responses = await self.call(request, inbox=False, action=1)
        # Find the id the caught starter was assigned in the inventory.
        try:
            inventory = responses['GET_INVENTORY'].inventory_delta.inventory_items
            for item in inventory:
                pokemon = item.inventory_item_data.pokemon_data
                if pokemon.id:
                    starter_id = pokemon.id
                    break
        except (KeyError, TypeError):
            starter_id = None
        await self.random_sleep(.4, .5)
        request = self.api.create_request()
        request.get_player(player_locale=conf.PLAYER_LOCALE)
        await self.call(request, inbox=False)
    if 4 not in tutorial_state:
        # name selection
        await self.random_sleep(12, 18)
        request = self.api.create_request()
        request.claim_codename(codename=self.username)
        await self.call(request, inbox=False, action=2)
        await sleep(.7, loop=LOOP)
        request = self.api.create_request()
        request.get_player(player_locale=conf.PLAYER_LOCALE)
        await self.call(request, inbox=False)
        await sleep(.13, loop=LOOP)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=(4,))
        await self.call(request, inbox=False)
    if 7 not in tutorial_state:
        # first time experience
        await self.random_sleep(3.9, 4.5)
        request = self.api.create_request()
        request.mark_tutorial_complete(tutorials_completed=(7,))
        await self.call(request, inbox=False)
        if starter_id:
            # Set the tutorial starter as the buddy.
            await self.random_sleep(4, 5)
            request = self.api.create_request()
            request.set_buddy_pokemon(pokemon_id=starter_id)
            await self.call(request, inbox=False, action=2)
            await self.random_sleep(.8, 1.2)
    await sleep(.2, loop=LOOP)
    return True
def update_inventory(self, inventory_items):
    """Refresh cached item counts, eggs and incubators from an
    inventory delta's items."""
    for thing in inventory_items:
        obj = thing.inventory_item_data
        if obj.HasField('item'):
            item = obj.item
            self.items[item.item_id] = item.count
            self.bag_items = sum(self.items.values())
        elif conf.INCUBATE_EGGS:
            if obj.HasField('pokemon_data') and obj.pokemon_data.is_egg:
                egg = obj.pokemon_data
                self.eggs[egg.id] = egg
            elif obj.HasField('egg_incubators'):
                self.unused_incubators.clear()
                for item in obj.egg_incubators.egg_incubator:
                    # Skip incubators that already hold an egg.
                    if item.pokemon_id:
                        continue
                    # item_id 901 goes to the back of the deque so the
                    # limited-use incubators at the front are used first.
                    if item.item_id == 901:
                        self.unused_incubators.append(item)
                    else:
                        self.unused_incubators.appendleft(item)
async def call(self, request, chain=True, buddy=True, settings=True, inbox=True, dl_hash=True, action=None):
    """Execute an aiopogo request with retry/backoff on transient errors.

    chain/buddy/settings/inbox/dl_hash control which standard auxiliary
    sub-requests are attached. *action* is the number of seconds of
    in-game interaction this request represents; the next action request
    is delayed accordingly. Processes the chained inventory, settings
    and CAPTCHA-challenge responses, raising CaptchaException when a
    CAPTCHA is hit and no solver key is configured. Re-raises the last
    error when all retries fail.
    """
    if chain:
        request.check_challenge()
        request.get_hatched_eggs()
        request.get_inventory(last_timestamp_ms=self.inventory_timestamp)
        request.check_awarded_badges()
        if settings:
            if dl_hash:
                request.download_settings(hash=self.download_hash)
            else:
                request.download_settings()
        if buddy:
            request.get_buddy_walked()
        if inbox:
            request.get_inbox(is_history=True)
    if action:
        now = time()
        # wait for the time required, or at least a half-second
        if self.last_action > now + .5:
            await sleep(self.last_action - now, loop=LOOP)
        else:
            await sleep(0.5, loop=LOOP)
    response = None
    err = None
    for attempt in range(-1, conf.MAX_RETRIES):
        try:
            responses = await request.call()
            self.last_request = time()
            err = None
            break
        except (ex.NotLoggedInException, ex.AuthException) as e:
            self.log.info('Auth error on {}: {}', self.username, e)
            err = e
            await sleep(3, loop=LOOP)
            if not await self.login(reauth=True):
                await self.swap_account(reason='reauth failed')
        except ex.TimeoutException as e:
            self.error_code = 'TIMEOUT'
            # Only record/log the first occurrence of each error type.
            if not isinstance(e, type(err)):
                err = e
                self.log.warning('{}', e)
            await sleep(10, loop=LOOP)
        except ex.HashingOfflineException as e:
            if not isinstance(e, type(err)):
                err = e
                self.log.warning('{}', e)
            self.error_code = 'HASHING OFFLINE'
            await sleep(5, loop=LOOP)
        except ex.NianticOfflineException as e:
            if not isinstance(e, type(err)):
                err = e
                self.log.warning('{}', e)
            self.error_code = 'NIANTIC OFFLINE'
            await self.random_sleep()
        except ex.HashingQuotaExceededException as e:
            if not isinstance(e, type(err)):
                err = e
                self.log.warning('Exceeded your hashing quota, sleeping.')
            self.error_code = 'QUOTA EXCEEDED'
            # Sleep until the hashing quota window refreshes.
            refresh = HashServer.status.get('period')
            now = time()
            if refresh:
                if refresh > now:
                    await sleep(refresh - now + 1, loop=LOOP)
                else:
                    await sleep(5, loop=LOOP)
            else:
                await sleep(30, loop=LOOP)
        except ex.BadRPCException:
            raise
        except ex.InvalidRPCException as e:
            self.last_request = time()
            if not isinstance(e, type(err)):
                err = e
                self.log.warning('{}', e)
            self.error_code = 'INVALID REQUEST'
            await self.random_sleep()
        except ex.ProxyException as e:
            if not isinstance(e, type(err)):
                err = e
            self.error_code = 'PROXY ERROR'
            if self.multiproxy:
                self.log.error('{}, swapping proxy.', e)
                self.swap_proxy()
            else:
                if not isinstance(e, type(err)):
                    self.log.error('{}', e)
                await sleep(5, loop=LOOP)
        except (ex.MalformedResponseException, ex.UnexpectedResponseException) as e:
            # NOTE(review): err is not assigned here, so if every attempt
            # fails with only these errors, `responses` below would be
            # unbound — confirm this path cannot be reached in practice.
            self.last_request = time()
            if not isinstance(e, type(err)):
                self.log.warning('{}', e)
            self.error_code = 'MALFORMED RESPONSE'
            await self.random_sleep()
    if err is not None:
        raise err
    if action:
        # pad for time that action would require
        self.last_action = self.last_request + action
    try:
        delta = responses['GET_INVENTORY'].inventory_delta
        self.inventory_timestamp = delta.new_timestamp_ms
        self.update_inventory(delta.inventory_items)
    except KeyError:
        pass
    if settings:
        try:
            dl_settings = responses['DOWNLOAD_SETTINGS']
            Worker.download_hash = dl_settings.hash
        except KeyError:
            self.log.info('Missing DOWNLOAD_SETTINGS response.')
        else:
            # Bail out entirely when a newer client version is forced.
            if (not dl_hash
                    and conf.FORCED_KILL
                    and dl_settings.settings.minimum_client_version != '0.91.2'):
                forced_version = StrictVersion(dl_settings.settings.minimum_client_version)
                if forced_version > StrictVersion('0.91.2'):
                    err = '{} is being forced, exiting.'.format(forced_version)
                    self.log.error(err)
                    print(err)
                    exit()
    try:
        challenge_url = responses['CHECK_CHALLENGE'].challenge_url
        # A single space means "no challenge pending".
        if challenge_url != ' ':
            self.g['captchas'] += 1
            if conf.CAPTCHA_KEY:
                self.log.warning('{} has encountered a CAPTCHA, trying to solve', self.username)
                await self.handle_captcha(challenge_url)
            else:
                raise CaptchaException
    except KeyError:
        pass
    return responses
def travel_speed(self, point):
    """Speed (configured units per hour) required to reach *point*
    given the time elapsed since the last request (at least the
    scan delay)."""
    elapsed = max(time() - self.last_request, self.scan_delay)
    distance = get_distance(self.location, point, UNIT)
    # seconds -> hours
    return distance / elapsed * 3600
async def bootstrap_visit(self, point):
    """Visit *point* with up to three attempts, jittering the location
    slightly after each failure. Returns True on success."""
    for _ in range(3):
        if await self.visit(point, bootstrap=True):
            return True
        self.error_code = '∞'
        self.simulate_jitter(0.00005)
    return False
async def visit(self, point, spawn_id=None, bootstrap=False):
    """Wrapper for self.visit_point - runs it a few times before giving up
    Also is capable of restarting in case an error occurs.

    Sets the worker position/altitude, logs in if needed and delegates
    to visit_point, translating every known failure mode into logging,
    sleeps, proxy/account swaps or removal. Returns visit_point's
    result, or False when the visit failed.
    """
    try:
        try:
            self.altitude = altitudes.get(point)
        except KeyError:
            self.altitude = await altitudes.fetch(point)
        self.location = point
        self.api.set_position(*self.location, self.altitude)
        if not self.authenticated:
            await self.login()
        return await self.visit_point(point, spawn_id, bootstrap)
    except ex.NotLoggedInException:
        self.error_code = 'NOT AUTHENTICATED'
        await sleep(1, loop=LOOP)
        if not await self.login(reauth=True):
            await self.swap_account(reason='reauth failed')
        # Retry the whole visit after re-authenticating.
        return await self.visit(point, spawn_id, bootstrap)
    except ex.AuthConnectionException as e:
        if self.swap_ptc_proxy():
            self.log.error('{}, swapping proxy.', e)
            await sleep(3, loop=LOOP)
        else:
            await sleep(120, loop=LOOP)
        if not await self.login(reauth=True):
            await self.swap_account(reason='reauth failed')
    except ex.AuthException as e:
        self.log.warning('Auth error on {}: {}', self.username, e)
        self.error_code = 'NOT AUTHENTICATED'
        await sleep(3, loop=LOOP)
        await self.swap_account(reason='login failed')
    except CaptchaException:
        self.error_code = 'CAPTCHA'
        self.g['captchas'] += 1
        await sleep(1, loop=LOOP)
        await self.bench_account()
    except CaptchaSolveException:
        self.error_code = 'CAPTCHA'
        await sleep(1, loop=LOOP)
        await self.swap_account(reason='solving CAPTCHA failed')
    except ex.TempHashingBanException:
        self.error_code = 'HASHING BAN'
        self.log.error('Temporarily banned from hashing server for using invalid keys.')
        await sleep(185, loop=LOOP)
    except ex.WarnAccountException:
        self.error_code = 'WARN'
        self.log.warning('{} is warn', self.username)
        await sleep(1, loop=LOOP)
        await self.remove_account(warn=True)
    except ex.BannedAccountException:
        self.error_code = 'BANNED'
        self.log.warning('{} is banned', self.username)
        await sleep(1, loop=LOOP)
        await self.remove_account()
    except ex.ProxyException as e:
        self.error_code = 'PROXY ERROR'
        if self.multiproxy:
            self.log.error('{} Swapping proxy.', e)
            self.swap_proxy()
        else:
            self.log.error('{}', e)
    except ex.TimeoutException as e:
        self.log.warning('{} Giving up.', e)
    except ex.NianticIPBannedException:
        self.error_code = 'IP BANNED'
        if self.multiproxy:
            self.log.warning('Swapping out {} due to IP ban.', self.api.proxy)
            self.swap_proxy()
        else:
            self.log.error('IP banned.')
    except ex.NianticOfflineException as e:
        await self.swap_account(reason='Niantic endpoint failure')
        self.log.warning('{}. Giving up.', e)
    except ex.ServerBusyOrOfflineException as e:
        self.log.warning('{} Giving up.', e)
    except ex.BadRPCException:
        self.error_code = 'BAD REQUEST'
        self.log.warning('{} received code 3 and is likely banned. Removing until next run.', self.username)
        await self.new_account()
    except ex.InvalidRPCException as e:
        self.log.warning('{} Giving up.', e)
    except ex.ExpiredHashKeyException as e:
        self.error_code = 'KEY EXPIRED'
        err = str(e)
        self.log.error(err)
        print(err)
        exit()
    except (ex.MalformedResponseException, ex.UnexpectedResponseException) as e:
        self.log.warning('{} Giving up.', e)
        self.error_code = 'MALFORMED RESPONSE'
    except EmptyGMOException as e:
        self.error_code = '0'
        self.log.warning('Empty GetMapObjects response for {}. Speed: {:.2f}', self.username, self.speed)
    except ex.HashServerException as e:
        self.log.warning('{}', e)
        self.error_code = 'HASHING ERROR'
    except ex.AiopogoError as e:
        self.log.exception(e.__class__.__name__)
        self.error_code = 'AIOPOGO ERROR'
    except CancelledError:
        self.log.warning('Visit cancelled.')
    except Exception as e:
        self.log.exception('A wild {} appeared!', e.__class__.__name__)
        self.error_code = 'EXCEPTION'
    return False
    async def visit_point(self, point, spawn_id, bootstrap,
                          encounter_conf=conf.ENCOUNTER, notify_conf=conf.NOTIFY,
                          more_points=conf.MORE_POINTS):
        """Scan one map point with GetMapObjects and process everything in it.

        Queues every wild/lured Pokemon, pokestop, gym, raid and weather
        cell for the db processor, optionally encountering and notifying,
        and returns the total number of entities seen.

        point: (lat, lon) pair to scan.
        spawn_id: the spawn being targeted, or falsy when none; a 'target'
            record is queued so the caller knows whether it was seen.
        bootstrap: True during initial discovery (changes the status glyph
            and disables the empty-visit account swap).
        Raises ex.UnexpectedResponseException when the GMO response is
        missing or has a non-success status.
        """
        self.handle.cancel()
        self.error_code = '∞' if bootstrap else '!'
        self.log.info('Visiting {0[0]:.4f},{0[1]:.4f}', point)
        start = time()
        cell_ids = self.get_cell_ids(point)
        since_timestamp_ms = (0,) * len(cell_ids)
        request = self.api.create_request()
        request.get_map_objects(cell_id=cell_ids,
                                since_timestamp_ms=since_timestamp_ms,
                                latitude=point[0],
                                longitude=point[1])
        # Throttle: wait out the remainder of the per-account GMO delay.
        diff = self.last_gmo + self.scan_delay - time()
        if diff > 0:
            await sleep(diff, loop=LOOP)
        responses = await self.call(request)
        self.last_gmo = self.last_request
        try:
            map_objects = responses['GET_MAP_OBJECTS']
            if map_objects.status != 1:
                error = 'GetMapObjects code for {}. Speed: {:.2f}'.format(self.username, self.speed)
                self.empty_visits += 1
                # Too many consecutive failures: rotate to another account.
                if self.empty_visits > 3:
                    reason = '{} empty visits'.format(self.empty_visits)
                    await self.swap_account(reason)
                raise ex.UnexpectedResponseException(error)
        except KeyError:
            await self.random_sleep(.5, 1)
            await self.get_player()
            raise ex.UnexpectedResponseException('Missing GetMapObjects response.')
        pokemon_seen = 0
        forts_seen = 0
        points_seen = 0
        # Only relevant when a specific spawn is targeted; trivially true otherwise.
        seen_target = not spawn_id
        if conf.ITEM_LIMITS and self.bag_items >= self.item_capacity:
            await self.clean_bag()
        for map_cell in map_objects.map_cells:
            request_time_ms = map_cell.current_timestamp_ms
            for pokemon in map_cell.wild_pokemons:
                pokemon_seen += 1
                normalized = self.normalize_pokemon(pokemon)
                seen_target = seen_target or normalized['spawn_id'] == spawn_id
                if (normalized not in SIGHTING_CACHE and
                        normalized not in MYSTERY_CACHE):
                    # Encounter for IVs when the account is high enough level
                    # and the config asks for it ('all' or a listed species).
                    if (self.player_level and self.player_level >= 30 and
                            (encounter_conf == 'all'
                             or (encounter_conf == 'some'
                                 and normalized['pokemon_id'] in conf.ENCOUNTER_IDS))):
                        try:
                            await self.encounter(normalized, pokemon.spawn_point_id)
                        except CancelledError:
                            # Still record the sighting before propagating.
                            db_proc.add(normalized)
                            raise
                        except Exception as e:
                            self.log.warning('{} during encounter', e.__class__.__name__)
                if notify_conf and self.notifier.eligible(normalized):
                    # Notification wants move data; encounter now if we
                    # didn't already above.
                    if encounter_conf and 'move_1' not in normalized:
                        try:
                            await self.encounter(normalized, pokemon.spawn_point_id)
                        except CancelledError:
                            db_proc.add(normalized)
                            raise
                        except Exception as e:
                            self.log.warning('{} during encounter', e.__class__.__name__)
                    LOOP.create_task(self.notifier.notify(normalized, map_objects.time_of_day))
                db_proc.add(normalized)
            for fort in map_cell.forts:
                if not fort.enabled:
                    continue
                forts_seen += 1
                if fort.type == 1:  # pokestops
                    if fort.HasField('lure_info'):
                        norm = self.normalize_lured(fort, request_time_ms)
                        pokemon_seen += 1
                        if norm not in SIGHTING_CACHE:
                            db_proc.add(norm)
                    # Spin only when allowed, with bag space, past the spin
                    # cooldown, and within the hash-budget throttle.
                    if (self.pokestops and
                            self.bag_items < self.item_capacity
                            and time() > self.next_spin
                            and (not conf.SMART_THROTTLE or
                                 self.smart_throttle(2))):
                        cooldown = fort.cooldown_complete_timestamp_ms
                        if not cooldown or time() > cooldown / 1000:
                            await self.spin_pokestop(fort)
                    if fort not in POKESTOP_CACHE:
                        fort_details = await self.check_pokestop(fort)
                        db_proc.add(self.normalize_pokestop(fort, fort_details))
                else:
                    # Gyms: fetch details only when uncached or changed.
                    if fort not in GYM_CACHE:
                        raw_gym_info = await self.check_gym(fort)
                        gym_info = {}
                        gym_info['name'] = raw_gym_info.name
                        gym_info['url'] = raw_gym_info.url
                        gym_info['desc'] = raw_gym_info.description
                        g = self.normalize_gym(fort, gym_info)
                        db_proc.add(g)
                    else:
                        g = GYM_CACHE.get(fort.id)
                        # Re-fetch details if the cached record is missing a
                        # name or the gym apparently moved.
                        if (g['name'] is None or
                                g['lat'] != fort.latitude or
                                g['lon'] != fort.longitude):
                            raw_gym_info = await self.check_gym(fort)
                            gym_info = {}
                            gym_info['name'] = raw_gym_info.name
                            gym_info['url'] = raw_gym_info.url
                            gym_info['desc'] = raw_gym_info.description
                            g = self.normalize_gym(fort, gym_info)
                            db_proc.add(g)
                        elif(g['last_modified'] != fort.last_modified_timestamp_ms // 1000):
                            g = self.normalize_gym(fort, g)
                            db_proc.add(g)
                    if fort.HasField('raid_info'):
                        if fort not in RAID_CACHE:
                            if conf.NOTIFY_RAIDS:
                                LOOP.create_task(self.notifier.notify_raid(fort))
                            raid = self.normalize_raid(fort)
                            db_proc.add(raid)
            if more_points:
                try:
                    for p in map_cell.spawn_points:
                        points_seen += 1
                        p = p.latitude, p.longitude
                        if spawns.have_point(p) or p not in bounds:
                            continue
                        spawns.cell_points.add(p)
                except KeyError:
                    pass
        if map_objects.client_weather:
            for w in map_objects.client_weather:
                weather = self.normalize_weather(w, map_objects.time_of_day)
                if weather not in WEATHER_CACHE:
                    db_proc.add(weather)
        if spawn_id:
            # Report whether the targeted spawn actually showed up.
            db_proc.add({
                'type': 'target',
                'seen': seen_target,
                'spawn_id': spawn_id})
        if (conf.INCUBATE_EGGS and self.unused_incubators
                and self.eggs and (not conf.SMART_THROTTLE or self.smart_throttle(1))):
            await self.incubate_eggs()
        if pokemon_seen > 0:
            self.error_code = ':'
            self.total_seen += pokemon_seen
            self.g['seen'] += pokemon_seen
            self.empty_visits = 0
        else:
            self.empty_visits += 1
            if forts_seen == 0:
                self.log.warning('Nothing seen by {}. Speed: {:.2f}', self.username, self.speed)
                self.error_code = '0 SEEN'
            else:
                self.error_code = ','
            if self.empty_visits > 3 and not bootstrap:
                reason = '{} empty visits'.format(self.empty_visits)
                await self.swap_account(reason)
        self.visits += 1
        if conf.MAP_WORKERS:
            self.worker_dict.update([(self.worker_no,
                (point, start, self.speed, self.total_seen,
                 self.visits, pokemon_seen))])
        self.log.info(
            'Point processed, {} Pokemon and {} forts seen!',
            pokemon_seen,
            forts_seen,
        )
        self.update_accounts_dict()
        # Clear the status glyph after a minute of inactivity.
        self.handle = LOOP.call_later(60, self.unset_code)
        return pokemon_seen + forts_seen + points_seen
def smart_throttle(self, requests=1):
try:
# https://en.wikipedia.org/wiki/Linear_equation#Two_variables
# e.g. hashes_left > 2.25*seconds_left+7.5, spare = 0.05, max = 150
spare = conf.SMART_THROTTLE * HashServer.status['maximum']
hashes_left = HashServer.status['remaining'] - requests
usable_per_second = (HashServer.status['maximum'] - spare) / 60
seconds_left = HashServer.status['period'] - time()
return hashes_left > usable_per_second * seconds_left + spare
except (TypeError, KeyError):
return False
async def check_pokestop(self, pokestop):
request = self.api.create_request()
request.fort_details(fort_id = pokestop.id,
latitude = pokestop.latitude,
longitude = pokestop.longitude)
responses = await self.call(request, action=1.2)
return responses['FORT_DETAILS']
async def check_gym(self, gym):
request = self.api.create_request()
request.gym_get_info(gym_id = gym.id,
player_lat_degrees = self.location[0],
player_lng_degrees = self.location[1],
gym_lat_degrees = gym.latitude,
gym_lng_degrees = gym.longitude)
responses = await self.call(request, action=1.2)
return responses['GYM_GET_INFO']
    async def spin_pokestop(self, pokestop):
        """Try to spin *pokestop* for items, handling each FORT_SEARCH result.

        Skips the spin (returning False) when the stop is out of range or
        the worker is moving too fast; otherwise updates self.next_spin
        with the configured cooldown when done.
        """
        self.error_code = '$'
        pokestop_location = pokestop.latitude, pokestop.longitude
        # randomize location up to ~1.5 meters
        self.simulate_jitter(amount=0.00001)
        distance = get_distance(self.location, pokestop_location)
        # permitted interaction distance - 4 (for some jitter leeway)
        # estimation of spinning speed limit
        if distance > 36 or self.speed > SPINNING_SPEED_LIMIT:
            self.error_code = '!'
            return False
        pokestop_details = await self.check_pokestop(pokestop)
        name = pokestop_details.name
        request = self.api.create_request()
        request.fort_search(fort_id = pokestop.id,
                            player_latitude = self.location[0],
                            player_longitude = self.location[1],
                            fort_latitude = pokestop_location[0],
                            fort_longitude = pokestop_location[1])
        responses = await self.call(request, action=2)
        try:
            result = responses['FORT_SEARCH'].result
        except KeyError:
            self.log.warning('Invalid Pokéstop spinning response.')
            self.error_code = '!'
            return
        if result == 1:
            self.log.info('Spun {}.', name)
            try:
                # Piggyback on the inventory delta to detect a level-up.
                inventory_items = responses['GET_INVENTORY'].inventory_delta.inventory_items
                for item in inventory_items:
                    level = item.inventory_item_data.player_stats.level
                    if level and level > self.player_level:
                        # level_up_rewards if level has changed
                        request = self.api.create_request()
                        request.level_up_rewards(level=level)
                        await self.call(request)
                        self.log.info('Level up, get rewards.')
                        self.player_level = level
                        break
            except KeyError:
                pass
        elif result == 2:
            self.log.info('The server said {} was out of spinning range. {:.1f}m {:.1f}{}',
                          name, distance, self.speed, UNIT_STRING)
        elif result == 3:
            self.log.warning('{} was in the cooldown period.', name)
        elif result == 4:
            self.log.warning('Could not spin {} because inventory was full. {}',
                             name, self.bag_items)
            # NOTE(review): presumably zeroing this forces a full inventory
            # refresh on the next request — confirm against self.call().
            self.inventory_timestamp = 0
        elif result == 5:
            self.log.warning('Could not spin {} because the daily limit was reached.', name)
            # Stop attempting spins for the rest of this account's session.
            self.pokestops = False
        else:
            self.log.warning('Failed spinning {}: {}', name, result)
        self.next_spin = time() + conf.SPIN_COOLDOWN
        self.error_code = '!'
    async def encounter(self, pokemon, spawn_id):
        """Encounter a wild Pokemon to obtain its moves, IVs and size.

        Walks the worker to within interaction range first when it is more
        than 48m away, then mutates the *pokemon* dict in place with the
        extra fields on success.
        """
        distance_to_pokemon = get_distance(self.location, (pokemon['lat'], pokemon['lon']))
        self.error_code = '~'
        if distance_to_pokemon > 48:
            # Move the fraction of the way that leaves ~47m to the target.
            percent = 1 - (47 / distance_to_pokemon)
            lat_change = (self.location[0] - pokemon['lat']) * percent
            lon_change = (self.location[1] - pokemon['lon']) * percent
            self.location = (
                self.location[0] - lat_change,
                self.location[1] - lon_change)
            self.altitude = uniform(self.altitude - 2, self.altitude + 2)
            self.api.set_position(*self.location, self.altitude)
            # NOTE(review): min() caps the travel delay at 1.1s even for
            # long moves — verify this shouldn't be max().
            delay_required = min((distance_to_pokemon * percent) / 8, 1.1)
        else:
            self.simulate_jitter()
            delay_required = 1.1
        await self.random_sleep(delay_required, delay_required + 1.5)
        request = self.api.create_request()
        request = request.encounter(encounter_id=pokemon['encounter_id'],
                                    spawn_point_id=spawn_id,
                                    player_latitude=self.location[0],
                                    player_longitude=self.location[1])
        responses = await self.call(request, action=2.25)
        try:
            result = responses['ENCOUNTER'].status
            if result == 1:
                # Success: enrich the sighting with encounter-only data.
                pdata = responses['ENCOUNTER'].wild_pokemon.pokemon_data
                pokemon['move_1'] = pdata.move_1
                pokemon['move_2'] = pdata.move_2
                pokemon['individual_attack'] = pdata.individual_attack
                pokemon['individual_defense'] = pdata.individual_defense
                pokemon['individual_stamina'] = pdata.individual_stamina
                pokemon['height'] = pdata.height_m
                pokemon['weight'] = pdata.weight_kg
                pokemon['gender'] = pdata.pokemon_display.gender
            elif result == 4:
                self.log.info('Pokemon that should be encountered has fled')
            elif result == 7:
                self.log.warning('Could not encounter #{} because the bag of {} is full.',
                                 pokemon['pokemon_id'], self.username)
                await self.swap_account(reason='full pkmn bag')
            else:
                self.log.error('Failed encountering #{}: {}', pokemon['pokemon_id'], result)
        except KeyError:
            self.log.error('Missing encounter response.')
        self.error_code = '!'
async def clean_bag(self):
self.error_code = '|'
rec_items = {}
limits = conf.ITEM_LIMITS
for item, count in self.items.items():
if item in limits and count > limits[item]:
discard = count - limits[item]
if discard > 50:
rec_items[item] = randint(50, discard)
else:
rec_items[item] = discard
removed = 0
for item, count in rec_items.items():
request = self.api.create_request()
request.recycle_inventory_item(item_id=item, count=count)
responses = await self.call(request, action=2)
try:
if responses['RECYCLE_INVENTORY_ITEM'].result != 1:
self.log.warning("Failed to remove item {}", item)
else:
removed += count
except KeyError:
self.log.warning("Failed to remove item {}", item)
self.log.info("Removed {} items", removed)
self.error_code = '!'
    async def incubate_eggs(self):
        """Assign unused incubators to eggs that are not already incubating.

        Eggs are tried in ascending km-target order.  A popped incubator is
        only applied when it is item 901 (presumably the unlimited
        incubator — confirm) or the egg needs more than 9 km.
        """
        # copy the deque, as self.call could modify it as it updates the inventory
        incubators = self.unused_incubators.copy()
        for egg in sorted(self.eggs.values(), key=lambda x: x.egg_km_walked_target):
            if not incubators:
                break
            if egg.egg_incubator_id:
                # Already incubating.
                continue
            inc = incubators.pop()
            # NOTE(review): when the condition below is false the popped
            # incubator is neither used nor returned to the local list, so
            # it disappears from unused_incubators until the next inventory
            # update — confirm this is intentional.
            if inc.item_id == 901 or egg.egg_km_walked_target > 9:
                request = self.api.create_request()
                request.use_item_egg_incubator(item_id=inc.id, pokemon_id=egg.id)
                responses = await self.call(request, action=4.5)
                try:
                    ret = responses['USE_ITEM_EGG_INCUBATOR'].result
                    if ret == 4:
                        self.log.warning("Failed to use incubator because it was already in use.")
                    elif ret != 1:
                        self.log.warning("Failed to apply incubator {} on {}, code: {}",
                                         inc.id, egg.id, ret)
                except (KeyError, AttributeError):
                    self.log.error('Invalid response to USE_ITEM_EGG_INCUBATOR')
        self.unused_incubators = incubators
    async def handle_captcha(self, challenge_url):
        """Solve a CAPTCHA challenge through the 2Captcha web service.

        Submits *challenge_url* to 2Captcha, polls until a token is
        available, then verifies the challenge with the game API.

        Raises CaptchaException when this account hit too many CAPTCHAs,
        and CaptchaSolveException when submission or solving fails.
        """
        if self.num_captchas >= conf.CAPTCHAS_ALLOWED:
            self.log.error("{} encountered too many CAPTCHAs, removing.", self.username)
            raise CaptchaException
        self.error_code = 'C'
        self.num_captchas += 1
        session = SessionManager.get()
        try:
            params = {
                'key': conf.CAPTCHA_KEY,
                'method': 'userrecaptcha',
                'googlekey': '6LeeTScTAAAAADqvhqVMhPpr_vB9D364Ia-1dSgK',
                'pageurl': challenge_url,
                'json': 1
            }
            async with session.post('http://2captcha.com/in.php', params=params) as resp:
                response = await resp.json(loads=json_loads)
        except CancelledError:
            raise
        except Exception as e:
            self.log.error('Got an error while trying to solve CAPTCHA. '
                           'Check your API Key and account balance.')
            raise CaptchaSolveException from e
        code = response.get('request')
        if response.get('status') != 1:
            # Key problems are permanent for this run: disable solving.
            if code in ('ERROR_WRONG_USER_KEY', 'ERROR_KEY_DOES_NOT_EXIST', 'ERROR_ZERO_BALANCE'):
                conf.CAPTCHA_KEY = None
                self.log.error('2Captcha reported: {}, disabling CAPTCHA solving', code)
            else:
                self.log.error("Failed to submit CAPTCHA for solving: {}", code)
            raise CaptchaSolveException
        try:
            # Get the response, retry every 5 seconds if it's not ready
            params = {
                'key': conf.CAPTCHA_KEY,
                'action': 'get',
                'id': code,
                'json': 1
            }
            while True:
                async with session.get("http://2captcha.com/res.php", params=params, timeout=20) as resp:
                    response = await resp.json(loads=json_loads)
                if response.get('request') != 'CAPCHA_NOT_READY':
                    break
                await sleep(5, loop=LOOP)
        except CancelledError:
            raise
        except Exception as e:
            self.log.error('Got an error while trying to solve CAPTCHA. '
                           'Check your API Key and account balance.')
            raise CaptchaSolveException from e
        token = response.get('request')
        if not response.get('status') == 1:
            self.log.error("Failed to get CAPTCHA response: {}", token)
            raise CaptchaSolveException
        request = self.api.create_request()
        request.verify_challenge(token=token)
        await self.call(request, action=4)
        self.update_accounts_dict()
        self.log.warning("Successfully solved CAPTCHA")
def simulate_jitter(self, amount=0.00002):
'''Slightly randomize location, by up to ~3 meters by default.'''
self.location = randomize_point(self.location, amount=amount)
self.altitude = uniform(self.altitude - 1, self.altitude + 1)
self.api.set_position(*self.location, self.altitude)
def update_accounts_dict(self):
self.account['location'] = self.location
self.account['time'] = self.last_request
self.account['inventory_timestamp'] = self.inventory_timestamp
if self.player_level:
self.account['level'] = self.player_level
try:
self.account['auth'] = self.api.auth_provider._access_token
self.account['expiry'] = self.api.auth_provider._access_token_expiry
except AttributeError:
pass
ACCOUNTS[self.username] = self.account
async def remove_account(self, warn=False):
self.error_code = 'REMOVING'
if warn:
self.account['warn'] = True
self.log.warning('Removing {} due to warn.', self.username)
else:
self.account['banned'] = True
self.log.warning('Removing {} due to ban.', self.username)
self.update_accounts_dict()
await self.new_account()
    async def bench_account(self):
        """Move the current account onto the CAPTCHA queue and take a new one."""
        self.error_code = 'BENCHING'
        self.log.warning('Swapping {} due to CAPTCHA.', self.username)
        self.account['captcha'] = True
        self.update_accounts_dict()
        self.captcha_queue.put(self.account)
        await self.new_account()
async def lock_and_swap(self, minutes):
async with self.busy:
self.error_code = 'SWAPPING'
h, m = divmod(int(minutes), 60)
if h:
timestr = '{}h{}m'.format(h, m)
else:
timestr = '{}m'.format(m)
self.log.warning('Swapping {} which had been running for {}.', self.username, timestr)
self.update_accounts_dict()
self.extra_queue.put(self.account)
await self.new_account()
    async def swap_account(self, reason=''):
        """Return the current account to the extra queue and take another.

        reason: short human-readable explanation for the log entry.
        """
        self.error_code = 'SWAPPING'
        self.log.warning('Swapping out {} because {}.', self.username, reason)
        self.update_accounts_dict()
        self.extra_queue.put(self.account)
        await self.new_account()
    async def new_account(self):
        """Pick the next account to use and (re)initialize session state.

        Prefers the CAPTCHA queue when a solver key is configured and
        either FAVOR_CAPTCHA is set or no spare accounts remain; otherwise
        takes from the extra queue, blocking in a thread when it is empty.
        """
        if (conf.CAPTCHA_KEY
                and (conf.FAVOR_CAPTCHA or self.extra_queue.empty())
                and not self.captcha_queue.empty()):
            self.account = self.captcha_queue.get()
        else:
            try:
                self.account = self.extra_queue.get_nowait()
            except Empty:
                # No spare account ready: block in a worker thread so the
                # event loop keeps running.
                self.account = await run_threaded(self.extra_queue.get)
        self.username = self.account['username']
        try:
            self.location = self.account['location'][:2]
        except KeyError:
            self.location = get_start_coords(self.worker_no)
        # NOTE(review): self.items here is still the *previous* account's
        # inventory (it is replaced below) — confirm that gating on it is
        # deliberate.
        self.inventory_timestamp = self.account.get('inventory_timestamp', 0) if self.items else 0
        self.player_level = self.account.get('level')
        self.last_request = self.account.get('time', 0)
        self.last_action = self.last_request
        self.last_gmo = self.last_request
        try:
            self.items = self.account['items']
            self.bag_items = sum(self.items.values())
        except KeyError:
            # NOTE(review): self.bag_items keeps the previous account's
            # count in this branch — verify it is refreshed elsewhere.
            self.account['items'] = {}
            self.items = self.account['items']
        self.num_captchas = 0
        self.eggs = {}
        self.unused_incubators = deque()
        self.initialize_api()
        self.error_code = None
    def unset_code(self):
        # Callback scheduled via LOOP.call_later to clear the status glyph.
        self.error_code = None
@staticmethod
def normalize_pokemon(raw, spawn_int=conf.SPAWN_ID_INT):
"""Normalizes data coming from API into something acceptable by db"""
tsm = raw.last_modified_timestamp_ms
tss = round(tsm / 1000)
tth = raw.time_till_hidden_ms
norm = {
'type': 'pokemon',
'encounter_id': raw.encounter_id,
'pokemon_id': raw.pokemon_data.pokemon_id,
'lat': raw.latitude,
'lon': raw.longitude,
'spawn_id': int(raw.spawn_point_id, 16) if spawn_int else raw.spawn_point_id,
'seen': tss
}
if tth > 0 and tth <= 90000:
norm['expire_timestamp'] = round((tsm + tth) / 1000)
norm['time_till_hidden'] = tth / 1000
norm['inferred'] = False
else:
despawn = spawns.get_despawn_time(norm['spawn_id'], tss)
if despawn:
norm['expire_timestamp'] = despawn
norm['time_till_hidden'] = despawn - tss
norm['inferred'] = True
else:
norm['type'] = 'mystery'
if raw.pokemon_data.pokemon_display:
if raw.pokemon_data.pokemon_display.form:
norm['display'] = raw.pokemon_data.pokemon_display.form
return norm
@staticmethod
def normalize_lured(raw, now):
lure = raw.lure_info
return {
'type': 'pokemon',
'encounter_id': lure.encounter_id,
'pokemon_id': lure.active_pokemon_id,
'expire_timestamp': lure.lure_expires_timestamp_ms // 1000,
'lat': raw.latitude,
'lon': raw.longitude,
'spawn_id': 0 if conf.SPAWN_ID_INT else 'LURED',
'time_till_hidden': (lure.lure_expires_timestamp_ms - now) / 1000,
'inferred': 'pokestop'
}
@staticmethod
def normalize_gym(raw_fort, gym_info):
return {
'type': 'fort',
'external_id': raw_fort.id,
'lat': raw_fort.latitude,
'lon': raw_fort.longitude,
'name': gym_info['name'],
'url': gym_info['url'],
'desc': gym_info['desc'],
'team': raw_fort.owned_by_team,
'prestige': raw_fort.gym_points,
'guard_pokemon_id': raw_fort.guard_pokemon_id,
'last_modified': raw_fort.last_modified_timestamp_ms // 1000,
'slots_available': raw_fort.gym_display.slots_available
}
@staticmethod
def normalize_raid(raw):
return {
'type': 'raid',
'external_id': raw.raid_info.raid_seed,
'fort_external_id': raw.id,
'lat': raw.latitude,
'lon': raw.longitude,
'level': raw.raid_info.raid_level,
'pokemon_id': raw.raid_info.raid_pokemon.pokemon_id if raw.raid_info.raid_pokemon else 0,
'move_1': raw.raid_info.raid_pokemon.move_1 if raw.raid_info.raid_pokemon else 0,
'move_2': raw.raid_info.raid_pokemon.move_2 if raw.raid_info.raid_pokemon else 0,
'time_spawn': raw.raid_info.raid_spawn_ms // 1000,
'time_battle': raw.raid_info.raid_battle_ms // 1000,
'time_end': raw.raid_info.raid_end_ms // 1000
}
@staticmethod
def normalize_pokestop(raw_fort, raw_fort_details):
lure_start = 0
if 501 in raw_fort.active_fort_modifier: #501 is the code for lure
lure_start = raw_fort.last_modified_timestamp_ms // 1000
return {
'type': 'pokestop',
'external_id': raw_fort.id,
'lat': raw_fort.latitude,
'lon': raw_fort.longitude,
'name': raw_fort_details.name,
'url': raw_fort_details.image_urls[0],
'desc': raw_fort_details.description,
'lure_start': lure_start,
}
@staticmethod
def normalize_weather(raw, time_of_day):
alert_severity = 0
warn = False
if raw.alerts:
for a in raw.alerts:
warn = warn or a.warn_weather
if a.severity > alert_severity:
alert_severity = a.severity
return {
'type': 'weather',
's2_cell_id': raw.s2_cell_id,
'condition': raw.gameplay_weather.gameplay_condition,
'alert_severity': alert_severity,
'warn': warn,
'day': time_of_day
}
    @staticmethod
    async def random_sleep(minimum=10.1, maximum=14, loop=LOOP):
        """Sleep for a uniformly random duration between *minimum* and
        *maximum* seconds."""
        await sleep(uniform(minimum, maximum), loop=loop)
    @property
    def start_time(self):
        """Start time reported by the underlying API client."""
        return self.api.start_time
@property
def status(self):
"""Returns status message to be displayed in status screen"""
if self.error_code:
msg = self.error_code
else:
msg = 'P{seen}'.format(
seen=self.total_seen
)
return '[W{worker_no}: {msg}]'.format(
worker_no=self.worker_no,
msg=msg
)
    @property
    def authenticated(self):
        """Whether the underlying auth provider reports being authenticated.

        False when no auth provider has been set up yet.
        """
        try:
            return self.api.auth_provider.authenticated
        except AttributeError:
            return False
class HandleStub:
    """Stand-in for an event-loop timer handle whose cancel() is a no-op."""
    def cancel(self):
        pass
class EmptyGMOException(Exception):
    """Raised when the GMO (GetMapObjects) response is empty."""
class CaptchaException(Exception):
    """Raised when a CAPTCHA is needed (or the per-account limit is hit)."""
class CaptchaSolveException(Exception):
    """Raised when solving a CAPTCHA via the external service has failed."""
| evenly-epic-mule/Monocle | monocle/worker.py | Python | mit | 60,070 | [
"VisIt"
] | 2490629a7529b13a4ed610346999021a929c78de0308559e9bb42f6962852839 |
from datetime import datetime
import json
from twisted.internet import reactor
from twisted.python.failure import Failure
from twisted import logger
from twisted.web.resource import Resource, NoResource
from twisted.web.server import NOT_DONE_YET, Site
import urllib
from matrix_gitter.markup import gitter_to_matrix
from matrix_gitter.utils import assert_http_200, Errback, JsonProducer, \
read_json_response, http_request
log = logger.Logger()
# Usage text sent to users in their private control room once a private
# chat with the bot is established.
# Fixes user-facing typos: "asterix" -> "asterisk", "Gitter room" -> "Gitter rooms".
HELP_MESSAGE = (
    "This service is entirely controlled through messages sent in private to "
    "this bot. The commands I recognize are:\n"
    " - `list`: displays the list of Gitter rooms you are in, that you can "
    "join in Matrix via the `invite` command. An asterisk indicates a room "
    "you are already in through Matrix.\n"
    " - `gjoin <gitter-room>`: join a new room on Gitter (you can then use "
    "`invite` to talk in it from here).\n"
    " - `gpart <gitter-room>`: leave a room on Gitter. This will kick you out "
    "of the Matrix room if you were in it.\n"
    " - `invite <gitter-room>`: create a Matrix room bridged to that Gitter "
    "room and invite you to join it.\n"
    " - `logout`: throw away your Gitter credentials. Kick you out of all the "
    "rooms you are in.")
def txid():
    """Return a unique ID for transactions.

    Uses the current UTC time in ISO-8601 format, which is increasing
    and unique at microsecond resolution.
    """
    now = datetime.utcnow()
    return now.isoformat()
class BaseMatrixResource(Resource):
    """Base class for resources called by the homeserver; checks token.

    Holds a reference to the bridge `api` object and validates the
    `access_token` query parameter the homeserver sends on every request
    before dispatching to the normal twisted render machinery.
    """
    def __init__(self, api):
        self.api = api
        Resource.__init__(self)

    def matrix_request(self, *args, **kwargs):
        # Convenience proxy to the bridge API's Matrix request helper.
        return self.api.matrix_request(*args, **kwargs)

    def render(self, request):
        request.setHeader(b"content-type", b"application/json")
        tokens = request.args.get('access_token')
        token = tokens[0] if tokens else None
        if not token:
            log.info("No access token")
            request.setResponseCode(401)
            return '{"errcode": "twisted.unauthorized"}'
        if token != self.api.token_hs:
            log.info("Wrong token: {got!r} != {expected!r}",
                     got=token, expected=self.api.token_hs)
            request.setResponseCode(403)
            return '{"errcode": "M_FORBIDDEN"}'
        return Resource.render(self, request)
class Transaction(BaseMatrixResource):
"""`/transactions/<txid>` endpoint, where the homeserver delivers events.
This reacts to events from Matrix.
"""
isLeaf = True
def render_PUT(self, request):
if len(request.postpath) == 1:
transaction, = request.postpath
else:
raise NoResource
events = json.load(request.content)['events']
for event in events:
user = event['user_id']
room = event['room_id']
log.info(" {user} on {room}",
user=user, room=room)
log.info(" {type}", type=event['type'])
log.info(" {content}", content=event['content'])
if (self.api.is_virtualuser(user) or
self.api.is_virtualuser(event.get('state_key'))):
pass
elif (event['type'] == 'm.room.member' and
event['content'].get('membership') == 'invite' and
event['state_key'] == self.api.bot_fullname):
# We've been invited to a room, join it
# FIXME: Remember rooms we've left from private_room_members
log.info("Joining room {room}", room=room)
d = self.matrix_request(
'POST',
'_matrix/client/r0/join/%s',
{},
room)
d.addErrback(Errback(log, "Error joining room {room}",
room=room))
elif (event['type'] == 'm.room.member' and
event['content'].get('membership') == 'join'):
# We or someone else joined a room
if self.api.get_room(room) is None:
# We want to be in private chats with users, but either we
# or them may invite; this indicates that the second party
# has joined, or that we have joined an empty room.
# Request the list of members to find out
d = self.matrix_request(
'GET',
'_matrix/client/r0/rooms/%s/members',
None,
room,
limit='3')
d.addCallback(read_json_response)
d.addCallback(self.private_room_members, room)
d.addErrback(Errback(
log, "Error getting members of room {room}",
room=room))
# We don't care about joins to linked rooms, they have to be
# virtual users
elif (event['type'] == 'm.room.member' and
event['content'].get('membership') != 'join' and
event['content'].get('membership') != 'invite'):
# Someone left a room
room_obj = self.api.get_room(room)
# It's a linked room: stop forwarding
if room_obj is not None:
log.info("User {user} left room {room}, destroying",
user=user, room=room)
room_obj.destroy()
elif user != self.api.bot_fullname:
# It is a user's private room
user_obj = self.api.get_user(user)
if (user_obj is not None and
room == user_obj.matrix_private_room):
log.info("User {user} left his private room {room}, "
"leaving",
user=user, room=room)
self.api.forget_private_room(room)
d = self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/leave',
{},
room)
d.addCallback(lambda r: self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/forget',
{},
room))
d.addErrback(Errback(log, "Error leaving room {room}",
room=room))
elif (event['type'] == 'm.room.message' and
event['content'].get('msgtype') == 'm.text'):
# Text message to a room
if user != self.api.bot_fullname:
room_obj = self.api.get_room(room)
msg = event['content']['body']
# If it's a linked room: forward
if room_obj is not None:
if user == room_obj.user.matrix_username:
log.info("Forwarding to Gitter")
room_obj.to_gitter(msg)
# If it's a message on a private room, handle a command
else:
user_obj = self.api.get_user(user)
if (user_obj is not None and
room == user_obj.matrix_private_room):
if user_obj.gitter_access_token is not None:
self.command(user_obj, msg)
else:
self.api.private_message(
user_obj,
"You are not logged in.",
False)
return '{}'
def command(self, user_obj, msg):
"""Handle a command receive from a user in private chat.
"""
log.info("Got command from user {user}: {msg!r}",
user=user_obj.matrix_username, msg=msg)
first_word, rest = (msg.split(None, 1) + [''])[:2]
first_word = first_word.strip().lower()
rest = rest.strip()
if first_word == 'list':
if not rest:
d = self.api.get_gitter_user_rooms(user_obj)
d.addCallback(self._send_room_list, user_obj)
d.addErrback(Errback(
log, "Error getting list of rooms for user {user}",
user=user_obj.github_username))
return
elif first_word == 'gjoin':
d = self.api.peek_gitter_room(user_obj, rest)
d.addCallback(lambda room: self.api.join_gitter_room(user_obj,
room['id']))
d.addBoth(self._room_joined, user_obj, rest)
return
elif first_word == 'gpart':
room_obj = self.api.get_gitter_room(user_obj.matrix_username, rest)
if room_obj is not None:
d = self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/leave',
{},
room_obj.matrix_room)
d.addCallback(lambda r: self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/forget',
{},
room_obj.matrix_room))
room_obj.destroy()
d = self.api.leave_gitter_room(user_obj, rest)
d.addBoth(self._room_left, user_obj, rest)
return
elif first_word == 'invite':
room_obj = self.api.get_gitter_room(user_obj.matrix_username, rest)
# Room already exist: invite anyway and display a message
if room_obj is not None:
d = self.api.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/invite',
{'user_id': user_obj.matrix_username},
room_obj.matrix_room)
d.addErrback(Errback(
log, "Error inviting {user} to bridged room {matrix}",
user=user_obj.matrix_username,
matrix=room_obj.matrix_room))
self.api.private_message(
user_obj,
"You are already on room {gitter}: {matrix}".format(
gitter=room_obj.gitter_room_name,
matrix=room_obj.matrix_room),
False)
else:
# Check if the room is available
# FIXME: We want to know if the user is on it
d = self.api.peek_gitter_room(user_obj, rest)
d.addBoth(self._new_room, user_obj, rest)
return
elif first_word == 'logout':
for room_obj in self.api.get_all_rooms(user_obj.matrix_username):
room_obj.destroy()
self.api.logout(user_obj.matrix_username)
self.api.private_message(user_obj, "You have been logged out.",
False)
d = self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/leave',
{},
user_obj.matrix_private_room)
d.addCallback(lambda r: self.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/forget',
{},
user_obj.matrix_private_room))
self.api.forget_private_room(user_obj.matrix_private_room)
return
self.api.private_message(user_obj, "Invalid command!", False)
def _send_room_list(self, rooms, user_obj):
log.info("Got room list for user {user} ({nb} rooms)",
user=user_obj.matrix_username, nb=len(rooms))
msg = ["Rooms you are currently in on Gitter (* indicates you are in "
"that room from Matrix as well):"]
for gitter_id, gitter_name, matrix_name in sorted(rooms,
key=lambda r: r[1]):
msg.append(" - %s%s" % (gitter_name,
" *" if matrix_name is not None else ""))
self.api.private_message(user_obj, "\n".join(msg), False)
def _room_joined(self, result, user_obj, room):
if isinstance(result, Failure):
log.failure("Failed to join room {room}", result, room=room)
msg = "Couldn't join room {room}"
else:
msg = "Successfully joined room {room}"
self.api.private_message(user_obj, msg.format(room=room), False)
def _room_left(self, result, user_obj, room):
if isinstance(result, Failure):
log.failure("Failed to leave room {room}", result, room=room)
msg = "Couldn't leave room {room}"
else:
msg = "Successfully left room {room}"
self.api.private_message(user_obj, msg.format(room=room), False)
def _new_room(self, result, user_obj, gitter_room):
if isinstance(result, Failure):
log.failure("Couldn't get info for room {room}", result,
room=gitter_room)
self.api.private_message(
user_obj,
"Can't access room {room}".format(room=gitter_room),
False)
return
d = self.matrix_request(
'POST',
'_matrix/client/r0/createRoom',
{'preset': 'private_chat',
'name': "%s (Gitter)" % gitter_room})
# FIXME: don't allow the user to invite others to that room
d.addCallback(read_json_response)
d.addCallback(self._bridge_rooms, user_obj, result)
d.addErrback(Errback(log, "Couldn't create a room"))
def _bridge_rooms(self, (response, content), user_obj, gitter_room_obj):
matrix_room = content['room_id']
self.api.bridge_rooms(user_obj, matrix_room, gitter_room_obj)
d = self.api.matrix_request(
'POST',
'_matrix/client/r0/rooms/%s/invite',
{'user_id': user_obj.matrix_username},
matrix_room)
# FIXME: Should we only start forwarding when the user joins?
def errback_func(err):
log.failure("Couldn't invite user to new room", err)
room_obj = self.api.get_room(matrix_room)
if room_obj is not None:
room_obj.destroy()
d.addErrback(errback_func)
    def private_room_members(self, (response, content), room):
        """Get list of members on what should be a private room.
        If there is one member, wait for someone to join.
        If there are two members, this is now the private room for that user.
        If there are more members, leave it.
        """
        # NOTE: Python 2 tuple-parameter unpacking in the signature.
        # Only count members whose membership state is 'join'
        members = [m['state_key']
                   for m in content['chunk']
                   if m['content']['membership'] == 'join']
        log.info("Room members for {room}: {members}",
                 room=room,
                 members=members)
        if len(members) > 2:
            # Not a 1:1 chat -- leave and forget the room
            log.info("Too many members in room {room}, leaving", room=room)
            d = self.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/leave',
                {},
                room)
            d.addCallback(lambda r: self.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/forget',
                {},
                room))
            d.addErrback(Errback(log, "Error leaving room {room}", room=room))
            self.api.forget_private_room(room)
        else:
            # Find the member that's not us
            user = [m for m in members if m != self.api.bot_fullname]
            if len(user) == 1:
                user_obj = self.api.get_user(user[0])
                # Register this room as the private chat with that user
                self.api.register_private_room(user_obj.matrix_username, room)
                user_obj.matrix_private_room = room
                # Say hi
                msg = ("Hi {user}! I am the interface to this Matrix-Gitter "
                       "bridge.").format(
                    user=user_obj.matrix_username.split(':', 1)[0])
                if user_obj.github_username is not None:
                    # Already authenticated with Gitter/GitHub
                    msg += "\nYou are currently logged in as {gh}.\n".format(
                        gh=user_obj.github_username)
                    msg += HELP_MESSAGE
                else:
                    # Needs to go through the OAuth webapp first
                    msg += ("\nYou will need to log in to your Gitter account "
                            "or sign up for one before I can do anything for "
                            "you.\n"
                            "You can do this now using this link: "
                            "{link}").format(
                        link=self.api.gitter_auth_link(
                            user_obj.matrix_username))
                self.api.private_message(user_obj, msg, False)
class Users(BaseMatrixResource):
    """Endpoint that creates users the homeserver asks about.

    Registers virtual users in the reserved 'gitter*' namespace on demand.
    """
    # FIXME: useless since we create all the needed virtual users?
    isLeaf = True
    def _end(self, request):
        # Finish the deferred HTTP request with an empty JSON object
        log.info("callback done")
        request.write('{}')
        request.finish()
    def render_GET(self, request):
        """Handle GET /users/<user_id>: register the requested virtual user."""
        if len(request.postpath) == 1:
            user, = request.postpath
        else:
            raise NoResource
        log.info("Requested user {user}", user=user)
        # '@localpart:domain' -> 'localpart'
        user_localpart = user.split(':', 1)[0][1:]
        if not user_localpart.startswith('gitter'):
            # Not in our namespace: report it as unknown
            request.setResponseCode(404)
            return '{"errcode": "twisted.no_such_user"}'
        d = self.matrix_request(
            'POST',
            '_matrix/client/r0/register',
            {'type': 'm.login.application_service',
             'username': user_localpart})
        d.addErrback(Errback(log, "Error creating user {user}", user=user))
        d.addBoth(lambda res: self._end(request))
        return NOT_DONE_YET
class MatrixAPI(object):
    """Matrix interface.
    This communicates with a Matrix homeserver as an application service.
    """
    def __init__(self, bridge, port, homeserver_url, homeserver_domain,
                 botname, token_as, token_hs, debug=False):
        """Set up the HTTP endpoints and register the bridge bot user.

        :param bridge: Bridge object coordinating Matrix and Gitter.
        :param port: TCP port on which the application service listens.
        :param homeserver_url: Base URL of the Matrix homeserver.
        :param homeserver_domain: Domain part of Matrix user IDs.
        :param botname: Username of the bridge bot, with or without the
            leading '@' and ':domain' parts.
        :param token_as: Application-service token (used for our requests).
        :param token_hs: Homeserver token (kept to authenticate requests
            the homeserver makes to us).
        :param debug: If True, show tracebacks in HTTP error responses.
        """
        self.bridge = bridge
        self.homeserver_url = homeserver_url
        self.homeserver_domain = homeserver_domain
        self.token_as = token_as
        self.token_hs = token_hs
        # Normalize the bot name: strip '@' and a ':domain' suffix
        if botname[0] == '@':
            botname = botname[1:]
        if ':' in botname:
            botname, domain = botname.split(':', 1)
            if domain != homeserver_domain:
                raise ValueError("Bot domain doesn't match homeserver")
        self.bot_username = botname
        self.bot_fullname = '@%s:%s' % (botname, homeserver_domain)
        # Create virtual user for bot
        if not self.bridge.virtualuser_exists('gitter'):
            log.info("Creating user gitter")
            d = self.matrix_request(
                'POST',
                '_matrix/client/r0/register',
                {'type': 'm.login.application_service',
                 'username': 'gitter'})
            self.bridge.add_virtualuser('gitter')
            d.addErrback(Errback(log, "Error creating user 'gitter' for the "
                                      "bridge; usage over federated rooms "
                                      "might not work correctly"))
        # HTTP endpoints the homeserver calls (transactions & user lookups)
        root = Resource()
        root.putChild('transactions', Transaction(self))
        root.putChild('users', Users(self))
        site = Site(root)
        site.displayTracebacks = debug
        site.logRequest = True
        reactor.listenTCP(port, site)
    def is_virtualuser(self, user):
        """Whether `user` is one of the virtual '@gitter_*' users we own."""
        if user is None:
            return False
        user = user.split(':', 1)
        if len(user) != 2:
            return False
        local, domain = user
        # `local` still contains the leading '@'
        return (local.startswith('@gitter_') and
                domain == self.homeserver_domain)
    def matrix_request(self, method, uri, content, *args, **kwargs):
        """Matrix client->homeserver API request.

        Positional `args` are URL-quoted and substituted into the %s
        placeholders of `uri`; remaining keyword arguments become
        query-string parameters (e.g. ``user_id``). Returns a Deferred
        with the HTTP response; unless ``assert200=False`` is passed,
        non-200 responses become errors.
        """
        if args:
            uri = uri % tuple(urllib.quote(a) for a in args)
        if isinstance(uri, unicode):
            uri = uri.encode('ascii')
        assert200 = kwargs.pop('assert200', True)
        getargs = {'access_token': self.token_as}
        getargs.update(kwargs)
        uri = '%s%s?%s' % (
            self.homeserver_url,
            uri,
            urllib.urlencode(getargs))
        log.debug("matrix_request {method} {uri} {content!r}",
                  method=method, uri=uri, content=content)
        d = http_request(
            method,
            uri,
            {'content-type': 'application/json',
             'accept': 'application/json'},
            JsonProducer(content) if content is not None else None)
        if assert200:
            d.addCallback(assert_http_200)
        return d
    def gitter_info_set(self, user_obj):
        """Called from the Bridge when we get a user's Gitter info.
        This happens when a user authenticates through the OAuth webapp.
        """
        # If we have a private chat with the user, tell him he logged in,
        # else start new private chat
        self.private_message(user_obj,
                             "You are now logged in as {gh}.\n{help}".format(
                                 gh=user_obj.github_username,
                                 help=HELP_MESSAGE),
                             True)
    def forward_message(self, room, username, msg):
        """Called from the Bridge to send a forwarded message to a room.
        Creates the user, invites him on the room, then speaks the message.
        """
        self.ForwardMessage(self, username, room, msg)
    class ForwardMessage(object):
        """Message forwarding state-machine.

        Walks through virtual-user creation, room invite/join and finally
        message delivery, retrying an earlier step at most once when a
        later one fails (see diagram below).
        """
        #            +---------------+
        #            |forward message|
        #            +---------------+
        #                    | created=False
        #                    | joined=False
        #                    v
        #                +------------+
        #          NO    |user exists?|
        #         +------+<---------------+------------+
        # +------->|CREATE| created=True  |
        # created=False?  +------+        | YES
        #     ^              |            v
        #     |              v     NO  +-------------+
        #     |  +---------->+------+<---------------|user on room?|
        #     |  |           |INVITE| joined=True    +-------------+
        #     |  |--------+------+        |
        #     |  | fail      |            | YES
        #     |  |           v            |
        #     |  |        +----+          |
        #     |  +---------|JOIN|          |
        #     |    fail   +----+          |
        #     |              |            |
        # joined=False?      v            |
        #     ^          +-------+ <--------------------+
        #     +-----------+MESSAGE|
        #       fail      +-------+
        def __init__(self, matrix, username, room, message):
            # Flags recording which steps have already been attempted so a
            # failure retries each earlier step at most once.
            self._created = False
            self._joined = False
            self.matrix = matrix
            self.username = username
            self.matrix_user = '@gitter_%s:%s' % (
                username, self.matrix.homeserver_domain)
            self.room = room
            self.message = message
            if not self.matrix.bridge.virtualuser_exists(
                    'gitter_%s' % username):
                self.create_user()
            else:
                if self.matrix.bridge.is_virtualuser_on_room(
                        'gitter_%s' % username,
                        room):
                    self.send_message()
                else:
                    self.invite_user()
        def fail(self, err):
            # Terminal failure: log and give up
            log.failure("Error posting message to Matrix room {room}", err,
                        room=self.room)
        def create_user(self, result=None):
            # Register the virtual user on the homeserver
            self._created = True
            log.info("Creating user {user}", user=self.username)
            d = self.matrix.matrix_request(
                'POST',
                '_matrix/client/r0/register',
                {'type': 'm.login.application_service',
                 'username': 'gitter_%s' % self.username},
                assert200=False)
            d.addCallbacks(self.set_user_name, self.fail)
        def set_user_name(self, result=None):
            # Give the virtual user a human-readable display name
            d = self.matrix.matrix_request(
                'PUT',
                '_matrix/client/r0/profile/%s/displayname',
                {'displayname': "%s (Gitter)" % self.username},
                self.matrix_user,
                assert200=False,
                user_id=self.matrix_user)
            d.addCallbacks(self.user_created, self.fail)
        def user_created(self, result=None):
            # Record the new user locally, then continue with the invite
            self.matrix.bridge.add_virtualuser('gitter_%s' % self.username)
            self.invite_user()
        def fail_join(self, err):
            # Joining failed: maybe the user doesn't exist yet; create once
            if not self._created:
                self.create_user()
            else:
                self.fail(err)
        def invite_user(self, result=None):
            self._joined = True
            d = self.matrix.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/invite',
                {'user_id': self.matrix_user},
                self.room,
                assert200=False)
            d.addCallbacks(self.join_user, self.fail_join)
        def join_user(self, result=None):
            # Accept the invitation on behalf of the virtual user
            d = self.matrix.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/join',
                {},
                self.room,
                user_id=self.matrix_user)
            d.addCallbacks(self.user_joined, self.fail_join)
        def user_joined(self, result=None):
            # Record room membership locally, then deliver the message
            self.matrix.bridge.add_virtualuser_on_room(
                'gitter_%s' % self.username,
                self.room)
            self.send_message()
        def fail_message(self, err):
            # Sending failed: maybe the user isn't on the room; invite once
            if not self._joined:
                self.invite_user()
            else:
                self.fail(err)
        def send_message(self, result=None):
            # Speak the message as the virtual user, with an HTML rendering
            d = self.matrix.matrix_request(
                'PUT',
                '_matrix/client/r0/rooms/%s/send/m.room.message/%s',
                {'msgtype': 'm.text',
                 'body': self.message,
                 'format': 'org.matrix.custom.html',
                 'formatted_body': gitter_to_matrix(self.message)},
                self.room,
                txid(),
                user_id=self.matrix_user)
            d.addErrback(self.fail_message)
    def private_message(self, user_obj, msg, invite):
        """Send a message to a user on the appropriate private room.
        If we have no private room with the requested user, `invite` indicates
        whether to create a private room and invite him.
        """
        if user_obj.matrix_private_room is not None:
            self.matrix_request(
                'PUT',
                '_matrix/client/r0/rooms/%s/send/m.room.message/%s',
                {'msgtype': 'm.text',
                 'body': msg},
                user_obj.matrix_private_room,
                txid())
        elif invite:
            d = self.matrix_request(
                'POST',
                '_matrix/client/r0/createRoom',
                {'invite': [user_obj.matrix_username],
                 'preset': 'private_chat'})
            d.addCallback(read_json_response)
            d.addCallback(self._private_chat_created, user_obj.matrix_username)
            d.addErrback(Errback(
                log, "Error creating private room for user {user}",
                user=user_obj.matrix_username))
    def _private_chat_created(self, (request, content), user):
        """Record the newly created private room for `user`."""
        # NOTE: Python 2 tuple-parameter unpacking in the signature.
        room = content['room_id']
        log.info("Created private chat with user {user}: {room}",
                 user=user, room=room)
        self.register_private_room(user, room)
    def register_private_room(self, user, room):
        """Set the private room with a user, getting rid of the previous one.

        Returns True if a different previous room existed (and is being
        left), False otherwise.
        """
        log.info("Storing new private room for user {user}: {room}",
                 user=user, room=room)
        previous_room = self.bridge.set_user_private_matrix_room(user, room)
        # If there was already a private room, leave it
        if previous_room is not None and previous_room != room:
            log.info("Leaving previous private room {room}",
                     room=previous_room)
            d = self.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/leave',
                {},
                previous_room)
            d.addCallback(lambda r: self.matrix_request(
                'POST',
                '_matrix/client/r0/rooms/%s/forget',
                {},
                previous_room))
            d.addErrback(Errback(log, "Error leaving room {room}",
                                 room=previous_room))
            return True
        else:
            return False
    def forget_private_room(self, room):
        """Forget a Matrix room that was someone's private room.
        """
        self.bridge.forget_private_matrix_room(room)
    def get_room(self, room=None):
        """Find a linked room from its Matrix ID.
        """
        return self.bridge.get_room(matrix_room=room)
    def get_gitter_room(self, matrix_username, gitter_room):
        """Find a linked room from the user and Gitter name.
        """
        return self.bridge.get_room(matrix_username=matrix_username,
                                    gitter_room_name=gitter_room)
    def get_all_rooms(self, user):
        """Get the list of all linked rooms for a given Matrix user.
        """
        return self.bridge.get_all_rooms(user)
    def logout(self, user):
        """Removes a user's Gitter info from the database.
        This assumes all his linked rooms are already gone.
        """
        self.bridge.logout(user)
    def get_user(self, user):
        """Find a user in the database from its Matrix username.

        Creates a database record if the user was unknown.
        """
        user_obj = self.bridge.get_user(matrix_user=user)
        if user_obj is None:
            return self.bridge.create_user(user)
        return user_obj
    def get_gitter_user_rooms(self, user_obj):
        """List the Gitter rooms a user is in.
        The user is in these on Gitter and not necessarily through Matrix.
        """
        return self.bridge.get_gitter_user_rooms(user_obj)
    def peek_gitter_room(self, user_obj, gitter_room_name):
        """Get info on a Gitter room without joining it.
        """
        # FIXME: This should indicate if the user is on it
        return self.bridge.peek_gitter_room(user_obj, gitter_room_name)
    def join_gitter_room(self, user_obj, gitter_room_id):
        """Join a Gitter room.
        This happens on Gitter only and does not mean the room becomes linked.
        """
        return self.bridge.join_gitter_room(user_obj, gitter_room_id)
    def leave_gitter_room(self, user_obj, gitter_room_name):
        """Leave a Gitter room.
        This assumes the room is no longer linked for the user.
        """
        return self.bridge.leave_gitter_room(user_obj, gitter_room_name)
    def bridge_rooms(self, user_obj, matrix_room, gitter_room_obj):
        """Setup a linked room and start forwarding.
        """
        self.bridge.bridge_rooms(user_obj, matrix_room, gitter_room_obj)
    def gitter_auth_link(self, user):
        """Get the link a user should visit to authenticate.
        """
        return self.bridge.gitter_auth_link(user)
| remram44/matrix-appservice-gitter-twisted | matrix_gitter/matrix.py | Python | bsd-3-clause | 32,619 | [
"VisIt"
] | 1548d4a32ee6f30507f40965cf8bb9baa19bcee0e257456b4050f42f99f84631 |
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import glob
import os
import re
import shutil
import time
from collections import Counter, defaultdict
from typing import Optional, Dict, List, Tuple, Union, Iterable, Any, TYPE_CHECKING
import pickle as pkl
import networkx as nx
import numpy as np
import scipy.spatial
from scipy import spatial
from . import super_segmentation_helper as ssh
from .rep_helper import knossos_ml_from_sso, colorcode_vertices, knossos_ml_from_svixs, subfold_from_ix_SSO, \
SegmentationBase
from .segmentation import SegmentationObject, SegmentationDataset
from .segmentation_helper import load_so_attr_bulk
from .. import global_params
from ..backend.storage import CompressedStorage, MeshStorage
from ..handler.basics import write_txt2kzip, get_filepaths_from_dir, safe_copy, coordpath2anno, load_pkl2obj, \
write_obj2pkl, flatten_list, chunkify, data2kzip
from ..handler.config import DynConfig
from ..handler.prediction import certainty_estimate
from ..mp import batchjob_utils as qu
from ..mp import mp_utils as sm
from ..proc.graphs import split_glia, split_subcc_join, create_graph_from_coords
from ..proc.meshes import write_mesh2kzip, merge_someshes, compartmentalize_mesh, mesh2obj_file, write_meshes2kzip, \
_calc_pca_components
from ..proc.rendering import render_sampled_sso, load_rendering_func, render_sso_coords, render_sso_coords_index_views
from ..proc.image import normalize_img
from ..proc.sd_proc import predict_sos_views
from ..reps import log_reps
if TYPE_CHECKING:
from .super_segmentation_dataset import SuperSegmentationDataset
from knossos_utils import skeleton
from knossos_utils.skeleton_utils import load_skeleton as load_skeleton_kzip
from knossos_utils.skeleton_utils import write_skeleton as write_skeleton_kzip
try:
from knossos_utils import mergelist_tools
except ImportError:
from knossos_utils import mergelist_tools_fallback as mergelist_tools
MeshType = Union[Tuple[np.ndarray, np.ndarray, np.ndarray], List[np.ndarray],
Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]
class SuperSegmentationObject(SegmentationBase):
"""
Class instances represent individual neuron reconstructions, defined by a
list of agglomerated supervoxels (see :class:`~syconn.reps.segmentation.SegmentationObject`).
Examples:
This class can be used to create a cell reconstruction object after successful executing
:func:`~syconn.exec.exec_inference.run_create_neuron_ssd` as follows::
from syconn import global_params
# import SuperSegmentationObject and SuperSegmentationDataset
from syconn.reps.super_segmentation import *
# set the current working directory SyConn-wide
global_params.wd = '~/SyConn/example_cube1/'
ssd = SuperSegmentationDataset()
cell_reconstr_ids = ssd.ssv_ids
# call constructor explicitly ...
cell = SuperSegmentationObject(cell_reconstr_ids[0])
# ... or via the SuperSegmentationDataset - both contain the same data
cell = ssd.get_super_segmentation_object(cell_reconstr_ids[0])
# inspect existing attributes
cell.load_attr_dict()
print(cell.attr_dict.keys())
To cache SegmentationObject attributes use the ``cache_properties`` argument during initialization of the
:class:`~syconn.reps.segmentation.SegmentationDataset` and pass it on to the ``SuperSegmentationDataset``
instantiation:
sd_mi = SegmentationDataset(obj_type='mi', cache_properties=['rep_coord'])
ssd = SuperSegmentationDataset(sd_lookup=dict(mi=sd_mi))
ssv = ssd.get_super_segmentation_object(ssd.ssv_ids[0])
# :class:`~syconn.reps.segmentation.SegmentationObject` from ``mis`` don't require loading ``rep_coord``
# from its storage file.
for mi in ssv.mis:
rc = mi.rep_coord # normally this requires to load the attribute dict storage file.
Subsequent analysis steps (see the ``SyConn/scripts/example_run/start.py``) augment the
cell reconstruction with more properties::
# to iterate over all cell reconstructions use the generator:
for cell in ssd.ssvs:
# e.g. collect some analysis results
cell.load_attr_dict()
n_synapses = len(cell.syn_ssv)
celltype = cell.attr_dict["celltype_cnn_e3"]
...
# write out cell mesh
cell.mesh2kzip('~/cell{}_mesh.k.zip'.format(cell.id))
# write out cell mesh and meshes of all existing cell organelles
cell.meshes2kzip('~/cell{}_meshes.k.zip'.format(cell.id))
# color the cell mesh according to a semantic prediction
cell.semseg2mesh(semseg_key='spiness', dest_path='~/cell{}_spines.k.zip'.format(cell.id))
See also ``SyConn/docs/api.md`` (WIP).
Attributes:
attr_dict: Attribute dictionary which serves as a general-purpose container. Accessed via
the :class:`~syconn.backend.storage.AttributeDict` interface. After successfully
executing :func:`syconn.exec.exec_init.run_create_neuron_ssd`
and subsequent analysis steps (see the ``SyConn/scripts/example_run/start.py``) the
following keys are present in :attr:`~attr_dict`:
* 'id': ID array, identical to :py:attr:`~ssv_ids`. All other properties have the same
ordering as this array, i.e. if SSV with ID 1234 has index 42 in the 'id'-array
you will find its properties at index 42 in all other cache-arrays.
* 'bounding_box': Bounding box of every SSV.
* 'size': Number voxels of each SSV.
* 'rep_coord': Representative coordinates for each SSV.
* 'sv': Supervoxel IDs for every SSV.
* 'sample_locations': Lists of rendering locations for each SSV. Each entry is a
list (length corresponds to the number of supervoxels) of coordinate arrays for
the corresponding SSV.
* 'celltype_cnn_e3': Celltype classifications based on the elektronn3 CMN.
* 'celltype_cnn_e3_probas': Celltype logits for the different types as an array of
shape (M, C; M: Number of predicted random multi-view sets, C: Number of
classes). In the example run there are currently 9 predicted classes:
STN=0, DA=1, MSN=2, LMAN=3, HVC=4, GP=5, FS=6, TAN=7, INT=8.
* 'syn_ssv': Synapse IDs assigned to each SSV.
* 'syn_sign_ratio': Area-weighted atio of symmetric synapses, see
:func:`~syconn.reps.super_segmentation_object.SuperSegmentationObject.syn_sign_ratio`.
* 'sj': Synaptic junction object IDs which were mapped to each SSV. These are used
for view rendering and also to generate the 'syn_ssv' objects in combination
with contact sites (see corresponding section in the documentation).
* 'mapping_sj_ids': Synaptic junction objects which overlap with the respective
SSVs.
* 'mapping_sj_ratios': Overlap ratio of the synaptic junctions.
* 'vc': Vesicle clouds mapped to each SSV.
* 'mapping_vc_ids': Vesicle cloud objects which overlap with the respective SSVs.
* 'mapping_vc_ratios': Overlap ratio of the vesicle clouds.
* 'mi': Mitochondria mapped to each SSV.
* 'mapping_mi_ids': Mitochondria objects which overlap with the respective SSVs.
* 'mapping_mi_ratios': Overlap ratio of the mitochondria.
skeleton: The skeleton representation of this super-supervoxel. Keys which are
currently in use:
* 'nodes': Array of the node coordinates (in nanometers).
* 'edges': Edges between nodes.
* 'diameters': Estimated cell diameter at every node.
* various node properties, e.g. 'axoness' and 'axoness_avg10000'. Check the
available keys ``sso.skeleton.keys()`` of an initialized :class:`~SuperSegmentationObject`
object ``sso`` after loading the skeleton (``sso.load_skeleton()``).
enable_locking_so: Locking flag for all
:class:`syconn.reps.segmentation.SegmentationObject` assigned to this
object (e.g. SV, mitochondria, vesicle clouds, ...)
nb_cpus: Number of cpus for parallel jobs. will only be used in some
processing steps.
view_dict: A dictionary for caching 2D projection views. Those are stored as
a numpy array of shape (M, N, CH, x, y). M: Length of :py:attr:`~sample_locations` and
has the same ordering; N: Number of views per location; CH: Number of channels (1 for
glia prediction containing only the cell shape and 4 for neuron analysis containing
cell and cell organelle shapes. Stored at :py:attr:`~view_path` and accessed via the
:class:`~syconn.backend.storage.CompressedStorage` interface.
version_dict: A dictionary which contains the versions of other dataset types which share
the same working directory. Defaults to the `Versions` entry in the `config.yml` file.
"""
    def __init__(self, ssv_id: int, version: Optional[str] = None, version_dict: Optional[Dict[str, str]] = None,
                 working_dir: Optional[str] = None, create: bool = False,
                 sv_ids: Optional[Union[np.ndarray, List[int]]] = None, scaling: Optional[np.ndarray] = None,
                 object_caching: bool = True, voxel_caching: bool = True, mesh_caching: bool = True,
                 view_caching: bool = False, config: Optional[DynConfig] = None, nb_cpus: int = 1,
                 enable_locking: bool = False, enable_locking_so: bool = False, ssd_type: Optional[str] = None,
                 ssd: Optional['SuperSegmentationDataset'] = None, sv_graph: Optional[nx.Graph] = None):
        """
        Args:
            ssv_id: unique SSV ID.
            version: Version string identifier. if 'tmp' is used, no data will
                be saved to disk.
            version_dict: Dictionary which contains the versions of other dataset types which share
                the same working directory. Defaults to the `versions` entry in the
                `config.yml` file.
            working_dir (): Path to the working directory.
            create: If True, the folder to its storage location :py:attr:`~ssv_dir` will be created.
            sv_ids: List of agglomerated supervoxels which define the neuron reconstruction.
            scaling: Array defining the voxel size in nanometers (XYZ).
            object_caching: :class:`~syconn.reps.segmentation.SegmentationObject` retrieved by
                :func:`~syconn.reps.segmentation.SegmentationObject.get_seg_objects`
                will be cached in a dictionary.
            voxel_caching: Voxel array will be cached at
                :py:attr:`~syconn.reps.segmentation.SegmentationObject._voxels`.
            mesh_caching: Meshes (cell fragments, mitos, vesicles, ..) will be cached at
                :py:attr:`~syconn.reps.segmentation.SegmentationObject._meshes`.
            view_caching: Views can be cached at :py:attr:`~view_dict`.
            config: Retrieved from :py:attr:`~syconn.global_params.config`, otherwise must be
                initialized with a :class:`~syconn.handler.config.DynConfig`
            nb_cpus: Number of cpus for parallel jobs. will only be used in some processing steps.
            enable_locking: Enable posix locking for IO operations.
            enable_locking_so: Locking flag for all :class:`syconn.reps.segmentation.SegmentationObject` assigned.
                to this object (e.g. SV, mitochondria, vesicle clouds, ...)
            ssd_type: Type of cell reconstruction. Default: 'ssv'. If specified and `ssd` is given, types must match.
            ssd: :py:class:`~syconn.reps.super_segmentation_dataset.SuperSegmentationDataset`; if given it will be used
                to check if property caching can be used in `:py:class:`~syconn.reps.segmentation.SegmentationDataset``.
                Property caching can be used by passing the datasets (attributes for caching have to be specified in
                init. via ``cache_properties``) of interest via the kwarg ``sd_lookup``
                during :py:attr:`~syconn.reps.super_segmentation_dataset.SuperSegmentationDataset` initialization.
            sv_graph: Supervoxel graph. Nodes must be uint SV IDs.
        """
        if version == 'temp':
            version = 'tmp'
        self._allow_skeleton_calc = False
        if version == "tmp":
            # Temporary objects are never written to disk -> no locking; the
            # skeleton may be computed on the fly.
            self.enable_locking = False
            create = False
            self._allow_skeleton_calc = True
        else:
            self.enable_locking = enable_locking
        self._object_caching = object_caching
        self._voxel_caching = voxel_caching
        self._mesh_caching = mesh_caching
        self._view_caching = view_caching
        self.enable_locking_so = enable_locking_so
        self.nb_cpus = nb_cpus
        self._id = ssv_id
        self.attr_dict = {}
        self._ssd = ssd
        # Resolve the dataset type; an explicitly passed ssd_type must agree
        # with the type of a given SSD instance.
        if self._ssd is not None:
            if ssd_type is not None and self._ssd.type != ssd_type:
                raise TypeError(f'Mis-match between given "ssd_type"={ssd_type} and type of "ssd"={ssd}.')
            else:
                ssd_type = self._ssd.type
        elif ssd_type is None:
            ssd_type = 'ssv'
        self._type = ssd_type
        # lazily-computed/cached attributes
        self._rep_coord = None
        self._size = None
        self._bounding_box = None
        self._objects = {}
        self.skeleton = None
        self._voxels = None
        self._voxels_xy_downsampled = None
        self._voxels_downsampled = None
        self._rag = None
        self._sv_graph_uint = sv_graph
        # init mesh dicts
        self._meshes = {"sv": None, "vc": None, "mi": None, "sj": None, "syn_ssv": None, "syn_ssv_sym": None,
                        "syn_ssv_asym": None, "er": None, "golgi": None}
        self._views = None
        self._weighted_graph = None
        self._sample_locations = None
        self._rot_mat = None
        self._label_dict = {}
        self.view_dict = {}
        if sv_ids is not None:
            self.attr_dict["sv"] = sv_ids
        self._setup_working_dir(working_dir, config, version, scaling)
        # Resolve the version: config default, next free number ("new"), or
        # the value given by the caller.
        if version is None:
            try:
                self._version = self.config["versions"][self.type]
            except KeyError:
                raise Exception(f"Unclear version '{version}' during initialization of {self}.")
        elif version == "new":
            other_datasets = glob.glob(self.working_dir + "/%s_*" % self.type)
            max_version = -1
            for other_dataset in other_datasets:
                other_version = int(re.findall(r"[\d]+", os.path.basename(other_dataset))[-1])
                if max_version < other_version:
                    max_version = other_version
            self._version = max_version + 1
        else:
            self._version = version
        if version_dict is None:
            try:
                self.version_dict = self.config["versions"]
            except KeyError:
                raise ValueError(f"Unclear version '{version}' during initialization of {self}.")
        else:
            if isinstance(version_dict, dict):
                self.version_dict = version_dict
            else:
                raise ValueError("No version dict specified in config.")
        if create:
            os.makedirs(self.ssv_dir, exist_ok=True)
def __hash__(self) -> int:
return hash((self.id, self.type, frozenset(self.sv_ids)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, self.__class__):
return False
return self.id == other.id and self.type == other.type and frozenset(self.sv_ids) == frozenset(other.sv_ids)
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __repr__(self) -> str:
return (f'{type(self).__name__}(ssv_id={self.id}, ssd_type="{self.type}", '
f'version="{self.version}", working_dir="{self.working_dir}")')
def __getitem__(self, item):
return self.attr_dict[item]
# IMMEDIATE PARAMETERS
@property
def type(self) -> str:
"""
The type of this super-sueprvoxel. Default: 'ssv'.
Returns:
String identifier of the object type.
"""
return self._type
@property
def id(self) -> int:
"""
Default value is the smalles SV ID which is part of this cell
reconstruction.
Returns:
Globally unique identifier of this object.
"""
return self._id
@property
def version(self) -> str:
"""
Version of the `~SuperSegmentationDataset` this object
belongs to. Can be any character or string like '0' or 'axongroundtruth'.
Returns:
String identifier of the object's version.
"""
return str(self._version)
@property
def object_caching(self) -> bool:
"""If True, :class:`~syconn.reps.segmentation.SegmentationObject`s which
are part of this cell reconstruction are cached."""
return self._object_caching
@property
def voxel_caching(self) -> bool:
"""If True, voxel data is cached."""
return self._voxel_caching
@property
def mesh_caching(self) -> bool:
"""If True, mesh data is cached."""
return self._mesh_caching
@property
def view_caching(self) -> bool:
"""If True, view data is cached."""
return self._view_caching
@property
def scaling(self) -> np.ndarray:
"""
Voxel size in nanometers (XYZ). Default is taken from the `config.yml`
file and accessible via :py:attr:`~config`.
"""
return self._scaling
# PATHS
@property
def working_dir(self) -> str:
"""
Working directory.
"""
return self._working_dir
@property
def identifier(self) -> str:
"""
Identifier used to create the folder name of the
:class: `~syconn.reps.super_segmentation_dataset.SuperSegmentationDataset`
this object belongs to.
"""
return "%s_%s" % (self.type, self.version.lstrip("_"))
@property
def ssd_dir(self) -> str:
"""
Path to the
:class:`~syconn.reps.super_segmentation_dataset.SuperSegmentationDataset`
directory this object belongs to.
"""
return "%s/%s/" % (self.working_dir, self.identifier)
@property
def ssd_kwargs(self) -> dict:
return dict(working_dir=self.working_dir, version=self.version, config=self._config,
ssd_type=self.type, sso_locking=self.enable_locking)
@property
def ssv_kwargs(self) -> dict:
kw = dict(ssv_id=self.id, working_dir=self.working_dir, version=self.version, config=self._config,
ssd_type=self.type, enable_locking=self.enable_locking)
if self.version == 'tmp':
kw.update(sv_ids=self.sv_ids)
return kw
@property
def ssv_dir(self) -> str:
"""
Path to the folder where the data of this super-supervoxel is stored.
"""
return "%s/so_storage/%s/" % (self.ssd_dir, subfold_from_ix_SSO(self.id))
@property
def attr_dict_path(self) -> str:
"""
Path to the attribute storage. :py:attr:`~attr_dict` can be loaded from here.
"""
# Kept for backwards compatibility, remove if not needed anymore
if os.path.isfile(self.ssv_dir + "atrr_dict.pkl"):
return self.ssv_dir + "atrr_dict.pkl"
return self.ssv_dir + "attr_dict.pkl"
@property
def skeleton_kzip_path(self) -> str:
"""
Path to the skeleton storage.
"""
return self.ssv_dir + "skeleton.k.zip"
@property
def skeleton_kzip_path_views(self) -> str:
"""
Path to the skeleton storage.
Todo:
* probably deprecated.
"""
return self.ssv_dir + "skeleton_views.k.zip"
@property
def objects_dense_kzip_path(self) -> str:
"""Identifier of cell organell overlays"""
return self.ssv_dir + "objects_overlay.k.zip"
@property
def skeleton_path(self) -> str:
"""Identifier of SSV skeleton"""
return self.ssv_dir + "skeleton.pkl"
@property
def edgelist_path(self) -> str:
"""Identifier of SSV graph"""
return self.ssv_dir + "edge_list.bz2"
@property
def view_path(self) -> str:
"""Identifier of view storage"""
return self.ssv_dir + "views.pkl"
@property
def mesh_dc_path(self) -> str:
"""Identifier of mesh storage"""
return self.ssv_dir + "mesh_dc.pkl"
@property
def vlabel_dc_path(self) -> str:
"""Identifier of vertex label storage"""
return self.ssv_dir + "vlabel_dc.pkl"
# IDS
@property
def sv_ids(self) -> np.ndarray:
"""
All cell supervoxel IDs which are assigned to this cell reconstruction.
"""
return np.array(self.lookup_in_attribute_dict("sv"), dtype=np.uint64)
@property
def sj_ids(self) -> np.ndarray:
"""
All synaptic junction (sj) supervoxel IDs which are assigned to this
cell reconstruction.
"""
return np.array(self.lookup_in_attribute_dict("sj"), dtype=np.uint64)
@property
def mi_ids(self) -> np.ndarray:
"""
All mitochondria (mi) supervoxel IDs which are assigned to this
cell reconstruction.
"""
return np.array(self.lookup_in_attribute_dict("mi"), dtype=np.uint64)
@property
def vc_ids(self) -> np.ndarray:
"""
All vesicle cloud (vc) supervoxel IDs which are assigned to this
cell reconstruction.
"""
return np.array(self.lookup_in_attribute_dict("vc"), dtype=np.uint64)
@property
def dense_kzip_ids(self) -> Dict[str, int]:
"""
?
"""
return dict([("mi", 1), ("vc", 2), ("sj", 3)])
# SegmentationObjects
@property
def svs(self) -> List[SegmentationObject]:
"""
All cell :class:`~syconn.reps.segmentation.SegmentationObjects` objects
which are assigned to this cell reconstruction.
"""
return self.get_seg_objects("sv")
@property
def sjs(self) -> List[SegmentationObject]:
"""
All synaptic junction (sj) :class:`~syconn.reps.segmentation.SegmentationObjects` objects
which are assigned to this cell reconstruction. These objects are based on the
initial synapse predictions and may contain synapse-synapse merger.
See :py:attr:`~syn_ssv` for merger-free inter-neuron synapses.
"""
return self.get_seg_objects("sj")
@property
def mis(self) -> List[SegmentationObject]:
"""
All mitochondria (mi) :class:`~syconn.reps.segmentation.SegmentationObjects` objects
which are assigned to this cell reconstruction.
"""
return self.get_seg_objects("mi")
@property
def vcs(self) -> List[SegmentationObject]:
    """Vesicle cloud (vc) objects assigned to this cell reconstruction."""
    obj_key = "vc"
    return self.get_seg_objects(obj_key)
@property
def syn_ssv(self) -> List[SegmentationObject]:
    """
    Inter-neuron synapse (syn_ssv) objects assigned to this cell
    reconstruction. They are agglomerated from 'syn' objects, which in turn
    combine synaptic junction (sj) and contact site (cs) objects to remove
    merger present in the raw sj predictions.
    """
    obj_key = "syn_ssv"
    return self.get_seg_objects(obj_key)
# MESHES
def load_mesh(self, mesh_type) -> Optional[MeshType]:
    """
    Load mesh of a specific type, e.g. 'mi', 'sv' (cell supervoxel), 'sj' (connected
    components of the original synaptic junction predictions), 'syn_ssv' (overlap of
    'sj' with cell contact sites), 'syn_ssv_sym' and 'syn_ssv_asym' (only if syn-type
    predictions are available).

    Args:
        mesh_type: Type of :class:`~syconn.reps.segmentation.SegmentationObject` used for
            mesh retrieval.

    Returns:
        Three flat arrays: indices, vertices, normals.

    Raises:
        ValueError: If `mesh_type` does not exist in :py:attr:`~_meshes`.
    """
    if mesh_type in ('syn_ssv_sym', 'syn_ssv_asym'):
        # typed synapse meshes are generated on demand from 'syn_ssv'
        self.typedsyns2mesh()
    if mesh_type not in self._meshes:
        # fixed: the original message ended with a stray '"' character
        raise ValueError(f'Unknown mesh type for objects "{mesh_type}" in {self}.')
    if self._meshes[mesh_type] is None:
        if not self.mesh_caching:
            return self._load_obj_mesh(mesh_type)
        self._meshes[mesh_type] = self._load_obj_mesh(mesh_type)
    return self._meshes[mesh_type]
@property
def mesh(self) -> Optional[MeshType]:
    """Combined mesh (indices, vertices, normals) of all cell supervoxels."""
    mesh_key = "sv"
    return self.load_mesh(mesh_key)
@property
def sj_mesh(self) -> Optional[MeshType]:
    """
    Combined mesh of all synaptic junction (sj) supervoxels. Based on the
    original synapse prediction and might contain merger.
    """
    mesh_key = "sj"
    return self.load_mesh(mesh_key)
@property
def vc_mesh(self) -> Optional[MeshType]:
    """Combined mesh of all vesicle cloud (vc) supervoxels."""
    mesh_key = "vc"
    return self.load_mesh(mesh_key)
@property
def mi_mesh(self) -> Optional[MeshType]:
    """Combined mesh of all mitochondria (mi) supervoxels."""
    mesh_key = "mi"
    return self.load_mesh(mesh_key)
@property
def syn_ssv_mesh(self) -> Optional[MeshType]:
    """
    Combined mesh of all inter-neuron synapse (syn_ssv) supervoxels, which
    are generated as a combination of contact sites and synaptic junctions.
    """
    mesh_key = "syn_ssv"
    return self.load_mesh(mesh_key)
def label_dict(self, data_type='vertex') -> CompressedStorage:
    """
    Storage for various predictions, accessed through the
    :class:`~syconn.backend.storage.CompressedStorage` interface.
    Currently used keys:
        * 'vertex': Labels associated with the mesh vertices, ordered like
          the vertices in ``self.mesh[1]``.

    Args:
        data_type: Key for the stored labels.

    Returns:
        The stored array.
    """
    if data_type != 'vertex':
        raise ValueError('Label dict for data type "{}" not supported.'.format(data_type))
    if data_type not in self._label_dict:
        # temporary objects get an in-memory (path-less) storage
        storage_path = None if self.version == 'tmp' else self.vlabel_dc_path
        self._label_dict[data_type] = CompressedStorage(storage_path)
    return self._label_dict[data_type]
# PROPERTIES
@property
def config(self) -> DynConfig:
    """
    Dataset-specific configuration object
    (:class:`~syconn.handler.config.DynConfig`); falls back to the global
    configuration singleton when unset.
    """
    if self._config is not None:
        return self._config
    self._config = global_params.config
    return self._config
@property
def size(self) -> int:
    """
    Number of voxels associated with this SSV object; looked up from the
    attribute dict and computed on demand otherwise.
    """
    if self._size is None:
        self._size = self.lookup_in_attribute_dict("size")
    if self._size is None:
        # not stored anywhere yet -- derive from the supervoxel sizes
        self.calculate_size()
    return self._size
@property
def bounding_box(self) -> List[np.ndarray]:
    """[lower, upper] bounding box (XYZ, voxel coords); computed if not stored."""
    if self._bounding_box is not None:
        return self._bounding_box
    self._bounding_box = self.lookup_in_attribute_dict("bounding_box")
    if self._bounding_box is None:
        self.calculate_bounding_box()
    return self._bounding_box
@property
def shape(self) -> np.ndarray:
    """
    The XYZ extent of this SSV object in voxels.

    Returns:
        The shape/extent of this SSV object in voxels (XYZ).
    """
    lower, upper = self.bounding_box
    return upper - lower
@property
def rep_coord(self) -> np.ndarray:
    """
    Representative coordinate (XYZ, 1D array) of this SSV; falls back to the
    representative coordinate of the first supervoxel in :py:attr:`~svs`.
    """
    if self._rep_coord is not None:
        return self._rep_coord
    coord = self.lookup_in_attribute_dict("rep_coord")
    if coord is None:
        coord = self.svs[0].rep_coord
    self._rep_coord = coord
    return self._rep_coord
@property
def attr_dict_exists(self) -> bool:
    """True if an attribute dict file exists at :py:attr:`~attr_dict_path`."""
    path = self.attr_dict_path
    return os.path.isfile(path)
def mesh_exists(self, obj_type: str) -> bool:
    """
    Checks whether a mesh for objects of type `obj_type` is present in the
    :class:`~syconn.backend.storage.MeshStorage` at :py:attr:`~mesh_dc_path`.

    Args:
        obj_type: Type of requested :class:`~syconn.reps.segmentation.SegmentationObject`s.

    Returns:
        True if the mesh exists.
    """
    storage = MeshStorage(self.mesh_dc_path, disable_locking=True)
    return obj_type in storage
@property
def voxels(self) -> Optional[np.ndarray]:
    """
    Binary voxel volume of this SSV inside its bounding box.

    Returns:
        3D bool array spanning :py:attr:`~bounding_box` with True at cell
        voxels, or None if no supervoxels are assigned. Cached when
        :py:attr:`~voxel_caching` is enabled.
    """
    if len(self.sv_ids) == 0:
        return None
    if self._voxels is None:
        # accumulate supervoxel masks into one volume covering the SSV bbox;
        # np.bool was removed in numpy>=1.24 -- use the builtin bool dtype
        voxels = np.zeros(self.bounding_box[1] - self.bounding_box[0],
                          dtype=bool)
        for sv in self.svs:
            sv._voxel_caching = False
            if sv.voxels_exist:
                # fixed: the values were previously passed as bare logging
                # args without placeholders, which breaks message formatting
                log_reps.debug("sv %s: %s voxels (size=%s)", sv.id,
                               np.sum(sv.voxels), sv.size)
                box = [sv.bounding_box[0] - self.bounding_box[0],
                       sv.bounding_box[1] - self.bounding_box[0]]
                voxels[box[0][0]: box[1][0],
                       box[0][1]: box[1][1],
                       box[0][2]: box[1][2]][sv.voxels] = True
            else:
                log_reps.warning("missing voxels from %d" % sv.id)
        if self.voxel_caching:
            self._voxels = voxels
        else:
            return voxels
    return self._voxels
@property
def voxels_xy_downsampled(self) -> Optional[np.ndarray]:
    """Voxels downsampled by (2, 2, 1); cached when voxel caching is enabled."""
    if self._voxels_xy_downsampled is not None:
        return self._voxels_xy_downsampled
    if not self.voxel_caching:
        return self.load_voxels_downsampled((2, 2, 1))
    self._voxels_xy_downsampled = self.load_voxels_downsampled((2, 2, 1))
    return self._voxels_xy_downsampled
@property
def rag(self) -> nx.Graph:
    """
    Region adjacency graph (supervoxel graph) of this SSV, with nodes of type
    :class:`~syconn.reps.segmentation.SegmentationObject`; lazily loaded.
    """
    cached = self._rag
    if cached is None:
        cached = self.load_sv_graph()
        self._rag = cached
    return cached
@property
def compartment_meshes(self) -> dict:
    """
    Meshes of the compartments 'axon', 'dendrite' and 'soma'; loaded from
    storage on first access.
    """
    keys = ("axon", "dendrite", "soma")
    if keys[0] not in self._meshes:
        self._load_compartment_meshes()
    return {k: self._meshes[k] for k in keys}
def _load_compartment_meshes(self, overwrite: bool = False):
    """
    Loading compartment meshes in-place as 'axon', 'dendrite', 'soma' to
    :py:attr:`~syconn.reps.super_segmentation_object.SuperSegmentationObject._meshes`.

    Args:
        overwrite: Overwrites existing compartment meshes.
    """
    mesh_dc = MeshStorage(self.mesh_dc_path,
                          disable_locking=not self.enable_locking)
    if "axon" not in mesh_dc or overwrite:
        # compute per-compartment meshes and persist them in the mesh storage
        mesh_compartments = compartmentalize_mesh(self)
        mesh_dc["axon"] = mesh_compartments["axon"]
        mesh_dc["dendrite"] = mesh_compartments["dendrite"]
        mesh_dc["soma"] = mesh_compartments["soma"]
        mesh_dc.push()
    comp_meshes = {k: mesh_dc[k] for k in ["axon", "dendrite", "soma"]}
    self._meshes.update(comp_meshes)
def load_voxels_downsampled(self, downsampling: tuple = (2, 2, 1),
                            nb_threads: int = 10) -> np.ndarray:
    """
    Load all voxels of this SSV object, downsampled by `downsampling`.

    Args:
        downsampling: The downsampling of the returned voxels.
        nb_threads: Number of threads.

    Returns:
        List of downsampled voxel coordinates.
    """
    return ssh.load_voxels_downsampled(
        self, downsampling=downsampling, nb_threads=nb_threads)
def get_seg_objects(self, obj_type: str) -> List[SegmentationObject]:
    """
    Factory method for all :class:`~syconn.reps.segmentation.SegmentationObject`s
    of type `obj_type` assigned to this SSV (same working directory).

    Args:
        obj_type: Type of requested objects.

    Returns:
        List of objects of type `obj_type`; cached when
        :py:attr:`~object_caching` is enabled.
    """
    if obj_type in self._objects:
        return self._objects[obj_type]
    objs = [self.get_seg_obj(obj_type, oid)
            for oid in self.lookup_in_attribute_dict(obj_type)]
    if self.object_caching:
        self._objects[obj_type] = objs
    return objs
def get_seg_obj(self, obj_type: str, obj_id: int) -> SegmentationObject:
    """
    Factory method for :class:`~syconn.reps.segmentation.SegmentationObject` of type `obj_type`.

    Args:
        obj_type: Type of requested :class:`~syconn.reps.segmentation.SegmentationObject`.
        obj_id: ID of the requested object.

    Returns:
        The :class:`~syconn.reps.segmentation.SegmentationObject` of type `obj_type`
        sharing the same working directory as this SSV object.

    Raises:
        ValueError: If the looked-up dataset's version or working directory
            is inconsistent with this SSV.
    """
    kwargs = dict(enable_locking=self.enable_locking_so, mesh_caching=self.mesh_caching,
                  voxel_caching=self.voxel_caching)
    # fast path: reuse the SegmentationDataset cached in the parent SSD, if any
    if self._ssd is not None and obj_type in self._ssd.sd_lookup and self._ssd.sd_lookup[obj_type] is not None:
        sd_obj = self._ssd.sd_lookup[obj_type]
        # guard against mixing objects from different dataset versions/dirs
        if str(sd_obj.version) != str(self.version_dict[obj_type]) or sd_obj.working_dir != self.working_dir:
            msg = (f'Inconsistent working directory or version for {obj_type} stored in {self} and look up '
                   f'dataset {sd_obj}.')
            log_reps.error(msg)
            raise ValueError(msg)
        return sd_obj.get_segmentation_object(obj_id, **kwargs)
    # fallback: construct the object directly from this SSV's metadata.
    # NOTE(review): 'kwargs' (locking/caching flags) are not forwarded on
    # this path -- confirm whether that is intended.
    return SegmentationObject(obj_id=obj_id, obj_type=obj_type, version=self.version_dict[obj_type],
                              working_dir=self.working_dir, create=False, scaling=self.scaling, config=self.config)
def get_seg_dataset(self, obj_type: str) -> SegmentationDataset:
    """
    Factory method for a :class:`~syconn.reps.segmentation.SegmentationDataset`
    of type `obj_type` sharing this SSV's working directory.

    Args:
        obj_type: Type of the requested dataset.

    Returns:
        The dataset of type `obj_type`.
    """
    return SegmentationDataset(obj_type,
                               version=self.version_dict[obj_type],
                               version_dict=self.version_dict,
                               scaling=self.scaling,
                               working_dir=self.working_dir)
def load_attr_dict(self) -> int:
    """
    Load this SSV's attribute dictionary from :py:attr:`~attr_dict_path`.

    Returns:
        0 on success, -1 on failure (a missing file fails silently; other
        errors are logged as critical).
    """
    try:
        self.attr_dict = load_pkl2obj(self.attr_dict_path)
    except (IOError, EOFError, pkl.UnpicklingError) as e:
        if '[Errno 2] No such file or' not in str(e):
            log_reps.critical(f"Could not load SSO attributes from {self.attr_dict_path} due to '{e}'.")
        return -1
    else:
        return 0
@property
def sv_graph_uint(self) -> nx.Graph:
    """Supervoxel ID graph (uint64 nodes), read from :py:attr:`~edgelist_path`."""
    if self._sv_graph_uint is not None:
        return self._sv_graph_uint
    if not os.path.isfile(self.edgelist_path):
        raise ValueError("Could not find graph data for SSV {}.".format(self.id))
    self._sv_graph_uint = nx.read_edgelist(self.edgelist_path, nodetype=np.uint64)
    return self._sv_graph_uint
def load_sv_graph(self) -> nx.Graph:
    """
    Load the supervoxel graph (node objects will be of type
    :class:`~syconn.reps.segmentation.SegmentationObject`) of this SSV object.
    It is generated from the supervoxel ID graph stored in :py:attr:`_sv_graph`
    or the edge list stored at :py:attr:`edgelist_path`.

    Returns:
        The supervoxel graph with :class:`~syconn.reps.segmentation.SegmentationObject`
        nodes.

    Raises:
        ValueError: If the node IDs of the loaded graph do not match
            :py:attr:`~sv_ids`.
    """
    G = self.sv_graph_uint
    # # Might be useful as soon as global graph path is available
    # else:
    #     if os.path.isfile(global_params.config.neuron_svgraph_path):
    #         G_glob = nx.read_edgelist(global_params.config.neuron_svgraph_path,
    #                                   nodetype=np.uint64)
    #         G = nx.Graph()
    #         cc = nx.node_connected_component(G_glob, self.sv_ids[0])
    #         assert len(set(cc).difference(set(self.sv_ids))) == 0, \
    #             "SV IDs in graph differ from SSV SVs."
    #         for e in G_glob.edges(cc):
    #             G.add_edge(*e)
    # sanity check: the graph's nodes must be exactly this SSV's supervoxels
    if len(set(list(G.nodes())).difference(set(self.sv_ids))) != 0:
        msg = "SV IDs in graph differ from SSV SVs."
        log_reps.error(msg)
        raise ValueError(msg)
    # create graph with SV nodes
    new_G = nx.Graph()
    for e in G.edges():
        new_G.add_edge(self.get_seg_obj("sv", e[0]), self.get_seg_obj("sv", e[1]))
    return new_G
def load_sv_edgelist(self) -> List[Tuple[int, int]]:
    """
    Edges of the supervoxel graph.

    Returns:
        Edge list representing the supervoxel graph.
    """
    return list(self.load_sv_graph().edges())
def _load_obj_mesh(self, obj_type: str = "sv", rewrite: bool = False) -> MeshType:
    """
    Load the mesh of a given `obj_type`. If :func:`~mesh_exists` is False,
    merges the meshes of the underlying supervoxel objects and caches the
    result in the mesh storage.

    Todo:
        * Currently does not support color array.
        * Add support for sym./asym. synapse type.

    Args:
        obj_type: Type of :class:`~syconn.reps.segmentation.SegmentationObject`.
        rewrite: Recompute the mesh even if it already exists in the storage.

    Returns:
        Three flat arrays: indices (int32), vertices (float32), normals (float32).
    """
    if not rewrite and self.mesh_exists(obj_type) and not \
            self.version == "tmp":
        mesh_dc = MeshStorage(self.mesh_dc_path,
                              disable_locking=not self.enable_locking)
        # older storage entries contain (ind, vert) only; normals are empty then
        if len(mesh_dc[obj_type]) == 3:
            ind, vert, normals = mesh_dc[obj_type]
        else:
            ind, vert = mesh_dc[obj_type]
            normals = np.zeros((0,), dtype=np.float32)
    else:
        # merge supervoxel-level meshes into a single SSV-level mesh
        ind, vert, normals = merge_someshes(self.get_seg_objects(obj_type), nb_cpus=self.nb_cpus,
                                            use_new_subfold=self.config.use_new_subfold)
        # persist the merged mesh unless this is a temporary object
        if not self.version == "tmp":
            mesh_dc = MeshStorage(self.mesh_dc_path, read_only=False, disable_locking=not self.enable_locking)
            mesh_dc[obj_type] = [ind, vert, normals]
            mesh_dc.push()
    return np.array(ind, dtype=np.int32), np.array(vert, dtype=np.float32), np.array(normals, dtype=np.float32)
def _load_obj_mesh_compr(self, obj_type: str = "sv") -> MeshType:
    """
    Return the still-compressed mesh entry for `obj_type` from the mesh
    storage of this SSV.

    Args:
        obj_type: Type of requested objects.

    Returns:
        A single (compressed) mesh of all objects.
    """
    storage = MeshStorage(self.mesh_dc_path,
                          disable_locking=not self.enable_locking)
    # NOTE(review): reads MeshStorage's internal dict directly, bypassing
    # the decompression performed by __getitem__.
    return storage._dc_intern[obj_type]
def save_attr_dict(self):
    """
    Merge :py:attr:`~attr_dict` into the attribute dict stored on disk and
    write it back. No-op for temporary ('tmp') versions.
    """
    if self.version == 'tmp':
        log_reps.warning('"save_attr_dict" called but this SSV has version "tmp", attribute dict will'
                         ' not be saved to disk.')
        return
    try:
        on_disk = load_pkl2obj(self.attr_dict_path)
    except (IOError, EOFError, FileNotFoundError, pkl.UnpicklingError) as e:
        # missing file is expected for fresh objects; report anything else
        if '[Errno 2] No such file or' not in str(e):
            log_reps.critical(f"Could not load SSO attributes from {self.attr_dict_path} due to '{e}'. Overwriting")
        on_disk = {}
    on_disk.update(self.attr_dict)
    write_obj2pkl(self.attr_dict_path, on_disk)
def save_attributes(self, attr_keys: List[str], attr_values: List[Any]):
    """
    Write the given attributes directly to the attribute dict on the file
    system; :py:attr:`~attr_dict` is not touched. No-op for 'tmp' versions.

    Args:
        attr_keys: Attribute keys. A single non-sequence key is wrapped in a
            list automatically (note: a plain ``str`` passes the ``__len__``
            check and is NOT wrapped -- pre-existing behavior).
        attr_values: Attribute values, same length/order as `attr_keys`.

    Raises:
        IOError: If writing fails for a reason other than missing permissions.
    """
    if self.version == 'tmp':
        log_reps.warning('"save_attributes" called but this SSV has version "tmp", attributes will'
                         ' not be saved to disk.')
        return
    if not hasattr(attr_keys, "__len__"):
        attr_keys = [attr_keys]
    if not hasattr(attr_values, "__len__"):
        attr_values = [attr_values]
    try:
        attr_dict = load_pkl2obj(self.attr_dict_path)
    except (IOError, EOFError, FileNotFoundError) as e:
        # a missing file is expected for fresh objects; only report
        # permission problems (same condition as before, written directly)
        if "[Errno 13] Permission denied" in str(e):
            log_reps.critical(f"Could not load SSO attributes at {self.attr_dict_path} due to {e}.")
        attr_dict = {}
    for k, v in zip(attr_keys, attr_values):
        attr_dict[k] = v
    try:
        write_obj2pkl(self.attr_dict_path, attr_dict)
    except IOError as e:
        if "[Errno 13] Permission denied" in str(e):
            # fixed: log_reps.warn(msg, RuntimeWarning) passed a warning class
            # as a %-formatting arg to the logger; use warning() with msg only
            log_reps.warning("Could not save SSO attributes to %s due to missing permissions." % self.attr_dict_path)
        else:
            # fixed: 'raise (IOError, e)' raises a tuple (TypeError in Py3);
            # re-raise the original exception instead
            raise
def attr_exists(self, attr_key: str) -> bool:
    """
    True iff `attr_key` is present in the cached :py:attr:`~attr_dict`
    (no reload from disk is attempted).
    """
    return attr_key in self.attr_dict
def lookup_in_attribute_dict(self, attr_key: str) -> Optional[Any]:
    """
    Value stored under `attr_key` in :py:attr:`~attr_dict`, or None if the
    key does not exist. When the cached dict looks incomplete (few entries),
    a reload from disk is attempted first.

    Args:
        attr_key: Attribute key.

    Returns:
        Value for ``attr_key`` or None.
    """
    if attr_key in self.attr_dict:
        return self.attr_dict[attr_key]
    # TODO: this is somehow arbitrary
    if len(self.attr_dict) > 4:
        return None
    if self.load_attr_dict() == -1:
        return None
    return self.attr_dict.get(attr_key)
def load_so_attributes(self, obj_type: str, attr_keys: List[str]):
    """
    Collect attribute values from all :class:`~syconn.reps.segmentation.SegmentationObject`s
    of type `obj_type`. For each key, the value ordering follows
    :func:`~get_seg_objects`.

    Args:
        obj_type: Type of :class:`~syconn.reps.segmentation.SegmentationObject`.
        attr_keys: Keys of desired properties; must exist for `obj_type`.

    Returns:
        One list of values per key in `attr_keys`.
    """
    values = [[] for _ in attr_keys]
    for obj in self.get_seg_objects(obj_type):
        # object-major iteration so each object's attribute dict is touched once
        for col, key in enumerate(attr_keys):
            values[col].append(obj.lookup_in_attribute_dict(key))
    return values
def calculate_size(self):
    """Compute and cache :py:attr:`~size` as the sum of all supervoxel sizes."""
    sizes = self.load_so_attributes('sv', ['size'])
    self._size = np.sum(sizes)
def calculate_bounding_box(self):
    """
    Compute and cache :py:attr:`~bounding_box` (and :py:attr:`size`) from
    the supervoxel bounding boxes and sizes.
    """
    if len(self.sv_ids) == 0:
        # no supervoxels assigned: empty box and zero size
        self._bounding_box = np.zeros((2, 3), dtype=np.int32)
        self._size = 0
        return
    bbs, sizes = self.load_so_attributes('sv', ['bounding_box', 'size'])
    self._size = np.sum(sizes)
    lower = np.min(bbs, axis=0)[0]
    upper = np.max(bbs, axis=0)[1]
    self._bounding_box = np.array([lower, upper]).astype(np.int32)
def calculate_skeleton(self, force: bool = False, **kwargs):
    """
    Merges existing supervoxel skeletons (``allow_ssv_skel_gen=False``) or calculates them
    from scratch using :func:`~syconn.reps.super_segmentation_helper
    .create_sso_skeletons_wrapper` otherwise (requires ``allow_ssv_skel_gen=True``).
    Skeleton will be saved at :py:attr:`~skeleton_path`.

    Args:
        force: Skips :func:`~load_skeleton` if ``force=True``.
    """
    if force or self._allow_skeleton_calc:
        return ssh.create_sso_skeletons_wrapper([self], **kwargs)
    # NOTE(review): below this point 'force' is always False, so the
    # 'not force' term in the next condition is redundant.
    if self.skeleton is not None and len(self.skeleton["nodes"]) != 0 \
            and not force:
        # a non-empty skeleton already exists -- nothing to do
        return
    ssh.create_sso_skeletons_wrapper([self], **kwargs)
def save_skeleton_to_kzip(self, dest_path: Optional[str] = None, name: str = 'skeleton',
                          additional_keys: Optional[List[str]] = None,
                          comments: Optional[Union[np.ndarray, List[str]]] = None):
    """
    Write this SSV's skeleton and its node properties as a KNOSSOS
    compatible k.zip file. Failures are logged, not raised (best effort).

    Args:
        dest_path: Destination path for k.zip file. Defaults to
            :py:attr:`~skeleton_kzip_path`; a missing '.k.zip' suffix is appended.
        name: identifier / name of saved skeleton which appears in KNOSSOS.
        additional_keys: Additional skeleton keys which are converted into
            KNOSSOS skeleton node properties. Will always attempt to write out the
            keys 'axoness', 'cell_type' and 'meta'.
        comments: np.ndarray of strings or list of strings of length N where N
            equals the number of skeleton nodes. Comments will be converted into
            KNOSSOS skeleton node comments.
    """
    # allow passing a single key as plain string
    if type(additional_keys) == str:
        additional_keys = [additional_keys]
    try:
        if self.skeleton is None:
            self.load_skeleton()
        if additional_keys is not None:
            for k in additional_keys:
                assert k in self.skeleton, "Additional key %s is not " \
                                           "part of SSV %d self.skeleton.\nAvailable keys: %s" % \
                                           (k, self.id, repr(self.skeleton.keys()))
        a = skeleton.SkeletonAnnotation()
        a.scaling = self.scaling
        a.comment = name
        skel_nodes = []
        for i_node in range(len(self.skeleton["nodes"])):
            c = self.skeleton["nodes"][i_node]
            # stored per-node diameters -> KNOSSOS node radius
            r = self.skeleton["diameters"][i_node] / 2
            skel_nodes.append(skeleton.SkeletonNode().
                              from_scratch(a, c[0], c[1], c[2], radius=r))
            # axoness prediction key depends on the semseg configuration
            pred_key_ax = "{}_avg{}".format(self.config['compartments'][
                                                'view_properties_semsegax']['semseg_key'],
                                            self.config['compartments'][
                                                'dist_axoness_averaging'])
            if pred_key_ax in self.skeleton:
                skel_nodes[-1].data[pred_key_ax] = self.skeleton[pred_key_ax][
                    i_node]
            if "meta" in self.skeleton:
                skel_nodes[-1].data["meta"] = self.skeleton["meta"][i_node]
            if additional_keys is not None:
                for k in additional_keys:
                    skel_nodes[-1].data[k] = self.skeleton[k][i_node]
            if comments is not None:
                skel_nodes[-1].setComment(str(comments[i_node]))
            a.addNode(skel_nodes[-1])
        for edge in self.skeleton["edges"]:
            a.addEdge(skel_nodes[edge[0]], skel_nodes[edge[1]])
        if dest_path is None:
            dest_path = self.skeleton_kzip_path
        elif not dest_path.endswith('.k.zip'):
            dest_path += '.k.zip'
        write_skeleton_kzip(dest_path, [a])
    except Exception as e:
        # best-effort export: failures are logged, not raised
        log_reps.warning("[SSO: %d] Could not load/save skeleton:\n%s" % (self.id, repr(e)))
def save_objects_to_kzip_sparse(self, obj_types: Optional[Iterable[str]] = None,
                                dest_path: Optional[str] = None):
    """
    Export cellular organelles as coordinates with size, shape and overlap
    properties in a KNOSSOS compatible format.

    Args:
        obj_types: Type identifiers of the supervoxel objects which are exported.
            Defaults to the config's 'process_cell_organelles' entry.
        dest_path: Path to the destination file. If None, results will be
            stored at :py:attr:`~skeleton_kzip_path`.
    """
    if obj_types is None:
        obj_types = self.config['process_cell_organelles']
    annotations = []
    for obj_type in obj_types:
        assert obj_type in self.attr_dict
        map_ratio_key = "mapping_%s_ratios" % obj_type
        if not map_ratio_key in self.attr_dict.keys():
            # overlap ratios must be aggregated first (see
            # aggregate_segmentation_object_mappings)
            log_reps.warning("%s not yet mapped. Object nodes are not "
                             "written to k.zip." % obj_type)
            continue
        overlap_ratios = np.array(self.attr_dict[map_ratio_key])
        overlap_ids = np.array(self.attr_dict["mapping_%s_ids" % obj_type])
        a = skeleton.SkeletonAnnotation()
        a.scaling = self.scaling
        a.comment = obj_type
        so_objs = self.get_seg_objects(obj_type)
        for so_obj in so_objs:
            c = so_obj.rep_coord
            # somewhat approximated from sphere volume:
            r = np.power(so_obj.size / 3., 1 / 3.)
            skel_node = skeleton.SkeletonNode(). \
                from_scratch(a, c[0], c[1], c[2], radius=r)
            skel_node.data["overlap"] = \
                overlap_ratios[overlap_ids == so_obj.id][0]
            skel_node.data["size"] = so_obj.size
            skel_node.data["shape"] = so_obj.shape
            a.addNode(skel_node)
        annotations.append(a)
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    elif not dest_path.endswith('.k.zip'):
        dest_path += '.k.zip'
    write_skeleton_kzip(dest_path, annotations)
def save_objects_to_kzip_dense(self, obj_types: List[str],
                               dest_path: Optional[str] = None):
    """
    Export cellular organelles as dense objects in a KNOSSOS compatible
    format, one write ID per organelle type (see :py:attr:`~dense_kzip_ids`).

    Args:
        obj_types: Type identifiers of the supervoxel objects which are exported.
        dest_path: Path to the destination file. If None, result will be
            stored at :py:attr:`~objects_dense_kzip_path`.
    """
    if dest_path is None:
        dest_path = self.objects_dense_kzip_path
    # clear any previous dense export (extracted folder and the zip itself)
    extracted_dir = self.objects_dense_kzip_path[:-6]
    if os.path.exists(extracted_dir):
        shutil.rmtree(extracted_dir)
    if os.path.exists(self.objects_dense_kzip_path):
        os.remove(self.objects_dense_kzip_path)
    for obj_type in obj_types:
        for so_obj in self.get_seg_objects(obj_type):
            so_obj.save_kzip(path=dest_path,
                             write_id=self.dense_kzip_ids[obj_type])
def total_edge_length(self, compartments_of_interest: Optional[List[int]] = None,
                      ax_pred_key: str = 'axoness_avg10000') -> Union[np.ndarray, float]:
    """
    Total edge length of the super-supervoxel :py:attr:`~skeleton` in
    nanometers, optionally restricted to edges whose both endpoints carry a
    compartment label in `compartments_of_interest`.

    Args:
        compartments_of_interest: Which compartments to take into account for
            calculation. axon: 1, dendrite: 0, soma: 2.
        ax_pred_key: Key of compartment prediction stored in :attr:`~skeleton`,
            only used if `compartments_of_interest` was set.

    Returns:
        Sum of all (selected) edge lengths (L2 norm) in :py:attr:`~skeleton`.
    """
    if self.skeleton is None:
        self.load_skeleton()
    nodes = self.skeleton["nodes"]
    edges = self.skeleton["edges"]

    def _edge_len(e):
        # Euclidean length of one edge in nm (scaled node coordinates)
        return np.linalg.norm(self.scaling * (nodes[e[0]] - nodes[e[1]]))

    if compartments_of_interest is None:
        return np.sum([_edge_len(e) for e in edges])
    labels = self.skeleton[ax_pred_key]
    total = 0
    for e in edges:
        if labels[e[0]] in compartments_of_interest and \
                labels[e[1]] in compartments_of_interest:
            total += _edge_len(e)
    return total
def save_skeleton(self, to_kzip=False, to_object=True):
    """
    Saves skeleton to default locations as `.pkl` and optionally as `.k.zip`.

    Args:
        to_kzip: Stores skeleton as a KNOSSOS compatible xml inside a k.zip file.
        to_object: Stores skeleton as a dictionary in a pickle file.
    """
    # temporary objects are never persisted to disk
    if self.version == 'tmp':
        log_reps.debug('"save_skeleton" called but this SSV '
                       'has version "tmp", skeleton will'
                       ' not be saved to disk.')
        return
    if to_object:
        write_obj2pkl(self.skeleton_path, self.skeleton)
    if to_kzip:
        self.save_skeleton_to_kzip()
def load_skeleton(self) -> bool:
    """
    Load the skeleton from disk into :py:attr:`~skeleton`; compute it if it
    does not exist yet (requires ``allow_ssv_skel_gen=True``).

    Returns:
        True if the skeleton was loaded, already present, or generated;
        False otherwise.
    """
    if self.skeleton is not None:
        return True
    try:
        self.skeleton = load_pkl2obj(self.skeleton_path)
        self.skeleton["nodes"] = self.skeleton["nodes"].astype(np.float32)
        return True
    # fixed: previously a bare 'except:', which also swallowed
    # SystemExit/KeyboardInterrupt
    except Exception:
        if global_params.config.allow_ssv_skel_gen:
            if global_params.config.use_kimimaro:
                # TODO: add per-ssv skeleton generation for kimimaro
                raise NotImplementedError('Individual cells cannot be processed with kimimaro.')
            self.calculate_skeleton()
            return True
        return False
def celltype(self, key: Optional[str] = None) -> int:
    """
    Cell type classification result, read via
    :func:`~lookup_in_attribute_dict`. Defaults to the CMN model result
    ('celltype_cnn_e3') when no `key` is given.

    Args:
        key: Key where classification result is stored.

    Returns:
        Cell type classification.
    """
    lookup_key = 'celltype_cnn_e3' if key is None else key
    return self.lookup_in_attribute_dict(lookup_key)
def weighted_graph(self, add_node_attr: Iterable[str] = ()) -> nx.Graph:
    """
    Creates a Euclidean distance (in nanometers) weighted graph representation of the
    skeleton of this SSV object. The node IDs represent the index in
    the ``'node'`` array part of :py:attr:`~skeleton`. Weights are stored
    as 'weight' in the graph, this allows to use e.g.
    ``nx.single_source_dijkstra_path(..)``.

    Args:
        add_node_attr: To-be-added node attributes. Must exist in
            :py:attr`~skeleton`.

    Returns:
        The skeleton of this SSV object as a networkx graph.
    """
    # rebuild the cached graph if missing or lacking any requested attribute
    if self._weighted_graph is None or np.any([len(nx.get_node_attributes(
            self._weighted_graph, k)) == 0 for k in add_node_attr]):
        if self.skeleton is None:
            self.load_skeleton()
        # edge weights: Euclidean distance in nm between scaled node coords
        node_scaled = self.skeleton["nodes"] * self.scaling
        edges = np.array(self.skeleton["edges"], dtype=np.int64)
        edge_coords = node_scaled[edges]
        weights = np.linalg.norm(edge_coords[:, 0] - edge_coords[:, 1], axis=1)
        self._weighted_graph = nx.Graph()
        # node 'position' attributes are the raw (unscaled) skeleton coords
        self._weighted_graph.add_nodes_from(
            [(ix, dict(position=coord)) for ix, coord in
             enumerate(self.skeleton['nodes'])])
        self._weighted_graph.add_weighted_edges_from(
            [(edges[ii][0], edges[ii][1], weights[ii]) for
             ii in range(len(weights))])
        for k in add_node_attr:
            dc = {}
            for n in self._weighted_graph.nodes():
                dc[n] = self.skeleton[k][n]
            nx.set_node_attributes(self._weighted_graph, dc, k)
    return self._weighted_graph
def syn_sign_ratio(self, weighted: bool = True,
                   recompute: bool = True,
                   comp_types: Optional[List[int]] = None,
                   comp_types_partner: Optional[List[int]] = None) -> float:
    """
    Ratio of symmetric synapses (between 0 and 1; -1 if no synapse objects)
    between functional compartments specified via `comp_types` and
    `comp_types_partner`.

    Todo:
        * Check default of synapse type if synapse type predictions are not
          available -> propagate to this method and return -1.

    Notes:
        Bouton predictions are converted into axon label,
        i.e. 3 (en-passant) -> 1 and 4 (terminal) -> 1.

    Args:
        weighted: Compute synapse-area weighted ratio.
        recompute: Ignore existing value.
        comp_types: All synapses that are formed on any of the
            functional compartment types given in `comp_types` are used
            for computing the ratio (0: dendrite,
            1: axon, 2: soma). Default: [1, ].
        comp_types_partner: Compartment type of the partner cell. Default:
            [0, ].

    Returns:
        (Area-weighted) ratio of symmetric synapses or -1 if no synapses.
    """
    if comp_types is None:
        comp_types = [1, ]
    if comp_types_partner is None:
        comp_types_partner = [0, ]
    ratio = self.lookup_in_attribute_dict("syn_sign_ratio")
    if not recompute and ratio is not None:
        return ratio
    syn_signs = []
    syn_sizes = []
    # bulk-load synapse properties to avoid per-object attribute-dict I/O
    props = load_so_attr_bulk(self.syn_ssv, ('partner_axoness', 'syn_sign', 'mesh_area', 'neuron_partners'),
                              use_new_subfold=self.config.use_new_subfold)
    for syn in self.syn_ssv:
        ax = np.array(props['partner_axoness'][syn.id])
        # convert boutons to axon class
        ax[ax == 3] = 1
        ax[ax == 4] = 1
        partners = props['neuron_partners'][syn.id]
        this_cell_ix = list(partners).index(self.id)
        other_cell_ix = 1 - this_cell_ix
        # keep only synapses between the requested compartment pairing
        if ax[this_cell_ix] not in comp_types:
            continue
        if ax[other_cell_ix] not in comp_types_partner:
            continue
        syn_signs.append(props['syn_sign'][syn.id])
        # mesh_area covers both sides of the contact -> halve it
        syn_sizes.append(props['mesh_area'][syn.id] / 2)
    log_reps.debug(f'Used {len(syn_signs)} synapses with a total size of {np.sum(syn_sizes)} um^2 between {comp_types} '
                   f'(this cell) and {comp_types_partner} (other cells).')
    if len(syn_signs) == 0 or np.sum(syn_sizes) == 0:
        return -1
    syn_signs = np.array(syn_signs)
    syn_sizes = np.array(syn_sizes)
    if weighted:
        # area-weighted fraction of symmetric (-1) synapses
        ratio = np.sum(syn_sizes[syn_signs == -1]) / float(np.sum(syn_sizes))
    else:
        ratio = np.sum(syn_signs == -1) / float(len(syn_signs))
    return ratio
def aggregate_segmentation_object_mappings(self, obj_types: List[str],
                                           save: bool = False):
    """
    Aggregates organelle-overlap (mapping) information from this SSV's
    supervoxels into :py:attr:`~attr_dict`. Afterwards,
    :func:`~apply_mapping_decision` can be called to apply final assignments.

    Examples:
        A mitochondrion can extend over multiple supervoxels, so it will
        overlap with all of them partially. Here, the overlap information
        of all supervoxels assigned to this SSV will be aggregated.

    Args:
        obj_types: Cell organelles types to process.
        save: Persist :py:attr:`~attr_dict` afterwards.
    """
    assert isinstance(obj_types, list)
    mappings = {obj_type: Counter() for obj_type in obj_types}
    for sv in self.svs:
        sv.load_attr_dict()
        for obj_type in obj_types:
            id_key = "mapping_%s_ids" % obj_type
            if id_key in sv.attr_dict:
                ratio_key = "mapping_%s_ratios" % obj_type
                # sum overlap ratios per organelle ID across supervoxels
                mappings[obj_type] += Counter(
                    dict(zip(sv.attr_dict[id_key], sv.attr_dict[ratio_key])))
    for obj_type in obj_types:
        # obj_type is always present in mappings (possibly an empty Counter)
        self.attr_dict["mapping_%s_ids" % obj_type] = list(mappings[obj_type].keys())
        self.attr_dict["mapping_%s_ratios" % obj_type] = list(mappings[obj_type].values())
    if save:
        self.save_attr_dict()
def apply_mapping_decision(self, obj_type: str,
                           correct_for_background: bool = True,
                           lower_ratio: Optional[float] = None,
                           upper_ratio: Optional[float] = None,
                           sizethreshold: Optional[float] = None,
                           save: bool = True):
    """
    Applies mapping decision of cellular organelles to this SSV object. A
    :class:`~syconn.reps.segmentation.SegmentationObject` in question is
    assigned to this :class:`~syconn.reps.super_segmentation_object.SuperSegmentationObject`
    if they share the highest overlap. For more details see ``SyConn/docs/object_mapping.md``.
    Default parameters for the mapping will be taken from the `config.yml` file.

    Args:
        obj_type: Type of :class:`~syconn.reps.segmentation.SegmentationObject`
            which are to be mapped.
        correct_for_background: Ignore background ID during mapping.
        lower_ratio: Minimum overlap s.t. objects are mapped.
        upper_ratio: Maximum ratio s.t. objects are mapped.
        sizethreshold: Minimum voxel size of an object, objects below will be
            ignored.
        save: If True, :py:attr:`~attr_dict` will be saved.

    Todo:
        * check what ``correct_for_background`` was for. Any usecase for
          ``correct_for_background=False``?
        * duplicate of ssd_proc._apply_mapping_decisions_thread, implement common-use method
    """
    assert obj_type in self.version_dict
    self.load_attr_dict()
    # Both aggregated mapping arrays must exist (see
    # aggregate_segmentation_object_mappings); otherwise there is nothing to decide.
    if "mapping_%s_ratios" % obj_type not in self.attr_dict:
        log_reps.error("No mapping ratios found")
        return
    if "mapping_%s_ids" % obj_type not in self.attr_dict:
        log_reps.error("no mapping ids found")
        return
    if lower_ratio is None:
        try:
            lower_ratio = self.config['cell_objects']["lower_mapping_ratios"][
                obj_type]
        except KeyError:
            msg = "Lower ratio undefined"
            log_reps.error(msg)
            raise ValueError(msg)
    if upper_ratio is None:
        try:
            upper_ratio = self.config['cell_objects']["upper_mapping_ratios"][
                obj_type]
        # NOTE: was a bare `except:`, which also swallowed e.g. KeyboardInterrupt;
        # only a missing config entry is expected here.
        except KeyError:
            log_reps.critical("Upper ratio undefined - 1. assumed")
            upper_ratio = 1.
    if sizethreshold is None:
        try:
            sizethreshold = self.config['cell_objects']["sizethresholds"][obj_type]
        except KeyError:
            msg = "Size threshold undefined"
            log_reps.error(msg)
            raise ValueError(msg)
    obj_ratios = np.array(self.attr_dict["mapping_%s_ratios" % obj_type])
    if correct_for_background:
        # Re-normalize each candidate's overlap ratio by excluding the
        # fraction of the object that overlaps with background (ID 0).
        for i_so_id in range(
                len(self.attr_dict["mapping_%s_ids" % obj_type])):
            so_id = self.attr_dict["mapping_%s_ids" % obj_type][i_so_id]
            obj_version = self.config["versions"][obj_type]
            this_so = SegmentationObject(so_id, obj_type,
                                         version=obj_version,
                                         scaling=self.scaling,
                                         working_dir=self.working_dir)
            this_so.load_attr_dict()
            if 0 in this_so.attr_dict["mapping_ids"]:
                ratio_0 = this_so.attr_dict["mapping_ratios"][
                    this_so.attr_dict["mapping_ids"] == 0][0]
                obj_ratios[i_so_id] /= (1 - ratio_0)
    # Candidates must overlap more than `lower_ratio`; an `upper_ratio` < 1
    # additionally excludes objects dominated by this SSV (see object_mapping.md).
    id_mask = obj_ratios > lower_ratio
    if upper_ratio < 1.:
        id_mask[obj_ratios > upper_ratio] = False
    candidate_ids = \
        np.array(self.attr_dict["mapping_%s_ids" % obj_type])[id_mask]
    self.attr_dict[obj_type] = []
    for candidate_id in candidate_ids:
        obj = SegmentationObject(candidate_id, obj_type=obj_type,
                                 version=self.version_dict[obj_type],
                                 working_dir=self.working_dir, config=self.config)
        # Discard objects smaller than the configured voxel-size threshold.
        if obj.size > sizethreshold:
            self.attr_dict[obj_type].append(candidate_id)
    if save:
        self.save_attr_dict()
def _map_cellobjects(self, obj_types: Optional[List[str]] = None,
save: bool = True):
"""
Wrapper function for mapping all existing cell organelles (as defined in
:py:attr:`~config['process_cell_organelles']`).
Args:
obj_types: Type of :class:`~syconn.reps.super_segmentation_object
.SuperSegmentationObject` which should be mapped.
save: Saves the attribute dict of this SSV object afterwards.
"""
if obj_types is None:
obj_types = self.config['process_cell_organelles']
self.aggregate_segmentation_object_mappings(obj_types, save=save)
for obj_type in obj_types:
# TODO: remove handling of sj?
self.apply_mapping_decision(obj_type, save=save,
correct_for_background=obj_type == "sj")
def clear_cache(self):
    """
    Reset all cached data of this SSV:

        * :py:attr:`~voxels` and :py:attr:`~voxels_xy_downsampled`
        * :py:attr:`~sample_locations`
        * :py:attr:`~_objects` and :py:attr:`~_views`
        * :py:attr:`~skeleton`
        * :py:attr:`~_meshes`
    """
    self._voxels = None
    self._voxels_xy_downsampled = None
    self._views = None
    self._sample_locations = None
    self._objects = {}
    self.skeleton = None
    # Re-initialize the mesh cache with one empty slot per mesh type.
    mesh_types = ("sv", "sj", "syn_ssv", "vc", "mi", "conn",
                  "syn_ssv_sym", "syn_ssv_asym")
    self._meshes = {mt: None for mt in mesh_types}
def preprocess(self):
    """
    Run the standard preprocessing pipeline for this SSV: apply object
    mapping (requires prior assignment of object candidates), warm the mesh
    cache for the cell surface, organelles and synapses, and compute the
    SSV skeleton.
    """
    self.load_attr_dict()
    self._map_cellobjects()
    mesh_types = list(self.config['process_cell_organelles']) + ["sv", "syn_ssv"]
    for mesh_type in mesh_types:
        # Loading once caches the mesh on disk; the return value is not needed.
        self._load_obj_mesh(obj_type=mesh_type, rewrite=False)
    self.calculate_skeleton()
def copy2dir(self, dest_dir: str, safe: bool = True):
    """
    Copies the content at :py:attr:`~ssv_dir` to another directory.

    Examples:
        To copy the data of this SSV object (``ssv_orig``) to another yet not
        existing SSV (``ssv_target``), call ``ssv_orig.copy2dir(ssv_target.ssv_dir)``.
        All files contained in the directory py:attr:`~ssv_dir` of ``ssv_orig``
        will be copied to ``ssv_target.ssv_dir``.

    Args:
        dest_dir: Destination directory where all files contained in
            py:attr:`~ssv_dir` will be copied to.
        safe: If True, will not overwrite existing data.
    """
    # get all files in home directory (pickled attributes and k.zip exports)
    fps = get_filepaths_from_dir(self.ssv_dir, ending=["pkl", "k.zip"])
    fnames = [os.path.split(fname)[1] for fname in fps]
    # Create the destination directory if it does not exist yet.
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    for i in range(len(fps)):
        src_filename = fps[i]
        dest_filename = dest_dir + "/" + fnames[i]
        try:
            safe_copy(src_filename, dest_filename, safe=safe)
            log_reps.debug("Copied %s to %s." % (src_filename, dest_filename))
        except Exception as e:
            # Best-effort copy: log the failure (e.g. existing target when
            # `safe` is True) and continue with the remaining files.
            log_reps.error("Skipped '{}', due to the following error: '{}'"
                           "".format(fnames[i], str(e)))
            pass
    self.load_attr_dict()
    # Merge this SSV's attributes into an already existing attribute dict at
    # the destination instead of overwriting it wholesale.
    if os.path.isfile(dest_dir + "/attr_dict.pkl"):
        dest_attr_dc = load_pkl2obj(dest_dir + "/attr_dict.pkl")
    else:
        dest_attr_dc = {}
    dest_attr_dc.update(self.attr_dict)
    write_obj2pkl(dest_dir + "/attr_dict.pkl", dest_attr_dc)
def partition_cc(self, max_nb_sv: Optional[int] = None,
                 lo_first_n: Optional[int] = None) -> List[List[Any]]:
    """
    Split the supervoxel graph of this SSV into (overlapping) subgraphs.
    Default values are generated from :py:attr:`~.config`.

    Args:
        max_nb_sv: Number of supervoxels per sub-graph. This defines the
            sub-graph context.
        lo_first_n: Do not use first n traversed nodes for new BFS traversals.
            This allows to partition the original supervoxel graph of size
            ``N`` into ``N//lo_first_n`` sub-graphs.

    Returns:
        List of supervoxel partitions.
    """
    if lo_first_n is None:
        lo_first_n = self.config['glia']['subcc_chunk_size_big_ssv']
    if max_nb_sv is None:
        # Extend the chunk size by the overlap consumed at both ends.
        max_nb_sv = self.config['glia']['subcc_size_big_ssv'] + 2 * (lo_first_n - 1)
    return split_subcc_join(self.rag, max_nb_sv, lo_first_n=lo_first_n)
# -------------------------------------------------------------------- VIEWS
def save_views(self, views: np.ndarray, view_key: str = "views"):
    """
    Store a view array at SSV level (NOT for each individual SV!).

    Args:
        views: The view array.
        view_key: The key used for the look-up.
    """
    # Temporary SSVs have no backing storage on disk.
    if self.version == 'tmp':
        log_reps.warning('"save_views" called but this SSV '
                         'has version "tmp", views will'
                         ' not be saved to disk.')
        return
    storage = CompressedStorage(self.view_path, read_only=False,
                                disable_locking=not self.enable_locking)
    storage[view_key] = views
    storage.push()
def load_views(self, view_key: Optional[str] = None, woglia: bool = True,
               raw_only: bool = False, force_reload: bool = False,
               nb_cpus: Optional[int] = None, ignore_missing: bool = False,
               index_views: bool = False) -> np.ndarray:
    """
    Load views which are stored in :py:attr:`~view_dict` or if not present attempts
    to retrieve data from :py:attr:`view_path` given the key `view_key`,
    i.e. this operates on SSV level. If the given key does not exist on
    :class:`~SuperSegmentationObject` level or is None, attempts to load the views from
    the underlying :class:`~syconn.reps.segmentation.SegmentationObject`s.

    Args:
        view_key: The key used for the look-up.
        woglia: If True, will load the views render from the glia-free agglomeration.
        raw_only: If True, will only return the cell shape channel in the views.
        force_reload: If True will force reloading the SV views.
        nb_cpus: Number of CPUs.
        ignore_missing: If True, it will not raise KeyError if SV does not exist.
        index_views: Views which contain the indices of the vertices at the respective pixels.
            Used as look-up to map the predicted semantic labels onto the mesh vertices.

    Returns:
        Concatenated views for each SV in self.svs with shape [N_LOCS, N_CH, N_VIEWS, X, Y].
    """
    # Cache level 1: in-memory view dict.
    if self.view_caching and view_key in self.view_dict:
        # self.view_dict stores list of views with length of sample_locations
        return self.view_dict[view_key]
    # Cache level 2: SSV-level compressed storage on disk.
    view_dc = CompressedStorage(self.view_path, read_only=True,
                                disable_locking=not self.enable_locking)
    if view_key in view_dc and not force_reload:
        if self.view_caching:
            self.view_dict[view_key] = view_dc[view_key]
            return self.view_dict[view_key]
        return view_dc[view_key]
    del view_dc  # delete previous initialized view dictionary
    # Fallback: collect views from the underlying SVs and concatenate them.
    params = [[sv, {'woglia': woglia, 'raw_only': raw_only, 'index_views':
        index_views, 'ignore_missing': ignore_missing,
                    'view_key': view_key}] for sv in self.svs]
    # load views from underlying SVs
    views = sm.start_multiprocess_obj("load_views", params,
                                      nb_cpus=self.nb_cpus
                                      if nb_cpus is None else nb_cpus)
    views = np.concatenate(views)
    # stores list of views with length of sample_locations
    if self.view_caching and view_key is not None:
        self.view_dict[view_key] = views
    return views
def view_existence(self, woglia: bool = True, index_views: bool = False,
                   view_key: Optional[str] = None) -> List[bool]:
    """
    Check whether a specific set of views exists for every SV of this SSV.

    Args:
        woglia: If True, will check the views rendered from the glia-free
            agglomeration.
        index_views: Views which contain the indices of the vertices at the
            respective pixels. Used as look-up to map the predicted semantic
            labels onto the mesh vertices.
        view_key: The key used for the look-up.

    Returns:
        One bool per SV in :py:attr:`~sv_ids`; True if that SV's views exist.
    """
    # Several SVs may share the same storage file; deduplicate first.
    storage_paths = set(sv.view_path(woglia=woglia, index_views=index_views,
                                     view_key=view_key) for sv in self.svs)
    available = []
    for path in storage_paths:
        available += list(CompressedStorage(path, disable_locking=True).keys())
    available = set(available).intersection(self.sv_ids)
    return [sv_id in available for sv_id in self.sv_ids]
def render_views(self, add_cellobjects: bool = False, verbose: bool = False,
                 overwrite: bool = True, cellobjects_only: bool = False,
                 woglia: bool = True, skip_indexviews: bool = False):
    """
    Renders views for each SV based on SSV context and stores them
    on SV level. Usually only used once: for initial glia or axoness
    prediction.
    The results will be saved distributed at each
    class:`~syconn.reps.segmentation.SegmentationObject` of this object.
    It is not cached in :py:attr:`view_dict` nor :py:attr:`view_path`.
    Used during initial glia, compartment and cell type predictions.
    See :func:`~_render_rawviews` for how to store views in the SSV storage, which
    is e.g. used during GT generation.

    Args:
        add_cellobjects: Add cellular organelle channels in the 2D projection views.
        verbose: Log additional information.
        overwrite: Re-render at all rendering locations.
        cellobjects_only: Render only cellular organelle channels. Currently not in use.
        woglia: If True, will load the views render from the glia-free agglomeration.
        skip_indexviews: Index views will not be generated, used for initial SSV
            glia-removal rendering.
    """
    # TODO: partial rendering currently does not support index view generation (-> vertex
    #  indices will be different for each partial mesh)
    # Very large SSVs (before glia removal) are rendered in parts via batch jobs.
    if len(self.sv_ids) > self.config['glia']['rendering_max_nb_sv'] and not woglia:
        if not skip_indexviews:
            raise ValueError('Index view rendering is currently not supported with partial '
                             'cell rendering.')
        part = self.partition_cc()
        log_reps.info('Partitioned huge SSV into {} subgraphs with each {}'
                      ' SVs.'.format(len(part), len(part[0])))
        log_reps.info("Rendering SSO. {} SVs left to process"
                      ".".format(len(self.sv_ids)))
        # One job parameter set per subgraph, chunked over the available GPUs.
        params = [[so.id for so in el] for el in part]
        params = chunkify(params, self.config.ngpu_total * 2)
        so_kwargs = {'version': self.svs[0].version,
                     'working_dir': self.working_dir,
                     'obj_type': self.svs[0].type}
        render_kwargs = {"overwrite": overwrite, 'woglia': woglia,
                         "render_first_only": self.config['glia']['subcc_chunk_size_big_ssv'],
                         'add_cellobjects': add_cellobjects,
                         "cellobjects_only": cellobjects_only,
                         'skip_indexviews': skip_indexviews}
        params = [[par, so_kwargs, render_kwargs] for par in params]
        qu.batchjob_script(
            params, "render_views_partial", suffix="_SSV{}".format(self.id),
            n_cores=self.config['ncores_per_node'] // self.config['ngpus_per_node'],
            remove_jobfolder=True, additional_flags="--gres=gpu:1")
    else:
        # render raw data; keep the rotation matrices so index views are
        # rendered with the exact same camera orientations.
        rot_mat = render_sampled_sso(
            self, add_cellobjects=add_cellobjects, verbose=verbose, overwrite=overwrite,
            return_rot_mat=True, cellobjects_only=cellobjects_only, woglia=woglia)
        if skip_indexviews:
            return
        # render index views
        render_sampled_sso(self, verbose=verbose, overwrite=overwrite,
                           index_views=True, rot_mat=rot_mat)
def render_indexviews(self, nb_views=2, save=True, force_recompute=False,
                      verbose=False, view_key=None, ws=None, comp_window=None):
    """
    Render SSV index views in case a non-default number of views is required.
    Will be stored in the SSV view dict. Default raw/index/prediction views
    are stored decentralized in corresponding SVs.

    Args:
        nb_views: int
            Number of views per rendering location.
        save: bool
            Store the rendered views via :func:`~save_views`.
        force_recompute: bool
            Re-render even if views are already stored under `view_key`.
        verbose: bool
        view_key: Optional[str]
            key used for storing view array. Default: 'index{}'.format(nb_views)
        ws: Tuple[int]
            Window size in pixels [y, x]
        comp_window: float
            Physical extent in nm of the view-window along y (see `ws` to infer pixel size)

    Returns: np.array
        The index views, only if `save` is False.
    """
    if view_key is None:
        view_key = 'index{}'.format(nb_views)
    if not force_recompute:
        # Reuse previously rendered views if they exist under `view_key`.
        try:
            views = self.load_views(view_key)
            if not save:
                return views
            else:
                return
        except KeyError:
            pass
    locs = np.concatenate(self.sample_locations(cache=False))
    if self._rot_mat is None:
        # First rendering pass: also cache the rotation matrices so that
        # subsequent raw/index renderings use identical camera orientations.
        index_views, rot_mat = render_sso_coords_index_views(
            self, locs, nb_views=nb_views, verbose=verbose,
            return_rot_matrices=True, ws=ws, comp_window=comp_window)
        self._rot_mat = rot_mat
    else:
        index_views = render_sso_coords_index_views(self, locs, nb_views=nb_views,
                                                    verbose=verbose,
                                                    rot_mat=self._rot_mat, ws=ws,
                                                    comp_window=comp_window)
    if self.view_caching:
        self.view_dict[view_key] = index_views
    if not save:
        return index_views
    self.save_views(index_views, view_key)
def _render_rawviews(self, nb_views=2, save=True, force_recompute=False,
                     add_cellobjects=True, verbose=False, view_key=None,
                     ws=None, comp_window=None):
    """
    Render SSV raw views in case a non-default number of views is required.
    Will be stored in the SSV view dict. Default raw/index/prediction views
    are stored decentralized in corresponding SVs.

    Args:
        nb_views: int
            Number of views per rendering location.
        save: bool
            Store the rendered views via :func:`~save_views`.
        force_recompute: bool
            Re-render even if views are already stored under `view_key`.
        add_cellobjects: bool
            Add cellular organelle channels to the projections.
        verbose: bool
        view_key: Optional[str]
            key used for storing view array. Default: 'raw{}'.format(nb_views)
        ws: Tuple[int]
            Window size in pixels [y, x]
        comp_window: float
            Physical extent in nm of the view-window along y (see `ws` to infer pixel size)

    Returns: np.array
        The raw views, only if `save` is False.
    """
    if view_key is None:
        view_key = 'raw{}'.format(nb_views)
    if not force_recompute:
        # Reuse previously rendered views if they exist under `view_key`.
        try:
            views = self.load_views(view_key)
            if not save:
                return views
            return
        except KeyError:
            pass
    locs = np.concatenate(self.sample_locations(cache=False))
    if self._rot_mat is None:
        # First rendering pass: also cache the rotation matrices so that
        # subsequent raw/index renderings use identical camera orientations.
        views, rot_mat = render_sso_coords(self, locs, verbose=verbose, ws=ws,
                                           add_cellobjects=add_cellobjects, comp_window=comp_window,
                                           nb_views=nb_views, return_rot_mat=True)
        self._rot_mat = rot_mat
    else:
        views = render_sso_coords(self, locs, verbose=verbose, ws=ws,
                                  add_cellobjects=add_cellobjects, comp_window=comp_window,
                                  nb_views=nb_views, rot_mat=self._rot_mat)
    if self.view_caching:
        self.view_dict[view_key] = views
    if save:
        self.save_views(views, view_key)
    else:
        return views
def predict_semseg(self, m, semseg_key, nb_views=None, verbose=False,
                   raw_view_key=None, save=False, ws=None, comp_window=None,
                   add_cellobjects: Union[bool, Iterable] = True, bs: int = 10):
    """
    Generates label views based on input model and stores it under the key
    'semseg_key', either within the SSV's SVs or in an extra view-storage
    according to input parameters:

    Default situation (nb_views and raw_view_key is None):
        ``semseg_key = 'spiness'``, ``nb_views=None``
        This will load the raw views stored at the SSV's SVs.
    Non-default (nb_views or raw_view_key is not None):
        ``semseg_key = 'spiness4'``, ``nb_views=4``
        This requires to run ``self._render_rawviews(nb_views=4)``.
        This method then has to be called like:
        ``self.predict_semseg(m, 'spiness4', nb_views=4)``

    Parameters
    ----------
    m : The semantic segmentation model used for inference.
    semseg_key : str
    nb_views : Optional[int]
    verbose : bool
    raw_view_key : str
        key used for storing view array within SSO directory. Default: 'raw{}'.format(nb_views)
        If key does not exist, views will be re-rendered with properties defined
        in :py:attr:`~config` or as given in the kwargs `ws`, `nb_views` and `comp_window`.
    save : bool
        If True, views will be saved.
    ws : Tuple[int]
        Window size in pixels [y, x]
    comp_window : float
        Physical extent in nm of the view-window along y (see `ws` to infer pixel size)
    add_cellobjects: Add cell objects. Either bool or list of structures used to render. Only
        used when `raw_view_key` or `nb_views` is None - then views are rendered on-the-fly.
    bs: Batch size during inference.
    """
    view_props_default = self.config['views']['view_properties']
    if (nb_views is not None) or (raw_view_key is not None):
        # treat as special view rendering: views live in the SSV-level storage
        if nb_views is None:
            nb_views = view_props_default['nb_views']
        if raw_view_key is None:
            raw_view_key = 'raw{}'.format(nb_views)
        if raw_view_key in self.view_dict:
            views = self.load_views(raw_view_key)
        else:
            # Views not rendered yet -> render them now with the given properties.
            self._render_rawviews(nb_views, ws=ws, comp_window=comp_window, save=save,
                                  view_key=raw_view_key, verbose=verbose,
                                  force_recompute=True, add_cellobjects=add_cellobjects)
            views = self.load_views(raw_view_key)
        if len(views) != len(np.concatenate(self.sample_locations(cache=False))):
            raise ValueError("Unequal number of views and redering locations.")
        labeled_views = ssh.predict_views_semseg(views, m, verbose=verbose, batch_size=bs)
        assert labeled_views.shape[2] == nb_views, \
            "Predictions have wrong shape."
        if self.view_caching:
            self.view_dict[semseg_key] = labeled_views
        if save:
            self.save_views(labeled_views, semseg_key)
    else:
        # treat as default view rendering: views live decentralized at the SVs
        views = self.load_views()
        locs = self.sample_locations(cache=False)
        assert len(views) == len(np.concatenate(locs)), \
            "Unequal number of views and rendering locations."
        # re-order number of views according to SV rendering locations
        # TODO: move view reordering to 'pred_svs_semseg', check other usages before!
        reordered_views = []
        cumsum = np.cumsum([0] + [len(el) for el in locs])
        for ii in range(len(locs)):
            sv_views = views[cumsum[ii]:cumsum[ii + 1]]
            reordered_views.append(sv_views)
        if self.version == 'tmp':
            log_reps.warning('"predict_semseg" called but this SSV '
                             'has version "tmp", results will'
                             ' not be saved to disk.')
        ssh.pred_svs_semseg(m, reordered_views, semseg_key, self.svs,
                            nb_cpus=self.nb_cpus, verbose=verbose,
                            return_pred=self.version == 'tmp', bs=bs)  # do not write to disk
def semseg2mesh(self, semseg_key: str, dest_path: Optional[str] = None,
                nb_views: Optional[int] = None, k: int = 1,
                force_recompute: bool = False,
                index_view_key: Optional[str] = None):
    """
    Generates vertex labels and stores it in the SSV's label storage under
    the key `semseg_key`.

    Examples:
        Default situation:
            ``semseg_key = 'spiness'``, ``nb_views=None``
            This will load the index and label views stored at the SSV's SVs.
        Non-default:
            ``semseg_key = 'spiness4'``, ``nb_views=4``
            This requires to run ``self._render_rawviews(nb_views=4)``,
            ``self.render_indexviews(nb_views=4)`` and ``predict_semseg(MODEL,
            'spiness4', nb_views=4)``.
            This method then has to be called like: ``self.semseg2mesh('spiness4', nb_views=4)``

    Args:
        semseg_key: Key used to retrieve the semantic segmentation results.
        dest_path: Path where the mesh will be stored as .ply in a k.zip.
        nb_views: Number of views used.
        k: Number of nearest vertices to average over. If k=0 unpredicted vertices
            will be treated as 'unpredicted' class.
        force_recompute: Force recompute.
        index_view_key: Key used to retrieve the index views.
    """
    # colors are only needed if dest_path is given
    # (last two colors correspond to background and undpredicted vertices (k=0))
    cols = None
    if dest_path is not None:
        # Each branch defines one RGBA color per semantic class of the
        # respective model (plus background/unpredicted at the end).
        if 'spiness' in semseg_key or 'dnho' in semseg_key or 'do' in semseg_key:
            cols = np.array([[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                             [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                             [0.9, 0.9, 0.9, 1], [0.1, 0.1, 0.9, 1]])
        elif 'axon' in semseg_key:
            # cols = np.array([[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
            #                  [0.1, 0.1, 0.1, 1], [0.9, 0.9, 0.9, 1],
            #                  [0.1, 0.1, 0.9, 1]])
            # dendrite, axon, soma, bouton, terminal, background, unpredicted
            cols = np.array([[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                             [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                             [0.8, 0.8, 0.1, 1], [0.9, 0.9, 0.9, 1],
                             [0.1, 0.1, 0.9, 1]])
        elif 'ads' in semseg_key:
            # dendrite, axon, soma, unpredicted
            cols = np.array([[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                             [0.1, 0.1, 0.1, 1], [0.1, 0.1, 0.9, 1]])
        elif 'abt' in semseg_key:
            # axon, bouton, terminal, unpredicted
            cols = np.array([[0.9, 0.2, 0.2, 1], [0.05, 0.6, 0.6, 1],
                             [0.8, 0.8, 0.1, 1], [0.1, 0.1, 0.9, 1]])
        elif 'dnh' in semseg_key:
            # dendrite, neck, head, unpredicted
            cols = np.array([[0.6, 0.6, 0.6, 1], [0.1, 0.1, 0.1, 1],
                             [0.9, 0.2, 0.2, 1], [0.1, 0.1, 0.9, 1]])
        elif '3models' in semseg_key or 'dasbt' in semseg_key:
            # dendrite, axon, soma, bouton, terminal, neck, head, unpredicted
            cols = np.array([[0.6, 0.6, 0.6, 1], [0.6, 0.1, 0.1, 1],
                             [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                             [0.4, 0.4, 0.8, 1], [0.8, 0.8, 0.1, 1],
                             [0.9, 0.4, 0.4, 1], [0.1, 0.1, 0.9, 1]])
        else:
            raise ValueError('Semantic segmentation of "{}" is not supported.'
                             ''.format(semseg_key))
        # Convert normalized floats to 8-bit RGBA.
        cols = (cols * 255).astype(np.uint8)
    return ssh.semseg2mesh(self, semseg_key, nb_views, dest_path, k,
                           cols, force_recompute=force_recompute,
                           index_view_key=index_view_key)
def semseg_for_coords(self, coords: np.ndarray, semseg_key: str, k: int = 5,
                      ds_vertices: int = 20,
                      ignore_labels: Optional[Iterable[int]] = None):
    """
    Get the semantic segmentation with key `semseg_key` from the `k` nearest
    vertices at every coordinate in `coords`.

    Args:
        coords: np.array
            Voxel coordinates, unscaled! [N, 3]
        semseg_key: str
        k: int
            Number of nearest neighbors (NN) during k-NN classification
        ds_vertices: int
            striding factor for vertices, uses ``max(1, ds_vertices // 10)`` if
            ``len(vertices) < 5e6``.
        ignore_labels: List[int]
            Vertices with labels in `ignore_labels` will be ignored during
            majority vote, e.g. used to exclude unpredicted vertices.

    Returns: np.array
        Same length as `coords`. For every coordinate in `coords` returns the
        majority label based on its k-nearest neighbors.
    """
    # TODO: Allow multiple keys as in self.attr_for_coords, e.g. to
    #  include semseg axoness in a single query
    if ignore_labels is None:
        ignore_labels = []
    # Convert voxel coordinates to physical (nm) space to match vertices.
    coords = np.array(coords) * self.scaling
    vertices = self.mesh[1].reshape((-1, 3))
    if len(vertices) == 0:
        return np.zeros((0, ), dtype=np.int32)
    # Small meshes can afford a finer stride.
    if len(vertices) < 5e6:
        ds_vertices = max(1, ds_vertices // 10)
    vertex_labels = self.label_dict('vertex')[semseg_key][::ds_vertices]
    if np.ndim(vertex_labels) == 2:
        vertex_labels = vertex_labels.squeeze(1)
    vertices = vertices[::ds_vertices]
    # Drop vertices with labels that must not participate in the vote.
    for ign_l in ignore_labels:
        vertices = vertices[vertex_labels != ign_l]
        vertex_labels = vertex_labels[vertex_labels != ign_l]
    if len(vertex_labels) != len(vertices):
        raise ValueError('Size of vertices and their labels does not match!')
    if len(vertices) < k:
        log_reps.warning(f'Number of vertices ({len(vertices)}) is less than the given '
                         f'value of k ({k}). Setting k to lower value.')
        k = len(vertices)
    maj_vote = colorcode_vertices(coords, vertices, vertex_labels, k=k,
                                  return_color=False, nb_cpus=self.nb_cpus)
    return maj_vote
def get_spine_compartments(self, semseg_key: str = 'spiness', k: int = 1,
                           min_spine_cc_size: Optional[int] = None,
                           dest_folder: Optional[str] = None) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Retrieve connected components of vertex spine predictions.

    Args:
        semseg_key: Key of the used semantic segmentation.
        k: Number of nearest neighbors for majority label vote (smoothing of
            classification results).
        min_spine_cc_size: Minimum number of vertices to consider a connected
            component a valid object.
        dest_folder: Default is None, else provide a path (str) to a folder.
            The mean location and size of the head and neck connected
            components will be stored as numpy array file (npy).

    Returns:
        Neck locations, neck sizes, head locations, head sizes. Location
        and size arrays have the same ordering.
    """
    if min_spine_cc_size is None:
        min_spine_cc_size = self.config['spines']['min_spine_cc_size']
    vertex_labels = self.label_dict('vertex')[semseg_key]
    vertices = self.mesh[1].reshape((-1, 3))
    max_dist = self.config['spines']['min_edge_dist_spine_graph']
    g = create_graph_from_coords(vertices, force_single_cc=True,
                                 max_dist=max_dist)
    # Cut every edge that connects differently labeled vertices, so connected
    # components become label-pure regions.
    g_orig = g.copy()
    for e in g_orig.edges():
        l0 = vertex_labels[e[0]]
        l1 = vertex_labels[e[1]]
        if l0 != l1:
            g.remove_edge(e[0], e[1])
    log_reps.info("Starting connected components for SSV {}."
                  "".format(self.id))
    all_ccs = list(sorted(nx.connected_components(g), key=len,
                          reverse=True))
    log_reps.info("Finished connected components for SSV {}."
                  "".format(self.id))
    sizes = np.array([len(c) for c in all_ccs])
    # `sizes` is sorted descending; truncate at the first component below the
    # size threshold. NB: `np.argmax` returns 0 when no element is True (and
    # raises on an empty array), which previously discarded every component
    # when all of them were large enough -- handle that case explicitly.
    below_thresh = sizes < min_spine_cc_size
    if below_thresh.any():
        thresh_ix = int(np.argmax(below_thresh))
    else:
        thresh_ix = len(sizes)
    all_ccs = all_ccs[:thresh_ix]
    sizes = sizes[:thresh_ix]
    cc_labels = []
    cc_coords = []
    for c in all_ccs:
        curr_v_ixs = list(c)
        curr_v_l = vertex_labels[curr_v_ixs]
        curr_v_c = vertices[curr_v_ixs]
        # After edge cutting every cc must be label-pure.
        if len(np.unique(curr_v_l)) != 1:
            msg = '"get_spine_compartments": Connected component ' \
                  'contains multiple labels.'
            log_reps.error(msg)
            raise ValueError(msg)
        cc_labels.append(curr_v_l[0])
        cc_coords.append(np.mean(curr_v_c, axis=0))
    cc_labels = np.array(cc_labels)
    cc_coords = np.array(cc_coords)
    np.random.seed(0)
    # Label 0: spine neck, label 1: spine head. Convert mean coordinates back
    # to voxel space.
    neck_c = (cc_coords[cc_labels == 0] / self.scaling).astype(np.uint64)
    neck_s = sizes[cc_labels == 0]
    head_c = (cc_coords[cc_labels == 1] / self.scaling).astype(np.uint64)
    head_s = sizes[cc_labels == 1]
    if dest_folder is not None:
        np.save("{}/neck_coords_ssv{}_k{}_{}_ccsize{}.npy".format(
            dest_folder, self.id, k, semseg_key, min_spine_cc_size), neck_c)
        np.save("{}/head_coords_ssv{}_k{}_{}_ccsize{}.npy".format(
            dest_folder, self.id, k, semseg_key, min_spine_cc_size), head_c)
    return neck_c, neck_s, head_c, head_s
def sample_locations(self, force=False, cache=True, verbose=False,
                     ds_factor=None):
    """
    Compute (or load cached) rendering sample locations for every SV.

    Args:
        force: bool
            force resampling of locations
        cache: bool
            save sample location in SSO attribute dict
        verbose: bool
            log timing information
        ds_factor: float
            Downscaling factor to generate locations

    Returns: list of array
        Sample coordinates for each SV in self.svs.
    """
    # Temporary SSVs cannot persist attributes.
    if self.version == 'tmp' and cache:
        cache = False
    if not force and self._sample_locations is not None:
        return self._sample_locations
    if not force:
        if self.attr_exists("sample_locations"):
            return self.attr_dict["sample_locations"]
    if verbose:
        start = time.time()
    params = [[sv, {"force": force, 'save': cache,
                    'ds_factor': ds_factor}] for sv in self.svs]
    # list of arrays
    # TODO: currently does not support multiprocessing
    locs = sm.start_multiprocess_obj("sample_locations", params,
                                     nb_cpus=1)  # self.nb_cpus)
    if cache:
        self.save_attributes(["sample_locations"], [locs])
    if verbose:
        dur = time.time() - start
        # NOTE: fixed broken format spec '{.4f}' (attribute access on the
        # argument, raising AttributeError at runtime) -> '{:.4f}'.
        log_reps.debug("Sampling locations from {} SVs took {:.2f}s."
                       " {:.4f}s/SV (incl. read/write)".format(
            len(self.sv_ids), dur, dur / len(self.sv_ids)))
    return locs
# ------------------------------------------------------------------ EXPORTS
def pklskel2kzip(self):
    """
    Convert the pickled skeleton of this SSV (nodes and edges) into a
    KNOSSOS skeleton annotation and write it to the SSV's skeleton k.zip.
    """
    self.load_skeleton()
    es = self.skeleton["edges"]
    ns = self.skeleton["nodes"]
    a = skeleton.SkeletonAnnotation()
    a.scaling = self.scaling
    a.comment = "skeleton"
    # Re-create nodes per edge from the stored coordinates and connect them.
    for e in es:
        n0 = skeleton.SkeletonNode().from_scratch(a, ns[e[0]][0],
                                                  ns[e[0]][1], ns[e[0]][2])
        n1 = skeleton.SkeletonNode().from_scratch(a, ns[e[1]][0],
                                                  ns[e[1]][1], ns[e[1]][2])
        a.addNode(n0)
        a.addNode(n1)
        a.addEdge(n0, n1)
    write_skeleton_kzip(self.skeleton_kzip_path, a)
def write_locations2kzip(self, dest_path: Optional[str] = None):
    """
    Write the rendering sample locations of this SSV as a skeleton
    annotation (nodes only, no edges) into a k.zip file.

    Args:
        dest_path: Target k.zip path; defaults to
            :py:attr:`~skeleton_kzip_path_views`. A missing '.k.zip' suffix
            is appended automatically.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    elif not dest_path.endswith('.k.zip'):
        dest_path += '.k.zip'
    locations = np.concatenate(self.sample_locations())
    annotation = coordpath2anno(locations, add_edges=False)
    annotation.setComment("sample_locations")
    write_skeleton_kzip(dest_path, [annotation])
def mergelist2kzip(self, dest_path: Optional[str] = None):
    """
    Write the KNOSSOS mergelist of this SSV into a k.zip file.

    Args:
        dest_path: Target k.zip path; defaults to
            :py:attr:`~skeleton_kzip_path`.
    """
    if len(self.attr_dict) == 0:
        self.load_attr_dict()
    mergelist = knossos_ml_from_sso(self)
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    write_txt2kzip(dest_path, mergelist, "mergelist.txt")
def mesh2kzip(self, dest_path: Optional[str] = None, obj_type: str = "sv",
              ext_color: Optional[np.ndarray] = None, **kwargs):
    """
    Writes mesh of SSV to kzip as .ply file.

    Args:
        dest_path: Target k.zip path; defaults to
            :py:attr:`~skeleton_kzip_path`.
        obj_type: str
            'sv' for cell surface, 'mi': mitochondria, 'vc': vesicle clouds,
            'sj': synaptic junctions
        ext_color: np.array of scalar
            If scalar, it has to be an integer between 0 and 255.
            If array, it has to be of type uint/int and of shape (N, 4) while N
            is the number of vertices of the SSV cell surface mesh:
            N = len(self.mesh[1].reshape((-1, 3)))
    """
    color = None
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    # TODO: revisit re-definition of `obj_type` to 'sj'.
    if obj_type == "syn_ssv":
        mesh = self.syn_ssv_mesh
        # also store it as 'sj' s.t. `init_sso_from_kzip` can use it for rendering.
        # TODO: add option to rendering code which enables rendering of arbitrary cell organelles
        obj_type = 'sj'
    else:
        mesh = self.load_mesh(obj_type)
    if ext_color is not None:
        if type(ext_color) is list:
            ext_color = np.array(ext_color)
        # A scalar 0 means "use default coloring".
        if np.isscalar(ext_color) and ext_color == 0:
            color = None
        elif np.isscalar(ext_color):
            color = ext_color
        elif type(ext_color) is np.ndarray:
            if ext_color.ndim != 2:
                msg = "'ext_color' is numpy array of dimension {}." \
                      " Only 2D arrays are allowed.".format(ext_color.ndim)
                log_reps.error(msg)
                raise ValueError(msg)
            if ext_color.shape[1] == 3:
                # add alpha channel (fully opaque) to RGB-only input
                alpha_sh = (len(ext_color), 1)
                alpha_arr = (np.ones(alpha_sh) * 255).astype(ext_color.dtype)
                ext_color = np.concatenate([ext_color, alpha_arr], axis=1)
            color = ext_color.flatten()
    write_mesh2kzip(dest_path, mesh[0], mesh[1], mesh[2], color,
                    ply_fname=obj_type + ".ply", **kwargs)
def meshes2kzip(self, dest_path: Optional[str] = None, sv_color: Optional[np.ndarray] = None,
                synssv_instead_sj: bool = True, object_types: Optional[List[str]] = None, **kwargs):
    """
    Write SV, mitochondria, vesicle cloud and synaptic junction meshes to a
    k.zip file.

    Args:
        dest_path: str
            Target k.zip path; defaults to :py:attr:`~skeleton_kzip_path`.
        sv_color: np.array
            array with RGBA values or None to use default values
            (see :func:`~mesh2kzip`).
        synssv_instead_sj: bool
            If True, export 'syn_ssv' objects in place of 'sj'.
        object_types: List[str]
            Objects to export.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    if object_types is None:
        object_types = ["sj", "vc", "mi", "sv"]
    # The export order determines the rendering order in KNOSSOS.
    for obj_type in object_types:
        if synssv_instead_sj and obj_type == "sj":
            obj_type = 'syn_ssv'
        color = sv_color if obj_type == "sv" else None
        self.mesh2kzip(obj_type=obj_type, dest_path=dest_path,
                       ext_color=color, **kwargs)
def mesh2file(self, dest_path=None, center=None, color=None, scale=None, obj_type='sv'):
    """
    Write a mesh to file (e.g. .ply, .stl, .obj) via the 'openmesh' library.
    If possible, writes it as binary.

    Args:
        dest_path: str
            Output file path.
        center: np.array
            scaled center coordinates (in nm).
        color: np.array
            Either single color (1D; will be applied to all vertices) or
            per-vertex color array (2D).
        scale: float
            Multiplies vertex locations after centering.
        obj_type: str
            Defines the object type which is used for loading the mesh
            via :func:`~load_mesh`.
    """
    mesh = self.load_mesh(obj_type)
    mesh2obj_file(dest_path, mesh, center=center, color=color, scale=scale)
def export2kzip(self, dest_path: str, attr_keys: Iterable[str] = ('skeleton',),
                rag: Optional[nx.Graph] = None,
                sv_color: Optional[np.ndarray] = None, individual_sv_meshes: bool = True,
                object_meshes: Optional[tuple] = None, synssv_instead_sj: bool = True):
    """
    Writes the SSO to a KNOSSOS loadable kzip including the mergelist
    (:func:`~mergelist2kzip`), its meshes (:func:`~meshes2kzip`), data set
    specific information and additional data (`attr_keys`). Mesh colors are
    RGBA values in the range 0 to 255. Saved SSO can also be re-loaded as an
    SSO instance via :func:`~syconn.proc.ssd_assembly.init_sso_from_kzip`.
    Todo:
        * Switch to .json format for storing meta information.
    Notes:
        Will not invoke :func:`~load_attr_dict`.
    Args:
        dest_path: Path to destination kzip file.
        attr_keys: Currently allowed: 'sample_locations', 'skeleton',
            'attr_dict', 'rag'.
        rag: SV graph of SSV with uint nodes.
        sv_color: Cell supervoxel colors. Array with RGBA (0...255) values
            or None to use default values (see :func:`~mesh2kzip`).
        individual_sv_meshes: Export meshes of cell supervoxels individually.
        object_meshes: Defaults to subcellular organelles defined in config.yml
            ('process_cell_organelles').
        synssv_instead_sj: If True, will use 'syn_ssv' objects instead of 'sj'.
    Raises:
        FileExistsError: If a file already exists at `dest_path`.
        ValueError: If `attr_keys` contains an unsupported attribute.
    """
    # # The next two calls are deprecated but might be useful at some point
    # self.save_skeleton_to_kzip(dest_path=dest_path)
    # self.save_objects_to_kzip_sparse(["mi", "sj", "vc"],
    #                                  dest_path=dest_path)
    if not dest_path.endswith('.k.zip'):
        dest_path += '.k.zip'
    if os.path.isfile(dest_path):
        raise FileExistsError(f'k.zip file already exists at "{dest_path}".')
    # temporary files written to disk and their target names inside the archive
    tmp_dest_p = []
    target_fnames = []
    attr_keys = list(attr_keys)
    if 'rag' in attr_keys:
        if rag is None and not os.path.isfile(self.edgelist_path):
            log_reps.warn("Could not find SV graph of SSV {}. Please"
                          " pass `sv_graph` as kwarg.".format(self))
        else:
            tmp_dest_p.append('{}_rag.bz2'.format(dest_path))
            target_fnames.append('rag.bz2')
            if rag is None:
                rag = self.sv_graph_uint
            nx.write_edgelist(rag, tmp_dest_p[-1])
        # 'rag' is handled above; it must not be pickled like the other attributes
        attr_keys.remove('rag')
    if object_meshes is None:
        object_meshes = list(self.config['process_cell_organelles']) + ['sv', 'syn_ssv']
    else:
        object_meshes = list(object_meshes)
    allowed_attributes = ('sample_locations', 'skeleton', 'attr_dict')
    for attr in attr_keys:
        if attr not in allowed_attributes:
            raise ValueError('Invalid attribute specified. Currently suppor'
                             'ted attributes for export: {}'.format(allowed_attributes))
        if attr == 'skeleton' and self.skeleton is None:
            self.load_skeleton()
        tmp_dest_p.append('{}_{}.pkl'.format(dest_path, attr))
        target_fnames.append('{}.pkl'.format(attr))
        sso_attr = getattr(self, attr)
        # methods (e.g. sample_locations) are called; plain attributes are used as-is
        if hasattr(sso_attr, '__call__'):
            sso_attr = sso_attr()
        write_obj2pkl(tmp_dest_p[-1], sso_attr)
    # always write meta dict
    tmp_dest_p.append('{}_{}.pkl'.format(dest_path, 'meta'))
    target_fnames.append('{}.pkl'.format('meta'))
    write_obj2pkl(tmp_dest_p[-1], {'version_dict': self.version_dict,
                                   'scaling': self.scaling,
                                   'working_dir': self.working_dir,
                                   'sso_id': self.id})
    # write all data
    data2kzip(dest_path, tmp_dest_p, target_fnames)
    if individual_sv_meshes and 'sv' in object_meshes:
        object_meshes.remove('sv')
        self.write_svmeshes2kzip(dest_path, force_overwrite=False)
    self.meshes2kzip(dest_path=dest_path, sv_color=sv_color, force_overwrite=False,
                     synssv_instead_sj=synssv_instead_sj, object_types=object_meshes)
    self.mergelist2kzip(dest_path=dest_path)
    if 'skeleton' in attr_keys:
        self.save_skeleton_to_kzip(dest_path=dest_path)
def typedsyns2mesh(self, dest_path: Optional[str] = None, rewrite: bool = False):
    """
    Generates typed meshes of 'syn_ssv' and stores it at :py:attr:`~mesh_dc_path`
    (keys: ``'syn_ssv_sym'`` and ``'syn_ssv_asym'``) and writes it to `dest_path` (if given).
    Accessed with the respective keys via :py:attr:`~load_mesh`.
    Synapse types are looked up in the 'syn_ssv' AttributeDicts and treated as follows:
        * excitatory / asymmetric: 1
        * inhibitory / symmetric: -1

    Args:
        dest_path: Optional output path for the synapse meshes.
        rewrite: Ignore existing meshes in :py:attr:`~_meshes` or at :py:attr:`~mesh_dc_path`.

    Raises:
        ValueError: If a synapse sign other than -1 or 1 is encountered.
    """
    if not rewrite and self.mesh_exists('syn_ssv_sym') and self.mesh_exists('syn_ssv_asym') \
            and not self.version == "tmp":
        return
    syn_signs = load_so_attr_bulk(self.syn_ssv, 'syn_sign', use_new_subfold=self.config.use_new_subfold)
    sym_syns = []
    asym_syns = []
    for syn in self.syn_ssv:
        syn_sign = syn_signs[syn.id]
        if syn_sign == -1:
            sym_syns.append(syn)
        elif syn_sign == 1:
            asym_syns.append(syn)
        else:
            raise ValueError(f'Unknown synapse sign {syn_sign}.')
    sym_syn_mesh = list(merge_someshes(sym_syns, use_new_subfold=self.config.use_new_subfold))
    asym_syn_mesh = list(merge_someshes(asym_syns, use_new_subfold=self.config.use_new_subfold))
    # Fix: compare strings with '!=' instead of 'is not' - identity comparison
    # with a literal is implementation-dependent (raises a SyntaxWarning).
    if self.version != "tmp":
        mesh_dc = MeshStorage(self.mesh_dc_path, read_only=False,
                              disable_locking=not self.enable_locking)
        mesh_dc['syn_ssv_sym'] = sym_syn_mesh
        mesh_dc['syn_ssv_asym'] = asym_syn_mesh
        mesh_dc.push()
    self._meshes['syn_ssv_sym'] = sym_syn_mesh
    self._meshes['syn_ssv_asym'] = asym_syn_mesh
    if dest_path is None:
        return
    # TODO: add appropriate ply fname and/or comment
    write_mesh2kzip(dest_path, asym_syn_mesh[0], asym_syn_mesh[1],
                    asym_syn_mesh[2], color=np.array((240, 50, 50, 255)), ply_fname='10.ply')
    write_mesh2kzip(dest_path, sym_syn_mesh[0], sym_syn_mesh[1],
                    sym_syn_mesh[2], color=np.array((50, 50, 240, 255)), ply_fname='11.ply')
def write_svmeshes2kzip(self, dest_path: Optional[str] = None, **kwargs):
    """
    Write individual cell supervoxel ('sv') meshes in ply format to kzip file.

    Args:
        dest_path: Target file name; defaults to :py:attr:`~skeleton_kzip_path`.
        **kwargs: Forwarded to :func:`write_meshes2kzip`.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    indices, vertices, normals, colors, ply_fnames = [], [], [], [], []
    for sv in self.svs:
        ind, vert, norm = sv.mesh[0], sv.mesh[1], sv.mesh[2]
        indices.append(ind)
        vertices.append(vert)
        normals.append(norm)
        colors.append(None)
        ply_fnames.append(f"sv_{sv.id}.ply")
    write_meshes2kzip(dest_path, indices, vertices, normals, colors,
                      ply_fnames=ply_fnames, **kwargs)
def _svattr2mesh(self, dest_path, attr_key, cmap, normalize_vals=False):
    """
    Color the merged supervoxel mesh by a per-SV attribute and write it to a k.zip.

    Args:
        dest_path: Target k.zip file.
        attr_key: Attribute-dict key looked up for every SV.
        cmap: Colormap passed to :func:`merge_someshes`.
        normalize_vals: Rescale attribute values to [0, 1] before colorizing.
            NOTE(review): uses in-place ``-=``/``/=`` - presumably assumes a
            float dtype; integer-valued attributes would fail on the true
            division. Confirm callers only pass float attributes.
    """
    sv_attrs = np.array([sv.lookup_in_attribute_dict(attr_key).squeeze()
                         for sv in self.svs])
    if normalize_vals:
        min_val = sv_attrs.min()
        sv_attrs -= min_val
        sv_attrs /= sv_attrs.max()
    ind, vert, norm, col = merge_someshes(self.svs, color_vals=sv_attrs, cmap=cmap,
                                          use_new_subfold=self.config.use_new_subfold)
    write_mesh2kzip(dest_path, ind, vert, norm, col, "%s.ply" % attr_key)
def svprobas2mergelist(self, key="glia_probas", dest_path=None):
    """
    Write a KNOSSOS mergelist of all supervoxels to a k.zip, each annotated
    with the mean and the raw values of the probabilities stored under `key`.

    Args:
        key: Attribute-dict key of the per-SV probability array.
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path`.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    svs = list(self.svs)
    coords = np.array([sv.rep_coord for sv in svs])
    sv_comments = []
    for sv in svs:
        probas = sv.attr_dict[key]
        mean_repr = str(np.mean(probas, axis=0))
        raw_repr = str(probas).replace('\n', '')
        sv_comments.append("%s; %s" % (mean_repr, raw_repr))
    kml = knossos_ml_from_svixs([sv.id for sv in svs], coords,
                                comments=sv_comments)
    write_txt2kzip(dest_path, kml, "mergelist.txt")
def _pred2mesh(self, pred_coords: np.ndarray, preds: np.ndarray, ply_fname: Optional[str] = None,
               dest_path: Optional[str] = None, colors: Optional[Union[tuple, np.ndarray, list]] = None,
               k: int = 1, **kwargs):
    """
    If dest_path or ply_fname is None then indices, vertices, colors are
    returned. Else mesh is written to k.zip file as specified.

    Args:
        pred_coords: N x 3; scaled to nm.
        preds: Label array (N x 1).
        ply_fname: Mesh file name used inside the k.zip.
        dest_path: Target k.zip path.
        colors: Color for each possible prediction value (range(np.max(preds)).
        k: Number of nearest neighbors (average prediction).
        **kwargs: Keyword arguments passed to `colorcode_vertices`.

    Returns:
        None or [np.array, np.array, np.array]

    Raises:
        ValueError: If `dest_path` is given without a `ply_fname`.
    """
    if ply_fname is not None and not ply_fname.endswith(".ply"):
        ply_fname += ".ply"
    if dest_path is not None and ply_fname is None:
        # Fix: corrected typo 'ply_fanme' -> 'ply_fname' in the error message.
        msg = "Specify 'ply_fname' in order to save colored " \
              "mesh to k.zip."
        log_reps.error(msg)
        raise ValueError(msg)
    mesh = self.mesh
    col = colorcode_vertices(mesh[1].reshape((-1, 3)), pred_coords,
                             preds, colors=colors, k=k, **kwargs)
    if dest_path is None:
        return mesh[0], mesh[1], col
    else:
        write_mesh2kzip(dest_path, mesh[0], mesh[1], mesh[2], col,
                        ply_fname=ply_fname)
# --------------------------------------------------------------------- GLIA
def gliaprobas2mesh(self, dest_path=None, pred_key_appendix=""):
    """
    Write the per-SV glia probabilities as a diverging-colormap mesh to a k.zip.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path_views`.
        pred_key_appendix: Suffix of the 'glia_probas' attribute key.
    """
    import seaborn as sns
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    # diverging palette with dark center for intermediate probabilities
    diverging_cmap = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
                                           as_cmap=True)
    self._svattr2mesh(dest_path, "glia_probas" + pred_key_appendix,
                      cmap=diverging_cmap)
def gliapred2mesh(self, dest_path=None, thresh=None, pred_key_appendix=""):
    """
    Write the meshes of glia- and neuron-classified supervoxels to a k.zip.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path_views`.
        thresh: Glia probability threshold; defaults to the config value
            ``['glia']['glia_thresh']``.
        pred_key_appendix: Suffix of the prediction key used by ``sv.glia_pred``.
    """
    if thresh is None:
        thresh = self.config['glia']['glia_thresh']
    astrocyte_svs = [sv for sv in self.svs if sv.glia_pred(thresh, pred_key_appendix) == 1]
    nonastrocyte_svs = [sv for sv in self.svs if sv.glia_pred(thresh, pred_key_appendix) == 0]
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    # merge the SV meshes per class and write both as separate ply files
    mesh = merge_someshes(astrocyte_svs, use_new_subfold=self.config.use_new_subfold)
    neuron_mesh = merge_someshes(nonastrocyte_svs, use_new_subfold=self.config.use_new_subfold)
    write_meshes2kzip(dest_path, [mesh[0], neuron_mesh[0]], [mesh[1], neuron_mesh[1]],
                      [mesh[2], neuron_mesh[2]], [None, None],
                      ["glia_%0.2f.ply" % thresh, "nonglia_%0.2f.ply" % thresh])
def gliapred2mergelist(self, dest_path=None, thresh=None,
                       pred_key_appendix=""):
    """
    Write a KNOSSOS mergelist of all supervoxels annotated with their glia
    prediction to a k.zip.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path_views`.
        thresh: Glia probability threshold; defaults to the config value
            ``['glia']['glia_thresh']``.
        pred_key_appendix: Suffix of the prediction key used by ``glia_pred``.
    """
    if thresh is None:
        thresh = self.config['glia']['glia_thresh']
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    # fetch representative coordinates of all SVs via multiprocessing
    params = [[sv, ] for sv in self.svs]
    coords = sm.start_multiprocess_obj("rep_coord", params, nb_cpus=self.nb_cpus)
    coords = np.array(coords)
    # fetch per-SV glia predictions via multiprocessing
    params = [[sv, {"thresh": thresh, "pred_key_appendix": pred_key_appendix}]
              for sv in self.svs]
    glia_preds = sm.start_multiprocess_obj("glia_pred", params,
                                           nb_cpus=self.nb_cpus)
    glia_preds = np.array(glia_preds)
    glia_comments = ["%0.4f" % gp for gp in glia_preds]
    kml = knossos_ml_from_svixs([sv.id for sv in self.svs], coords,
                                comments=glia_comments)
    write_txt2kzip(dest_path, kml, "mergelist.txt")
def gliasplit(self, recompute=False, thresh=None, verbose=False, pred_key_appendix=""):
    """
    Split the SSV into astrocyte and neuron connected components based on
    per-SV glia predictions and store the resulting SV-ID lists in the
    attribute dict (keys ``'astrocyte_svs'`` and ``'neuron_svs'`` +
    `pred_key_appendix`). Skips the computation if the splits already exist.

    Args:
        recompute: Recompute even if the split attributes already exist.
        thresh: Glia probability threshold; defaults to the config value
            ``['glia']['glia_thresh']``.
        verbose: Log timing information.
        pred_key_appendix: Suffix of the prediction/attribute keys.
    """
    astrocyte_svs_key = "astrocyte_svs" + pred_key_appendix
    neuron_svs_key = "neuron_svs" + pred_key_appendix
    if thresh is None:
        thresh = self.config['glia']['glia_thresh']
    if recompute or not (self.attr_exists(astrocyte_svs_key) and
                         self.attr_exists(neuron_svs_key)):
        if verbose:
            log_reps.debug("Splitting glia in SSV {} with {} SV's.".format(
                self.id, len(self.sv_ids)))
            start = time.time()
        nonglia_ccs, astrocyte_ccs = split_glia(self, thresh=thresh,
                                                pred_key_appendix=pred_key_appendix)
        if verbose:
            log_reps.debug("Splitting glia in SSV %d with %d SV's finished "
                           "after %.4gs." % (self.id, len(self.sv_ids),
                                             time.time() - start))
        # store connected components as nested lists of SV IDs
        non_glia_ccs_ixs = [[so.id for so in nonglia] for nonglia in
                            nonglia_ccs]
        astrocyte_ccs_ixs = [[so.id for so in glia] for glia in astrocyte_ccs]
        self.attr_dict[astrocyte_svs_key] = astrocyte_ccs_ixs
        self.attr_dict[neuron_svs_key] = non_glia_ccs_ixs
        self.save_attributes([astrocyte_svs_key, neuron_svs_key],
                             [astrocyte_ccs_ixs, non_glia_ccs_ixs])
    else:
        log_reps.critical('Skipping SSO {}, glia splits already exist'
                          '.'.format(self.id))
def gliasplit2mesh(self, dest_path=None, pred_key_appendix=""):
    """
    Write the meshes of the glia-split connected components (astrocyte and
    non-glia, see :func:`~gliasplit`) to a k.zip file.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path_views`.
        pred_key_appendix: Suffix of the attribute keys holding the splits.
    """
    # TODO: adapt writemesh2kzip to work with multiple writes
    # to same file or use write_meshes2kzip here.
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    use_new_subfold = self.config.use_new_subfold
    # write meshes of CC's
    astrocyte_ccs = self.attr_dict["astrocyte_svs" + pred_key_appendix]
    for kk, cc_sv_ids in enumerate(astrocyte_ccs):
        svs = [self.get_seg_obj("sv", ix) for ix in cc_sv_ids]
        mesh = merge_someshes(svs, use_new_subfold=use_new_subfold)
        write_mesh2kzip(dest_path, mesh[0], mesh[1], mesh[2], None,
                        "astrocyte_cc%d.ply" % kk)
    non_glia_ccs = self.attr_dict["neuron_svs" + pred_key_appendix]
    for kk, cc_sv_ids in enumerate(non_glia_ccs):
        svs = [self.get_seg_obj("sv", ix) for ix in cc_sv_ids]
        mesh = merge_someshes(svs, use_new_subfold=use_new_subfold)
        write_mesh2kzip(dest_path, mesh[0], mesh[1], mesh[2], None,
                        "nonglia_cc%d.ply" % kk)
def morphembed2mesh(self, dest_path, pred_key='latent_morph', whiten=True):
    """
    Write morphology embedding as RGB to k.zip file.

    Args:
        dest_path: Target k.zip file.
        pred_key: Skeleton key holding the per-node embedding array.
        whiten: Center the embedding and project it onto its first three
            principal components.
            NOTE(review): with ``whiten=False`` the variables ``eig`` and
            ``d_transf`` are never assigned (they are only computed inside the
            ``if whiten:`` branch), which would raise a NameError below -
            verify whether ``whiten=False`` is ever used.
    """
    if self.skeleton is None:
        self.load_skeleton()
    d = np.array(self.skeleton[pred_key])
    if whiten:
        d -= d.mean(axis=0)
        # project onto the first three principal components and rescale to [0, 1]
        eig = _calc_pca_components(d)
        d_transf = np.dot(d, eig[:, :3])
        d_transf -= d_transf.min(axis=0)
        d_transf /= d_transf.max(axis=0)
    vert_col = colorcode_vertices(self.mesh[1].reshape((-1, 3)), self.skeleton['nodes'] * self.scaling,
                                  np.arange(len(self.skeleton['nodes'])), normalize_img(d_transf))
    self.mesh2kzip(dest_path, ext_color=vert_col)
def write_gliapred_cnn(self, dest_path=None):
    """
    Color the cell mesh by the per-node CNN glia predictions stored in the
    'sample_locations' annotation of the view k.zip and write it out.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path_views`.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path_views
    skel = load_skeleton_kzip(self.skeleton_kzip_path_views)[
        "sample_locations"]
    annotation_nodes = list(skel.getNodes())
    pred_coords = [node.getCoordinate() * np.array(self.scaling)
                   for node in annotation_nodes]
    preds = [int(node.data["glia_pred"]) for node in annotation_nodes]
    self._pred2mesh(pred_coords, preds, "gliapred.ply",
                    dest_path=dest_path,
                    colors=[[11, 129, 220, 255], [218, 73, 58, 255]])
def predict_views_gliaSV(self, model, verbose=True,
                         pred_key_appendix=""):
    """
    Predict glia probabilities for all SVs of this SSV from their 2D views via
    `model`; results are stored under the key 'glia_probas' + `pred_key_appendix`.

    Args:
        model: Model passed to :func:`predict_sos_views`.
        verbose: Verbosity flag passed to :func:`predict_sos_views`.
        pred_key_appendix: Suffix appended to the prediction key.
    """
    if self.version == 'tmp':
        log_reps.warning('"predict_views_gliaSV" called but this SSV '
                         'has version "tmp", results will'
                         ' not be saved to disk.')
    start = time.time()
    pred_key = "glia_probas"
    pred_key += pred_key_appendix
    # 'tmp'-version: do not write to disk
    predict_sos_views(model, self.svs, pred_key,
                      nb_cpus=self.nb_cpus, verbose=verbose,
                      woglia=False, raw_only=True,
                      return_proba=self.version == 'tmp')
    end = time.time()
    log_reps.debug("Prediction of %d SV's took %0.2fs (incl. read/write). "
                   "%0.4fs/SV" % (len(self.sv_ids), end - start,
                                  float(end - start) / len(self.sv_ids)))
# ------------------------------------------------------------------ AXONESS
def _load_skelfeatures(self, key):
    """
    Return the per-node features stored under `key` in the skeleton dict,
    loading the skeleton first if necessary; returns None if the key is absent.
    """
    if not self.skeleton:
        self.load_skeleton()
    assert self.skeleton is not None, "Skeleton does not exist."
    if key not in self.skeleton:
        return None
    features = self.skeleton[key]
    # sanity check: one feature entry per skeleton node
    assert len(self.skeleton["nodes"]) == len(features), \
        "Length of skeleton features is not equal to number of nodes."
    return features
def _save_skelfeatures(self, k, features, overwrite=False):
    """
    Store per-node `features` under key `k` in the skeleton dict and persist
    the skeleton. Refuses to overwrite an existing key unless `overwrite`.

    Raises:
        ValueError: If `k` already exists and `overwrite` is False.
    """
    if not self.skeleton:
        self.load_skeleton()
    assert self.skeleton is not None, "Skeleton does not exist."
    if not overwrite and k in self.skeleton:
        raise ValueError("Key {} already exists in skeleton"
                         " feature dict.".format(k))
    self.skeleton[k] = features
    # sanity check: one feature entry per skeleton node
    assert len(self.skeleton["nodes"]) == len(self.skeleton[k]), \
        "Length of skeleton features is not equal to number of nodes."
    self.save_skeleton()
def write_axpred_rfc(self, dest_path=None, k=1):
    """
    Write the RFC-based axoness prediction (skeleton key 'axoness') as a
    colored mesh to a k.zip via :func:`~_pred2mesh`.

    Args:
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path`.
        k: Number of nearest neighbors passed to :func:`~_pred2mesh`.

    Returns:
        False if the skeleton has no 'axoness' entry, otherwise None.
    """
    if dest_path is None:
        dest_path = self.skeleton_kzip_path
    if self.load_skeleton():
        # Idiom fix: use `x not in y` instead of `not x in y`.
        if "axoness" not in self.skeleton:
            return False
        # swap labels 0 and 1 on a copy (presumably for visualization - the
        # stored prediction itself is left untouched)
        axoness = self.skeleton["axoness"].copy()
        axoness[self.skeleton["axoness"] == 1] = 0
        axoness[self.skeleton["axoness"] == 0] = 1
        self._pred2mesh(self.skeleton["nodes"] * self.scaling, axoness,
                        k=k, dest_path=dest_path)
def skelproperty2mesh(self, key, dest_path=None, k=1):
    """
    Write a skeleton node property as a colored mesh to a k.zip file.

    Args:
        key: Skeleton key of the per-node property.
        dest_path: Target k.zip; defaults to :py:attr:`~skeleton_kzip_path`.
        k: Number of nearest neighbors passed to :func:`~_pred2mesh`.
    """
    if self.skeleton is None:
        self.load_skeleton()
    target_path = dest_path if dest_path is not None else self.skeleton_kzip_path
    node_coords_nm = self.skeleton["nodes"] * self.scaling
    self._pred2mesh(node_coords_nm, self.skeleton[key], k=k,
                    dest_path=target_path, ply_fname=key + ".ply")
def axoness_for_coords(self, coords, radius_nm=4000, pred_type="axoness"):
    """
    Does not need to be axoness; it supports any attribute stored in
    ``self.skeleton`` (thin wrapper around :func:`~attr_for_coords`).

    Args:
        coords: np.array
            Voxel coordinates, unscaled! [N, 3]
        radius_nm: float
            Majority-vote radius in nm (see :func:`~attr_for_coords`).
        pred_type: str
            Attribute key in :py:attr:`~skeleton`.

    Returns: np.array
        Same length as coords. For every coordinate in coords returns the
        majority label within radius_nm.
    """
    return np.array(self.attr_for_coords(coords, [pred_type], radius_nm))
def attr_for_coords(self, coords, attr_keys, radius_nm=None, k=1):
    """
    TODO: move to super_segmentation_helper.py
    Query skeleton node attributes at given coordinates. Supports any
    attribute stored in self.skeleton. If radius_nm is given, will
    assign the majority attribute value within that radius.
    Parameters
    ----------
    coords : np.array
        Voxel coordinates, unscaled! [N, 3]
    radius_nm : Optional[float]
        If None, will only use attribute of nearest node, otherwise
        majority attribute value is used.
    attr_keys : List[str]
        Attribute identifier
    k : int
        Number of nearest neighbors, only if `radius_nm` is None.
    Returns
    -------
    List
        Same length as coords. For every coordinate in coords returns the
        majority label within radius_nm or [-1] if Key does not exist.
    """
    if type(attr_keys) is str:
        attr_keys = [attr_keys]
    coords = np.array(coords)
    if self.skeleton is None:
        self.load_skeleton()
    if self.skeleton is None or len(self.skeleton["nodes"]) == 0:
        log_reps.warn("Skeleton did not exist for SSV {} (size: {}; rep. coord.: "
                      "{}).".format(self.id, self.size, self.rep_coord))
        return -1 * np.ones((len(coords), len(attr_keys)))
    # get close locations
    if k > 1 and len(self.skeleton["nodes"]) < k:
        log_reps.warn(f'Number of skeleton nodes ({len(self.skeleton["nodes"])}) '
                      f'is smaller than k={k} in SSO {self.id}. Lowering k.')
        k = len(self.skeleton["nodes"])
    # KD-tree built in nanometer space (skeleton nodes are voxel coordinates)
    kdtree = scipy.spatial.cKDTree(self.skeleton["nodes"] * self.scaling)
    if radius_nm is None:
        _, close_node_ids = kdtree.query(coords * self.scaling, k=k, n_jobs=self.nb_cpus)
    else:
        close_node_ids = kdtree.query_ball_point(coords * self.scaling, radius_nm)
    attr_dc = defaultdict(list)
    for i_coord in range(len(coords)):
        curr_close_node_ids = close_node_ids[i_coord]
        for attr_key in attr_keys:
            # e.g. for glia SSV axoness does not exist.
            if attr_key not in self.skeleton:
                el = -1 if k == 1 else [-1] * k
                attr_dc[attr_key].append(el)
                continue
            # use nodes within radius_nm, there might be multiple node ids
            if radius_nm is not None:
                if len(curr_close_node_ids) == 0:
                    # NOTE(review): this fallback queries *all* coords, not just
                    # coords[i_coord]; `curr_close_node_ids` then holds the
                    # nearest node for every query coordinate and the majority
                    # below runs over all of them - confirm this is intended.
                    dist, curr_close_node_ids = kdtree.query(coords * self.scaling)
                    log_reps.info(
                        "Couldn't find skeleton nodes within {} nm. Using nearest "
                        "one with distance {} nm. SSV ID {}, coordinate at {}."
                        "".format(radius_nm, dist[0], self.id, coords[i_coord]))
                cls, cnts = np.unique(
                    np.array(self.skeleton[attr_key])[np.array(curr_close_node_ids)],
                    return_counts=True)
                if len(cls) > 0:
                    attr_dc[attr_key].append(cls[np.argmax(cnts)])
                else:
                    log_reps.info("Did not find any skeleton node within {} nm at {}."
                                  " SSV {} (size: {}; rep. coord.: {}).".format(
                        radius_nm, i_coord, self.id, self.size, self.rep_coord))
                    attr_dc[attr_key].append(-1)
            else:  # only nearest node ID
                attr_dc[attr_key].append(self.skeleton[attr_key][curr_close_node_ids])
    # in case latent morphology was not predicted / needed
    if "latent_morph" in attr_keys:
        latent_morph = attr_dc["latent_morph"]
        for i in range(len(latent_morph)):
            curr_latent = latent_morph[i]
            # replace missing (-1) entries with an all-inf embedding vector
            if np.isscalar(curr_latent) and curr_latent == -1:
                curr_latent = np.array([np.inf] * self.config['tcmn']['ndim_embedding'])
            latent_morph[i] = curr_latent
    return [np.array(attr_dc[k]) for k in attr_keys]
def predict_views_axoness(self, model, verbose=False,
                          pred_key_appendix=""):
    """
    Predict axoness probabilities for all SVs of this SSV from their 2D views
    via `model`; results are stored under the key 'axoness_probas' +
    `pred_key_appendix`. If views are missing (KeyError), they are re-rendered
    once and the prediction is retried.

    Args:
        model: Model passed to :func:`predict_sos_views`.
        verbose: Verbosity flag passed to :func:`predict_sos_views`.
        pred_key_appendix: Suffix appended to the prediction key.
    """
    start = time.time()
    pred_key = "axoness_probas"
    pred_key += pred_key_appendix
    if self.version == 'tmp':
        log_reps.warning('"predict_views_axoness" called but this SSV '
                         'has version "tmp", results will'
                         ' not be saved to disk.')
    try:
        predict_sos_views(model, self.svs, pred_key,
                          nb_cpus=self.nb_cpus, verbose=verbose,
                          woglia=True, raw_only=False,
                          return_proba=self.version == 'tmp')  # do not write to disk
    except KeyError:
        # presumably raised when views are missing from storage - re-render and retry
        log_reps.error("Re-rendering SSV %d (%d SVs), because views are missing."
                       % (self.id, len(self.sv_ids)))
        self.render_views(add_cellobjects=True, woglia=True, overwrite=True)
        predict_sos_views(model, self.svs, pred_key,
                          nb_cpus=self.nb_cpus, verbose=verbose,
                          woglia=True, raw_only=False,
                          return_proba=self.version == 'tmp')  # do not write to disk)
    end = time.time()
    log_reps.debug("Prediction of %d SV's took %0.2fs (incl. read/write). "
                   "%0.4fs/SV" % (len(self.sv_ids), end - start,
                                  float(end - start) / len(self.sv_ids)))
def predict_views_embedding(self, model, pred_key_appendix="", view_key=None):
    """
    This will save a latent vector which captures a local morphology fingerprint for every
    skeleton node location as :py:attr:`~skeleton`['latent_morph'] based on the nearest rendering
    location.
    Notes:
        * This method requires existing :py:attr:`~views`. For on the fly view rendering use
          :py:func:`~syconn.reps.super_segmentation_helper.view_embedding_of_sso_nocache`
    Todo:
        * Add option for on the fly rendering and call
          :py:func:`~syconn.reps.super_segmentation_helper.view_embedding_of_sso_nocache` in here.
    Args:
        model: Model with a ``predict_proba`` interface returning
            ``(dist1, dist2, latent1, latent2, latent3)`` (see unpacking below).
        pred_key_appendix: Suffix appended to the skeleton key 'latent_morph'.
        view_key: str
            View identifier, e.g. if views have been pre-rendered and are stored in
            `self.view_dict`
    """
    from ..handler.prediction import naive_view_normalization_new
    pred_key = "latent_morph"
    pred_key += pred_key_appendix
    if self.version == 'tmp':
        log_reps.warning('"predict_views_embedding" called but this SSV '
                         'has version "tmp", results will'
                         ' not be saved to disk.')
    views = self.load_views(view_key=view_key)  # [N, 4, 2, y, x]
    # TODO: add normalization to model - prevent potentially different normalization!
    views = naive_view_normalization_new(views)
    # The inference with TNets can be optimized via splitting the views into three equally sized parts.
    inp = (views[:, :, 0], np.zeros_like(views[:, :, 0]), np.zeros_like(views[:, :, 0]))
    # return dist1, dist2, inp1, inp2, inp3 latent
    _, _, latent, _, _ = model.predict_proba(inp)  # only use first view for now
    # map latent vecs at rendering locs to skeleton node locations via nearest neighbor
    self.load_skeleton()
    if 'view_ixs' not in self.skeleton:
        hull_tree = spatial.cKDTree(np.concatenate(self.sample_locations()))
        dists, ixs = hull_tree.query(self.skeleton["nodes"] * self.scaling,
                                     n_jobs=self.nb_cpus, k=1)
        self.skeleton["view_ixs"] = ixs
    self.skeleton[pred_key] = latent[self.skeleton["view_ixs"]]
    self.save_skeleton()
def cnn_axoness2skel(self, **kwargs):
    """
    Map CNN-based axoness predictions onto the skeleton via
    :func:`~syconn.reps.super_segmentation_helper.cnn_axoness2skel`,
    temporarily disabling file locking.

    Args:
        **kwargs: Forwarded to the helper function.
    """
    previous_locking = self.enable_locking
    # all SV operations are read-only (enable_locking is inherited by
    # sso.svs); SSV operations not, but SSO file structure is not chunked
    self.enable_locking = False
    result = ssh.cnn_axoness2skel(self, **kwargs)
    self.enable_locking = previous_locking
    return result
def average_node_axoness_views(self, **kwargs):
    """
    Apply a sliding window averaging along the axon predictions stored at the
    nodes of the :py:attr:`~skeleton`. See
    :func:`~syconn.reps.super_segmentation_helper._average_node_axoness_views`
    for details. Will call :func:`~save_skeleton`.

    Args:
        **kwargs: Key word arguments used in
            :func:`~syconn.reps.super_segmentation_helper._average_node_axoness_views`.
    """
    previous_locking = self.enable_locking
    # all SV operations are read-only (enable_locking is inherited by
    # sso.svs); SSV operations not, but SSO file structure is not chunked
    self.enable_locking = False
    result = ssh.average_node_axoness_views(self, **kwargs)
    self.save_skeleton()
    self.enable_locking = previous_locking
    return result
def axoness2mesh(self, dest_path, k=1, pred_key_appendix=''):
    """
    Deprecated. See :func:`~semseg2mesh`. Write the per-location CMN axon
    predictions (img2scalar) to a kzip file.

    Args:
        dest_path: Path to the kzip file.
        k: Number of nearest neighbors used for the majority vote.
        pred_key_appendix: Key to load specific predictions.
    """
    ssh.write_axpred_cnn(self, dest_path=dest_path, k=k,
                         pred_key_appendix=pred_key_appendix)
# --------------------------------------------------------------- CELL TYPES
def predict_celltype_multiview(self, model, pred_key_appendix, model_tnet=None, view_props=None,
                               onthefly_views=False, overwrite=True, model_props=None,
                               verbose: bool = False, save_to_attr_dict: bool = True):
    """
    Infer celltype classification via `model` (stored as ``celltype_cnn_e3`` and
    ``celltype_cnn_e3_probas`` in the :py:attr:`~attr_dict`) and an optional
    cell morphology embedding via `model_tnet` (stored as ``latent_morph_ct``).
    Args:
        model: nn.Module
        pred_key_appendix: str
        model_tnet: Optional[nn.Module]
        view_props: Optional[dict]
            Dictionary which contains view properties. If None, default defined in
            :py:attr:`~config` will be used.
        onthefly_views: bool
            Render views on the fly instead of using pre-computed ones.
        overwrite: Overwrite existing predictions.
        model_props: Model properties. See config.yml for an example.
        verbose: Verbosity flag (only forwarded for on-the-fly views).
        save_to_attr_dict: Save prediction in attr_dict.
    """
    if model_props is None:
        model_props = {}
    view_props_def = self.config['views']['view_properties']
    # NOTE(review): `update` mutates the dict returned by self.config -
    # confirm that the config getter returns a copy.
    if view_props is not None:
        view_props_def.update(view_props)
    view_props = view_props_def
    if not onthefly_views:
        ssh.predict_sso_celltype(self, model, pred_key_appendix=pred_key_appendix,
                                 save_to_attr_dict=save_to_attr_dict, overwrite=overwrite, **model_props)
    else:
        ssh.celltype_of_sso_nocache(self, model, pred_key_appendix=pred_key_appendix,
                                    save_to_attr_dict=save_to_attr_dict,
                                    overwrite=overwrite, verbose=verbose, **view_props, **model_props)
    if model_tnet is not None:
        view_props = dict(view_props)  # create copy
        # 'use_syntype' is removed before forwarding - presumably not accepted
        # by view_embedding_of_sso_nocache.
        if 'use_syntype' in view_props:
            del view_props['use_syntype']
        ssh.view_embedding_of_sso_nocache(self, model_tnet, pred_key_appendix=pred_key_appendix,
                                          overwrite=True, **view_props)
def predict_cell_morphology_pts(self, **kwargs):
    """
    Store local cell morphology with key 'latent_morph' (+ `pred_key_appendix`)
    in the SSV skeleton.

    Args:
        **kwargs: Forwarded to ``infere_cell_morphology_ssd``.
    """
    from syconn.handler.prediction_pts import infere_cell_morphology_ssd
    ssv_params = [dict(ssv_id=self.id, working_dir=self.working_dir,
                       config=self.config)]
    infere_cell_morphology_ssd(ssv_params, **kwargs)
def render_ortho_views_vis(self, dest_folder=None, colors=None, ws=(2048, 2048),
                           obj_to_render=("sv",)):
    """
    Render multi-view images of the SSO for visualization.

    Args:
        dest_folder: If given, views are saved there as PNGs; otherwise the
            rendered views are returned.
        colors: Mapping of object type to RGBA color (floats); defaults below.
        ws: Rendering window size (x, y).
        obj_to_render: Object types to include in the rendering.

    Returns:
        The rendered views if `dest_folder` is falsy, else None.
    """
    multi_view_sso = load_rendering_func('multi_view_sso')
    if colors is None:
        colors = {"sv": (0.5, 0.5, 0.5, 0.5), "mi": (0, 0, 1, 1),
                  "vc": (0, 1, 0, 1), "sj": (1, 0, 0, 1)}
    views = multi_view_sso(self, colors, ws=ws, obj_to_render=obj_to_render)
    if dest_folder:
        # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this
        # import fails with recent SciPy versions.
        from scipy.misc import imsave  # TODO: use new imageio package
        for ii, v in enumerate(views):
            imsave("%s/SSV_%d_%d.png" % (dest_folder, self.id, ii), v)
    else:
        return views
def certainty_celltype(self, pred_key: Optional[str] = None) -> float:
    """
    Certainty estimate of the celltype prediction:
    1. If `is_logit` is True, generate pseudo-probabilities from the
       input using softmax.
    2. Sum the evidence per class and (re-)normalize.
    3. Compute the entropy, scale it with the maximum entropy (equal
       probabilities) and subtract it from 1.
    Notes:
        See :func:`~syconn.handler.prediction.certainty_estimate`
    Args:
        pred_key: Key of classification results (one C-class probability
            vector for every multi-view sample). ``pred_key + '_probas'`` must exist in
            :py:attr:`~attr_dict`. Defaults to 'celltype_cnn_e3'.
    Returns:
        Certainty measure based on the entropy of the cell type logits.
    """
    if pred_key is None:
        pred_key = 'celltype_cnn_e3'
    # return cached certainty if it was computed before
    cert = self.lookup_in_attribute_dict(pred_key + '_certainty')
    if cert is not None:
        return cert
    logits = self.lookup_in_attribute_dict(pred_key + '_probas')
    return certainty_estimate(logits, is_logit=True)
def majority_vote(self, prop_key: str, max_dist: float) -> np.ndarray:
    """
    Smooths (average using sliding window of 2 times max_dist and majority
    vote) property prediction in annotation.

    Args:
        prop_key: Property to average; must exist in :py:attr:`~skeleton`.
        max_dist: Maximum distance (in nm) for sliding window used in majority voting.

    Returns:
        Array of majority labels, one per skeleton node.
    """
    assert prop_key in self.skeleton, "Given key does not exist in self.skeleton"
    prop_array = self.skeleton[prop_key]
    assert prop_array.squeeze().ndim == 1, "Property array has to be 1D."
    maj_votes = np.zeros_like(prop_array)
    # Perf fix: the weighted graph is identical for every node - build it once
    # instead of once per loop iteration.
    graph = self.weighted_graph()
    for ii in range(len(self.skeleton["nodes"])):
        # all nodes reachable within max_dist (Dijkstra cutoff)
        paths = nx.single_source_dijkstra_path(graph, ii, max_dist)
        neighs = np.array(list(paths.keys()), dtype=np.int64)
        labels, cnts = np.unique(prop_array[neighs], return_counts=True)
        maj_votes[ii] = labels[np.argmax(cnts)]
    return maj_votes
def shortestpath2soma(self, coordinates: np.ndarray,
                      axoness_key: Optional[str] = None) -> List[float]:
    """
    Computes the shortest path to the soma along :py:attr:`~skeleton`.
    Cell compartment predictions must exist in ``self.skeleton['axoness_avg10000']``,
    see :func:`~syconn.exec.exec_inference.run_semsegaxoness_mapping`.
    Requires a populated :py:attr:`~skeleton`, e.g. via :func:`~load_skeleton`.

    Args:
        coordinates: Starting coordinates in voxel coordinates; shape of (N, 3).
        axoness_key: Key to axon prediction stored in :py:attr:`~skeleton`.

    Raises:
        KeyError: If axon prediction does not exist.

    Examples:
        To get the shortest paths between all synapses and the soma use::

            from syconn.reps.super_segmentation import *
            from syconn import global_params
            global_params.wd = '~/SyConn/example_cube1/'
            ssd = SuperSegmentationDataset()
            # get any cell reconstruction
            ssv = ssd.get_super_segmentation_object(ssd.ssv_ids[0])
            # get synapse coordinates in voxels.
            syns = np.array([syn.rep_coord for syn in ssv.syn_ssv])
            shortest_paths = ssv.shortestpath2soma(syns)

    Returns:
        The shortest path in nanometers for each start coordinate;
        ``np.inf`` entries if the skeleton contains no soma node.
    """
    if axoness_key is None:
        axoness_key = 'axoness_avg{}'.format(self.config['compartments'][
            'dist_axoness_averaging'])
    nodes = self.skeleton['nodes']
    # indices of nodes predicted as soma (label 2)
    soma_ixs = np.nonzero(self.skeleton[axoness_key] == 2)[0]
    # Fix: check for *absence* of soma nodes with len(); the previous
    # `np.sum(soma_ixs) == 0` also triggered when node 0 was the only soma node.
    if len(soma_ixs) == 0:
        return [np.inf] * len(coordinates)
    graph = self.weighted_graph(add_node_attr=[axoness_key])
    kdt = scipy.spatial.cKDTree(nodes)
    _, start_ixs = kdt.query(coordinates, n_jobs=self.nb_cpus)
    log_reps.debug(f'Computing shortest paths to soma for {len(start_ixs)} '
                   f'starting nodes.')
    shortest_paths_of_interest = []
    for ix in start_ixs:
        shortest_paths = nx.single_source_dijkstra_path_length(graph, ix)
        # get the shortest path to a soma
        curr_path = np.min([shortest_paths[soma_ix] for soma_ix in soma_ixs])
        shortest_paths_of_interest.append(curr_path)
    return shortest_paths_of_interest
def path_density_seg_obj(self, obj_type: str, compartments_of_interest: Optional[List[int]] = None,
                         ax_pred_key: str = 'axoness_avg10000') -> float:
    """
    Average volume of the given sub-cellular structure per skeleton path length.

    Args:
        obj_type: Key to any available sub-cellular structure.
        compartments_of_interest: Which compartments to take into account for calculation.
            axon: 1, dendrite: 0, soma: 2
        ax_pred_key: Key of compartment prediction stored in :attr:`~skeleton`, only used if
            `compartments_of_interest` was set.

    Returns:
        Average volume per path length (um^3 / um); 0.0 if the path length is zero.
    """
    objs = np.array(self.get_seg_objects(obj_type))
    if self.skeleton is None:
        self.load_skeleton()
    skel = self.skeleton
    if compartments_of_interest is not None:
        # Fix: operate on a copy - merging labels 3/4 into 1 must not mutate
        # the prediction stored in the skeleton dict as a side effect.
        node_labels = skel[ax_pred_key].copy()
        node_labels[node_labels == 3] = 1
        node_labels[node_labels == 4] = 1
        # assign each object the compartment label of its nearest skeleton node
        tree = spatial.cKDTree(skel['nodes'] * self.scaling)
        _, ixs = tree.query(np.array([obj.rep_coord for obj in objs]) * self.scaling, k=1, n_jobs=self.nb_cpus)
        obj_labels = node_labels[ixs]
        # Fix: `np.bool` was removed in numpy >= 1.24; use the builtin bool.
        mask = np.zeros(len(objs), dtype=bool)
        for comp_label in compartments_of_interest:
            mask = mask | (obj_labels == comp_label)
        objs = objs[mask]
    if len(objs) > 0:
        vx_count = np.sum([obj.size for obj in objs])
    else:
        vx_count = 0
    obj_vol = vx_count * np.prod(self.scaling) / 1e9  # in um^3
    path_length = self.total_edge_length(compartments_of_interest) / 1e3  # in um
    if path_length == 0:
        return 0.0
    else:
        return obj_vol / path_length
# ------------------------------------------------------------------------------
# SO rendering code
def render_sampled_sos_cc(sos, ws=(256, 128), verbose=False, woglia=True,
                          render_first_only=0, add_cellobjects=True,
                          overwrite=False, cellobjects_only=False,
                          index_views=False, enable_locking=True):
    """
    Renders for each SV views at sampled locations (number is dependent on
    SV mesh size with scaling fact) from combined mesh of all SV.

    Args:
        sos: list of SegmentationObject
        ws: Rendering window size (x, y).
        verbose: bool
        woglia: Without glia components.
        render_first_only: If > 0, only checks/renders the first
            `render_first_only` SVs.
        add_cellobjects: Map cell objects before rendering.
        overwrite: Render even if views already exist.
        cellobjects_only: bool
        index_views: Render index views instead of raw views.
        enable_locking: Enable system locking when writing views.
    """
    if not overwrite:
        if render_first_only:
            if np.all([sos[ii].views_exist(woglia=woglia) for ii in range(render_first_only)]):
                return
        else:
            if np.all([sv.views_exist(woglia=woglia) for sv in sos]):
                return
    # initialize temporary SSO
    sso = SuperSegmentationObject(sos[0].id,
                                  create=False, enable_locking=False,
                                  working_dir=sos[0].working_dir,
                                  version="tmp", scaling=sos[0].scaling)
    sso._objects["sv"] = sos
    if render_first_only:
        coords = [sos[ii].sample_locations() for ii in range(render_first_only)]
    else:
        coords = sso.sample_locations(cache=False)
    if add_cellobjects:
        sso._map_cellobjects(save=False)
    # prefix sums to slice the flat view array back into per-SV chunks
    part_views = np.cumsum([0] + [len(c) for c in coords])
    if index_views:
        views = render_sso_coords_index_views(sso, flatten_list(coords),
                                              ws=ws, verbose=verbose)
    else:
        views = render_sso_coords(sso, flatten_list(coords),
                                  add_cellobjects=add_cellobjects,
                                  ws=ws, verbose=verbose,
                                  cellobjects_only=cellobjects_only)
    for i in range(len(coords)):
        # TODO: write chunked
        v = views[part_views[i]:part_views[i + 1]]
        if np.sum(v) == 0 or np.sum(v) == np.prod(v.shape):
            # Fix: log_reps.warn is a logging call, not warnings.warn - the
            # stray RuntimeWarning argument broke lazy %-formatting.
            log_reps.warn("Empty views detected after rendering.")
        sv_obj = sos[i]
        # Fix: honor the `enable_locking` parameter instead of hard-coding True
        # (default value is unchanged, so existing callers behave identically).
        sv_obj.save_views(views=v, woglia=woglia, index_views=index_views,
                          cellobjects_only=cellobjects_only,
                          enable_locking=enable_locking)
def render_so(so, ws=(256, 128), add_cellobjects=True, verbose=False):
    """
    Render super voxel views located at given locations. Does not write views
    to so.views_path

    Args:
        so: SegmentationObject
            super voxel ID
        ws: tuple of int
            Rendering windows size
        add_cellobjects: bool
        verbose: bool

    Returns: np.array
        views
    """
    # wrap the single SV in a throw-away SSO so cell objects can be mapped
    tmp_sso = SuperSegmentationObject(so.id,
                                      create=False,
                                      working_dir=so.working_dir,
                                      version="tmp", scaling=so.scaling)
    tmp_sso._objects["sv"] = [so]
    locations = tmp_sso.sample_locations(cache=False)[0]
    if add_cellobjects:
        tmp_sso._map_cellobjects(save=False)
    return render_sso_coords(tmp_sso, locations, ws=ws,
                             add_cellobjects=add_cellobjects, verbose=verbose)
def celltype_predictor(args) -> Iterable:
    """
    Predict cell types for a batch of SSVs with the e3 celltype model.

    Args:
        args: Tuple of (ssv_ids, nb_cpus, model_props).

    Returns:
        IDs of SSVs whose prediction raised a RuntimeError.
    """
    from ..handler.prediction import get_celltype_model_e3
    ssv_ids, nb_cpus, model_props = args
    use_onthefly_views = global_params.config.use_onthefly_views
    view_props = global_params.config['views']['view_properties']
    model = get_celltype_model_e3()
    failed_ids = []
    for ssv_id in ssv_ids:
        sso = SuperSegmentationObject(ssv_id, working_dir=global_params.config.working_dir)
        sso.nb_cpus = nb_cpus
        sso._view_caching = True
        try:
            sso.predict_celltype_multiview(
                model, pred_key_appendix="", onthefly_views=use_onthefly_views,
                overwrite=True, view_props=view_props, model_props=model_props)
        except RuntimeError as e:
            failed_ids.append(sso.id)
            msg = 'ERROR during celltype prediction of SSV {}. {}'.format(sso.id, repr(e))
            log_reps.error(msg)
    return failed_ids
def semsegaxoness_predictor(args) -> List[int]:
    """
    Predicts axoness and stores resulting labels at vertex dictionary.

    Args:
        args: (ssv_ids, view_props, nb_cpus, map_properties, pred_key, max_dist)

    Returns:
        IDs of missing/failed SSVs.
    """
    from ..handler.prediction import get_semseg_axon_model
    ssv_ids, view_props, nb_cpus, map_properties, pred_key, max_dist, bs = args
    model = get_semseg_axon_model()
    failed_ids = []
    for ssv_id in ssv_ids:
        sso = SuperSegmentationObject(ssv_id, working_dir=global_params.config.working_dir)
        sso.nb_cpus = nb_cpus
        sso._view_caching = True
        try:
            # semantic segmentation followed by projection onto the skeleton
            ssh.semseg_of_sso_nocache(sso, model, bs=bs, **view_props)
            semsegaxoness2skel(sso, map_properties, pred_key, max_dist)
        except RuntimeError as e:
            failed_ids.append(sso.id)
            msg = 'Error during sem. seg. prediction of SSV {}. {}'.format(sso.id, repr(e))
            log_reps.error(msg)
        # free per-SSV caches before handling the next object
        del sso
    return failed_ids
def semsegaxoness2skel(sso: SuperSegmentationObject, map_properties: dict,
                       pred_key: str, max_dist: int):
    """
    Populate the following two skeleton keys:
        * "{}_avg{}".format(pred_key, max_dist)
        * "{}_avg{}_comp_maj".format(pred_key, max_dist)

    Args:
        sso: SuperSegmentationObject.
        map_properties: Properties used to map the vertex predictions to the skeleton nodes.
        pred_key: Used for retrieving vertex labels and to store the mapped node labels in the skeleton.
        max_dist: Distance used for majority vote in ``majorityvote_skeleton_property``.

    Notes:
        * Node predictions will be zero if no mesh vertices are available or no nodes exist.

    Returns:
        None. Results are written to the skeleton and saved to disk.
    """
    if sso.skeleton is None:
        sso.load_skeleton()
    if sso.skeleton is None:
        # typo fix: was "hdoes not exist"
        log_reps.warning(f"Skeleton of {sso} does not exist.")
        return
    if len(sso.skeleton["nodes"]) == 0 or len(sso.mesh[1]) == 0:
        log_reps.warning(f"Skeleton of {sso} has zero nodes or no mesh vertices.")
        sso.skeleton["{}_avg{}".format(pred_key, max_dist)] = np.zeros((len(sso.skeleton['nodes']), 1))
        sso.skeleton["{}_avg{}_comp_maj".format(pred_key, max_dist)] = np.zeros((len(sso.skeleton['nodes']), 1))
        sso.save_skeleton()
        return
    # vertex predictions
    node_preds = sso.semseg_for_coords(
        sso.skeleton['nodes'], semseg_key=pred_key,
        **map_properties)
    # perform average only on axon dendrite and soma predictions
    nodes_ax_den_so = np.array(node_preds, dtype=np.int32)
    # set en-passant and terminal boutons to axon class for averaging
    # bouton labels are stored in node_preds
    nodes_ax_den_so[nodes_ax_den_so == 3] = 1
    nodes_ax_den_so[nodes_ax_den_so == 4] = 1
    sso.skeleton[pred_key] = nodes_ax_den_so
    # average along skeleton, stored as: "{}_avg{}".format(pred_key, max_dist)
    ssh.majorityvote_skeleton_property(sso, prop_key=pred_key,
                                       max_dist=max_dist)
    # suffix '_avg{}' is added by `_average_node_axoness_views`
    nodes_ax_den_so = sso.skeleton["{}_avg{}".format(pred_key, max_dist)]
    # recover bouton predictions within axons and store smoothed result
    nodes_ax_den_so[(node_preds == 3) & (nodes_ax_den_so == 1)] = 3
    nodes_ax_den_so[(node_preds == 4) & (nodes_ax_den_so == 1)] = 4
    sso.skeleton["{}_avg{}".format(pred_key, max_dist)] = nodes_ax_den_so
    # will create a compartment majority voting after removing all soma nodes
    # the result will be written to: ``ax_pred_key + "_comp_maj"``
    ssh.majority_vote_compartments(sso, "{}_avg{}".format(pred_key, max_dist))
    nodes_ax_den_so = sso.skeleton["{}_avg{}_comp_maj".format(pred_key, max_dist)]
    # recover bouton predictions within axons and store majority result
    nodes_ax_den_so[(node_preds == 3) & (nodes_ax_den_so == 1)] = 3
    nodes_ax_den_so[(node_preds == 4) & (nodes_ax_den_so == 1)] = 4
    sso.skeleton["{}_avg{}_comp_maj".format(pred_key, max_dist)] = nodes_ax_den_so
    sso.save_skeleton()
def semsegspiness_predictor(args) -> List[int]:
    """
    Predicts spiness and stores resulting labels at vertex dictionary.

    Args:
        args: (ssv_ids, view_props, nb_cpus, kwargs_semseg2mesh, kwargs_semsegforcoords)

    Returns:
        IDs of SSVs whose prediction raised a RuntimeError.
    """
    from ..handler.prediction import get_semseg_spiness_model
    model = get_semseg_spiness_model()
    ssv_ids, view_props, nb_cpus, kwargs_semseg2mesh, kwargs_semsegforcoords = args
    failed_ids = []
    for ssv_id in ssv_ids:
        sso = SuperSegmentationObject(ssv_id, working_dir=global_params.config.working_dir)
        sso.nb_cpus = nb_cpus
        sso._view_caching = True
        try:
            ssh.semseg_of_sso_nocache(sso, model, **view_props, **kwargs_semseg2mesh)
            # map vertex labels onto the skeleton nodes
            sso.load_skeleton()
            has_nodes = (sso.skeleton is not None
                         and len(sso.skeleton["nodes"]) > 0)
            if has_nodes:
                node_labels = sso.semseg_for_coords(sso.skeleton['nodes'],
                                                    kwargs_semseg2mesh['semseg_key'],
                                                    **kwargs_semsegforcoords)
            else:
                log_reps.warning(f"Skeleton of SSV {sso.id} has zero nodes.")
                node_labels = np.zeros((0, ), dtype=np.int32)
            sso.skeleton[kwargs_semseg2mesh['semseg_key']] = node_labels
            sso.save_skeleton()
        except RuntimeError as e:
            failed_ids.append(sso.id)
            msg = 'Error during sem. seg. prediction of SSV {}. {}'.format(sso.id, repr(e))
            log_reps.error(msg)
    return failed_ids
| StructuralNeurobiologyLab/SyConn | syconn/reps/super_segmentation_object.py | Python | gpl-2.0 | 159,389 | [
"NEURON"
] | c670d4d7edb9980ea0793316a915829238a3180245687b2a71a405be69c5b73d |
import numpy as np
import os
# prefer netCDF4; fall back to the legacy netCDF3 bindings if unavailable.
# Catch only ImportError so real errors inside the package still surface.
try:
    import netCDF4 as netCDF
except ImportError:
    import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
    # Bare namespace used to carry time metadata (``long_name``, ``units``)
    # into pyroms_toolbox.nc_create_roms_file; attributes are set on the
    # class itself inside remap_bdry_uv before file creation.
    pass
def remap_bdry_uv(src_file, src_grd, dst_grd, dxy=20, cdepth=0, kk=2, dst_dir='./'):
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# get dimensions
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# create destination file
dst_file = src_file.rsplit('/')[-1]
dst_fileu = dst_dir + dst_file[:-3] + '_u_bdry_' + dst_grd.name + '.nc'
print '\nCreating destination file', dst_fileu
if os.path.exists(dst_fileu) is True:
os.remove(dst_fileu)
pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime)
dst_filev = dst_dir + dst_file[:-3] + '_v_bdry_' + dst_grd.name + '.nc'
print 'Creating destination file', dst_filev
if os.path.exists(dst_filev) is True:
os.remove(dst_filev)
pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime)
# open destination file
ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT')
ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_varu = cdf.variables['u']
src_varv = cdf.variables['v']
time = cdf.variables['ocean_time'][0]
#get missing value
spval = src_varu._FillValue
src_varu = cdf.variables['u'][0]
src_varv = cdf.variables['v'][0]
# get weights file
wts_file = 'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc'
# build intermediate zgrid
zlevel = -src_grd.z_t[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in destination file
print 'Creating variable u_north'
ncu.createVariable('u_north', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_north'].long_name = '3D u-momentum north boundary condition'
ncu.variables['u_north'].units = 'meter second-1'
ncu.variables['u_north'].field = 'u_north, scalar, series'
print 'Creating variable u_south'
ncu.createVariable('u_south', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_south'].long_name = '3D u-momentum south boundary condition'
ncu.variables['u_south'].units = 'meter second-1'
ncu.variables['u_south'].field = 'u_south, scalar, series'
print 'Creating variable u_east'
ncu.createVariable('u_east', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_east'].long_name = '3D u-momentum east boundary condition'
ncu.variables['u_east'].units = 'meter second-1'
ncu.variables['u_east'].field = 'u_east, scalar, series'
print 'Creating variable u_west'
ncu.createVariable('u_west', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_west'].long_name = '3D u-momentum west boundary condition'
ncu.variables['u_west'].units = 'meter second-1'
ncu.variables['u_west'].field = 'u_east, scalar, series'
# create variable in destination file
print 'Creating variable ubar_north'
ncu.createVariable('ubar_north', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_north'].long_name = '2D u-momentum north boundary condition'
ncu.variables['ubar_north'].units = 'meter second-1'
ncu.variables['ubar_north'].field = 'ubar_north, scalar, series'
print 'Creating variable ubar_south'
ncu.createVariable('ubar_south', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_south'].long_name = '2D u-momentum south boundary condition'
ncu.variables['ubar_south'].units = 'meter second-1'
ncu.variables['ubar_south'].field = 'ubar_south, scalar, series'
print 'Creating variable ubar_east'
ncu.createVariable('ubar_east', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_east'].long_name = '2D u-momentum east boundary condition'
ncu.variables['ubar_east'].units = 'meter second-1'
ncu.variables['ubar_east'].field = 'ubar_east, scalar, series'
print 'Creating variable ubar_west'
ncu.createVariable('ubar_west', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_west'].long_name = '2D u-momentum west boundary condition'
ncu.variables['ubar_west'].units = 'meter second-1'
ncu.variables['ubar_west'].field = 'ubar_east, scalar, series'
print 'Creating variable v_north'
ncv.createVariable('v_north', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_north'].long_name = '3D v-momentum north boundary condition'
ncv.variables['v_north'].units = 'meter second-1'
ncv.variables['v_north'].field = 'v_north, scalar, series'
print 'Creating variable v_south'
ncv.createVariable('v_south', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_south'].long_name = '3D v-momentum south boundary condition'
ncv.variables['v_south'].units = 'meter second-1'
ncv.variables['v_south'].field = 'v_south, scalar, series'
print 'Creating variable v_east'
ncv.createVariable('v_east', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_east'].long_name = '3D v-momentum east boundary condition'
ncv.variables['v_east'].units = 'meter second-1'
ncv.variables['v_east'].field = 'v_east, scalar, series'
print 'Creating variable v_west'
ncv.createVariable('v_west', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_west'].long_name = '3D v-momentum west boundary condition'
ncv.variables['v_west'].units = 'meter second-1'
ncv.variables['v_west'].field = 'v_east, scalar, series'
print 'Creating variable vbar_north'
ncv.createVariable('vbar_north', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_north'].long_name = '2D v-momentum north boundary condition'
ncv.variables['vbar_north'].units = 'meter second-1'
ncv.variables['vbar_north'].field = 'vbar_north, scalar, series'
print 'Creating variable vbar_south'
ncv.createVariable('vbar_south', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_south'].long_name = '2D v-momentum south boundary condition'
ncv.variables['vbar_south'].units = 'meter second-1'
ncv.variables['vbar_south'].field = 'vbar_south, scalar, series'
print 'Creating variable vbar_east'
ncv.createVariable('vbar_east', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_east'].long_name = '2D v-momentum east boundary condition'
ncv.variables['vbar_east'].units = 'meter second-1'
ncv.variables['vbar_east'].field = 'vbar_east, scalar, series'
print 'Creating variable vbar_west'
ncv.createVariable('vbar_west', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_west'].long_name = '2D v-momentum west boundary condition'
ncv.variables['vbar_west'].units = 'meter second-1'
ncv.variables['vbar_west'].field = 'vbar_east, scalar, series'
# remaping
print 'remapping and rotating u and v from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
# flood the grid
print 'flood the grid'
src_uz = pyroms_toolbox.Grid_HYCOM.flood_fast(src_varu, src_grd, pos='t', \
spval=spval, dxy=dxy, cdepth=cdepth, kk=kk)
src_vz = pyroms_toolbox.Grid_HYCOM.flood_fast(src_varv, src_grd, pos='t', \
spval=spval, dxy=dxy, cdepth=cdepth, kk=kk)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file, \
spval=spval)
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u_north = pyroms.remapping.z2roms(dst_uz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
# rotate u,v fields
src_angle = pyroms.remapping.remap(src_grd.angle, \
'remap_weights_GLBa0.08_to_ARCTIC2_bilinear_t_to_rho.nc', \
spval=spval)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
dst_v_west = np.imag(U_west)
# move back to u,v points
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + dst_v_south[:,1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + dst_u_east[:,:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + dst_v_east[:,1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + dst_v_west[:,1:,0])
# Fix north pole
# print dst_u_north.shape
# print dst_v_north.shape
# dst_u_north[:,707] = 2/3.0*dst_u_north[:,706] + 1/3.0*dst_u_north[:,709]
# dst_u_north[:,708] = 1/3.0*dst_u_north[:,706] + 2/3.0*dst_u_north[:,709]
# dst_v_north[:,708] = 2/3.0*dst_v_north[:,707] + 1/3.0*dst_v_north[:,710]
# dst_v_north[:,709] = 1/3.0*dst_v_north[:,707] + 2/3.0*dst_v_north[:,710]
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_east[n, idxu_east[0]] = spval
dst_v_east[n, idxv_east[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
# compute depth average velocity ubar and vbar
# get z at the right position
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] + dst_grd.vgrid.z_w[0,:,-1,1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] + dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] + dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] + dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] + dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] + dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] + dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] + dst_grd.vgrid.z_w[0,:,1:,0])
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
for i in range(dst_u_south.shape[1]):
dst_ubar_south[i] = (dst_u_south[:,i] * np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_south.shape[1]):
dst_vbar_south[i] = (dst_v_south[:,i] * np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j]
dst_ubar_west[j] = (dst_u_west[:,j] * np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_east.shape[1]):
dst_vbar_east[j] = (dst_v_east[:,j] * np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j]
dst_vbar_west[j] = (dst_v_west[:,j] * np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
#mask
dst_ubar_north = np.ma.masked_where(dst_grd.hgrid.mask_u[-1,:] == 0, dst_ubar_north)
dst_ubar_south = np.ma.masked_where(dst_grd.hgrid.mask_u[0,:] == 0, dst_ubar_south)
dst_ubar_east = np.ma.masked_where(dst_grd.hgrid.mask_u[:,-1] == 0, dst_ubar_east)
dst_ubar_west = np.ma.masked_where(dst_grd.hgrid.mask_u[:,0] == 0, dst_ubar_west)
dst_vbar_north = np.ma.masked_where(dst_grd.hgrid.mask_v[-1,:] == 0, dst_vbar_north)
dst_vbar_south = np.ma.masked_where(dst_grd.hgrid.mask_v[0,:] == 0, dst_vbar_south)
dst_vbar_east = np.ma.masked_where(dst_grd.hgrid.mask_v[:,-1] == 0, dst_vbar_east)
dst_vbar_west = np.ma.masked_where(dst_grd.hgrid.mask_v[:,0] == 0, dst_vbar_west)
# write data in destination file
print 'write data in destination file'
ncu.variables['ocean_time'][0] = time
ncu.variables['u_north'][0] = dst_u_north
ncu.variables['u_south'][0] = dst_u_south
ncu.variables['u_east'][0] = dst_u_east
ncu.variables['u_west'][0] = dst_u_west
ncu.variables['ubar_north'][0] = dst_ubar_north
ncu.variables['ubar_south'][0] = dst_ubar_south
ncu.variables['ubar_east'][0] = dst_ubar_east
ncu.variables['ubar_west'][0] = dst_ubar_west
ncv.variables['ocean_time'][0] = time
ncv.variables['v_north'][0] = dst_v_north
ncv.variables['v_south'][0] = dst_v_south
ncv.variables['v_east'][0] = dst_v_east
ncv.variables['v_west'][0] = dst_v_west
ncv.variables['vbar_north'][0] = dst_vbar_north
ncv.variables['vbar_south'][0] = dst_vbar_south
ncv.variables['vbar_east'][0] = dst_vbar_east
ncv.variables['vbar_west'][0] = dst_vbar_west
# close file
ncu.close()
ncv.close()
cdf.close()
| kshedstrom/pyroms | examples/Arctic_HYCOM/remap_bdry_uv.py | Python | bsd-3-clause | 16,837 | [
"NetCDF"
] | ddeed37a7635dbbb91cce51990a2a2b98627bb1f6f14e94e1e9dfd94a641315c |
# -*-coding:utf8-*-
from __future__ import print_function
"""
This file is part of SkyLab
Skylab is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# python packages
from copy import deepcopy
from itertools import product
# scipy-project imports
import numpy as np
import scipy.interpolate
from scipy.stats import norm
# local package imports
from .utils import kernel_func
# default values for parameters

# model-parameters: fit seed and (lower, upper) bounds for the spectral index
_gamma_params = dict(gamma=[2., (1., 4.)])

# histogramming
_sinDec_bins = 25
_sinDec_range = None
_2dim_bins = 25

# spline
_ratio_perc = 99.
_1dim_order = 2
_2dim_order = 2
_precision = 0.1
_par_val = np.nan
# ``np.float`` was a deprecated alias of the builtin ``float`` and was
# removed in NumPy 1.24 -- use ``float`` directly (identical dtype).
_parab_cache = np.zeros((0, ), dtype=[("S1", float), ("a", float),
                                      ("b", float)])
def set_pars(self, **kwargs):
    r"""Set attributes on *self* from keyword arguments.

    Only attributes that already exist on the object are set; unknown
    names are skipped with a warning. Private attributes (leading
    underscore) are set but flagged as internal.
    """
    # ``dict.items`` works identically on Python 2 and 3 (``iteritems`` is
    # Python-2 only).
    for attr, value in kwargs.items():
        if not hasattr(self, attr):
            print(">>>> {0:s} does not have attribute '{1:s}', "
                  "skipping...".format(self.__repr__(), attr))
            continue
        if attr.startswith("_"):
            # attr already carries its leading underscore; do not prepend
            # another one in the message
            print(">>>> {0:s} should be considered private and "
                  "for internal use only!".format(attr))
        setattr(self, attr, value)
class NullModel(object):
    r"""Base class of models for likelihood fitting, this defines every core
    method of the likelihood fitting that is needed in the point source
    calculation without implementing any functionality. Use this class as
    starting point for an unbinned point source likelihood model.
    """

    def __init__(self, *args, **kwargs):
        # Abstract base: direct instantiation is forbidden. (The original
        # called ``.format(self.__repr__())`` on a message without any
        # placeholder -- a no-op that has been removed.)
        raise NotImplementedError(
            "NullModel only to be used as abstract superclass")

    @property
    def params(self):
        # dict of fit parameters: name -> (seed, (low, high))
        return self._params

    @params.setter
    def params(self, val):
        self._params = val
        return

    @params.deleter
    def params(self):
        # deleting resets to an empty parameter dict rather than removing
        # the attribute
        self._params = dict()
        return

    def __raise__(self, *args, **kwargs):
        raise NotImplementedError("Implemented as abstract in {0:s}...".format(
            self.__repr__()))

    def __call__(self, *args, **kwargs):
        r"""Calling the class-object will set it up for use of the other
        functions, i.e., creating splines out of data, etc.
        """
        self.__raise__()

    def background(self, *args, **kwargs):
        r"""Calculation of the background probability *B* in the point source
        likelihood, mainly a spatial dependent term.
        """
        self.__raise__()

    def signal(self, *args, **kwargs):
        r"""Calculation of the signal probability *S* in the point source
        likelihood, mainly a spatial dependent term.
        """
        self.__raise__()

    def reset(self, *args, **kwargs):
        r"""Resetting the llh-model to delete possible cached values.
        """
        self.__raise__()

    def weight(self, *args, **kwargs):
        r"""Additional weights calculated for each event, commonly used to
        implement energy weights in the point source likelihood.
        It differs from signal and background distributions in that it (to
        first approximation) does not depend on the source position.
        """
        self.__raise__()
class ClassicLLH(NullModel):
    r"""Classic likelihood model for point source searches, only using spatial
    information of each event.
    """
    # class-level defaults, taken from the module constants
    sinDec_bins = _sinDec_bins
    sinDec_range = _sinDec_range
    _order = _1dim_order
    _bckg_spline = np.nan
    _gamma = 2.

    def __init__(self, *args, **kwargs):
        r"""Constructor of ClassicLLH. Set all configurations here.
        """
        self.params = kwargs.pop("params", dict())
        # Set all attributes passed to class
        set_pars(self, **kwargs)
        return

    def __call__(self, exp, mc, livetime, **kwargs):
        r"""Use experimental data to create one dimensional spline of
        declination information for background information.

        Parameters
        -----------
        exp : structured array
            Experimental data with all neccessary fields, i.e., sinDec for
            ClassicLLH
        mc : structured array
            Same as exp for Monte Carlo plus true information.
        livetime : float
            Livetime to scale the Monte Carlo with

        Raises
        -------
        ValueError
            If any declination bin is empty or data falls outside the bins.
        """
        hist, bins = np.histogram(exp["sinDec"], density=True,
                                  bins=self.sinDec_bins,
                                  range=self.sinDec_range)

        # background spline
        # overwrite range and bins to actual bin edges
        self.sinDec_bins = bins
        self.sinDec_range = (bins[0], bins[-1])

        if np.any(hist <= 0.):
            bmids = (self.sinDec_bins[1:] + self.sinDec_bins[:-1]) / 2.
            estr = ("Declination hist bins empty, this must not happen! "
                    + "Empty bins: {0}".format(bmids[hist <= 0.]))
            raise ValueError(estr)
        elif np.any((exp["sinDec"] < bins[0]) | (exp["sinDec"] > bins[-1])):
            raise ValueError("Data outside of declination bins!")

        # spline of log-pdf; evaluated via exp() in background()
        self._bckg_spline = scipy.interpolate.InterpolatedUnivariateSpline(
            (bins[1:] + bins[:-1]) / 2.,
            np.log(hist), k=self.order)

        # eff. Area
        self._effA(mc, livetime, **kwargs)
        return

    def __str__(self):
        r"""String representation of ClassicLLH.
        """
        out_str = "{0:s}\n".format(self.__repr__())
        out_str += 67*"~"+"\n"
        out_str += "Spatial background hist:\n"
        out_str += "\tSinDec bins  : {0:3d}\n".format(len(self.sinDec_bins)-1)
        out_str += "\tSinDec range : {0:-4.2f} to {1:-4.2f}\n".format(
                        *self.sinDec_range)
        out_str += 67*"~"+"\n"
        return out_str

    def _effA(self, mc, livetime, **kwargs):
        r"""Build splines for effective Area given a fixed spectral
        index *gamma*.
        """
        # powerlaw weights; livetime converted from days to seconds
        w = mc["ow"] * mc["trueE"]**(-self.gamma) * livetime * 86400.

        # get pdf of event distribution
        h, bins = np.histogram(np.sin(mc["trueDec"]), weights=w,
                               bins=self.sinDec_bins, density=True)

        # normalize by solid angle
        h /= np.diff(self.sinDec_bins)

        # multiply histogram by event sum for event densitiy
        h *= w.sum()

        self._spl_effA = scipy.interpolate.InterpolatedUnivariateSpline(
                            (bins[1:] + bins[:-1]) / 2., np.log(h), k=self.order)
        return

    @property
    def bckg_spline(self):
        return self._bckg_spline

    @bckg_spline.setter
    def bckg_spline(self, val):
        if not hasattr(val, "__call__"):
            print(">>> WARNING: {0} is not callable! Not "
                  "spline-ish".format(val))
            return
        self._bckg_spline = val
        return

    @property
    def gamma(self):
        return self._gamma

    @gamma.setter
    def gamma(self, val):
        self._gamma = float(val)
        return

    @property
    def order(self):
        return self._order

    @order.setter
    def order(self, val):
        # BUGFIX: assign the backing attribute; the original assigned
        # ``self.order`` which recursed into this setter forever.
        self._order = int(val)
        return

    def background(self, ev):
        r"""Spatial background distribution.

        For IceCube is only declination dependent, in a more general scenario,
        it is dependent on zenith and
        azimuth, e.g. in ANTARES, KM3NET, or using time dependent information.

        Parameters
        -----------
        ev : structured array
            Event array, importand information *sinDec* for this calculation

        Returns
        --------
        P : array-like
            spatial background probability for each event to be found
            at *sinDec*
        """
        # 1/2pi from uniform right ascension, spline holds log of sinDec pdf
        return 1. / 2. / np.pi * np.exp(self.bckg_spline(ev["sinDec"]))

    def effA(self, dec, **params):
        r"""Calculate integrated effective Area at declination for distributing
        source events among different samples.
        """
        if (np.sin(dec) < self.sinDec_bins[0]
                or np.sin(dec) > self.sinDec_bins[-1]):
            return 0., None

        return self._spl_effA(np.sin(dec)), None

    def reset(self):
        r"""Classic likelihood does only depend on spatial part, needs no
        caching
        """
        return

    def signal(self, src_ra, src_dec, ev):
        r"""Spatial distance between source position and events

        Signal is assumed to cluster around source position.
        The distribution is assumed to be well approximated by a gaussian
        locally.

        Parameters
        -----------
        ev : structured array
            Event array, import information: sinDec, ra, sigma

        Returns
        --------
        P : array-like
            Spatial signal probability for each event
        """
        # great-circle distance via the spherical law of cosines
        cos_ev = np.sqrt(1. - ev["sinDec"]**2)
        cosDist = (
            np.cos(src_ra - ev["ra"]) * np.cos(src_dec) * cos_ev +
            np.sin(src_dec) * ev["sinDec"]
        )
        # handle possible floating precision errors
        cosDist[np.isclose(cosDist, 1.) & (cosDist > 1)] = 1.
        dist = np.arccos(cosDist)
        # 2D gaussian with per-event angular uncertainty sigma
        return (1./2./np.pi/ev["sigma"]**2
                * np.exp(-dist**2 / 2. / ev["sigma"]**2))

    def weight(self, ev, **params):
        r"""For classicLLH, no weighting of events
        """
        return np.ones(len(ev)), None
class UniformLLH(ClassicLLH):
    r"""Spatial LLH class assuming an isotropic (uniform all-sky) event
    distribution.
    """

    def __call__(self, *args, **kwargs):
        # nothing to set up for a uniform background
        return

    def background(self, ev):
        r"""Return the constant all-sky background pdf value per event."""
        n_events = len(ev)
        return np.full(n_events, 1. / 4. / np.pi)
class WeightLLH(ClassicLLH):
    r"""Likelihood class supporting weights for the calculation.

    The weights are calculated using N observables for exp. data and Monte
    Carlo.

    Abstract class, not incorporating a weighting scheme for Monte Carlo.
    """
    # rounding precision for parameter values used as spline-cache keys
    _precision = _precision
    # cached parameter value (NaN until first use) -- presumably the last
    # evaluated spectral index; TODO confirm against weight() implementation
    _g1 = _par_val
    # cached per-event parabola coefficients ("S1", "a", "b")
    _w_cache = _parab_cache
def __init__(self, params, pars, bins, *args, **kwargs):
r"""Constructor
Parameters
-----------
params : dict
List of fit parameters. Each entry is a tuple out of
(seed, [lower bound, upper bound])
pars : list
Parameter names to use for histogram, without sinDec, which is
added as last normalisation parameter
bins : int, ndarray
Binning for each parameter
Other Parameters
-----------------
range : ndarray
Bin ranges for each parameter
kernel : ndarray, int, float
Smoothing filter defining the kernel for smoothing. Smoothing done
solely for dimensions that are not normalised. A ndarray specifies
the filter directly, an int is used for a flat kernel with size
of *filter* in direction of both sides, a float uses a normal
distributed kernel with approximately one standard deviation per
bin.
"""
params = params
self.hist_pars = pars
self._ndim_bins = bins
self._ndim_range = kwargs.pop("range", None)
self._ndim_norm = kwargs.pop("normed", 0)
# define kernel
kernel = kwargs.pop("kernel", 0)
if np.all(np.asarray(kernel) == 0):
# No smoothing
self._XX = None
else:
if isinstance(kernel, (list, np.ndarray)):
kernel_arr = np.asarray(kernel)
assert(np.all(kernel_arr >= 0))
elif isinstance(kernel, int):
assert(kernel > 0)
kernel_arr = np.ones(2 * kernel + 1, dtype=np.float)
elif isinstance(kernel, float):
assert(kernel >= 1)
val = 1.6635
r = np.linspace(-val, val, 2 * int(kernel) + 1)
kernel_arr = norm.pdf(r)
else:
raise ValueError(
"Kernel has to be positive int / float or array")
XX = [kernel_arr for i in range(
len(self.hist_pars) - self._ndim_norm)]
XX.extend([[1] for i in range(self._ndim_norm)])
XX = np.meshgrid(*XX)
self._XX = np.product(XX, axis=0).T
super(WeightLLH, self).__init__(*args, params=params, **kwargs)
self._w_spline_dict = dict()
return
def __call__(self, exp, mc, livetime):
    r"""Set up the likelihood for a data sample.

    In addition to *classicLLH.__call__()*, signal-over-background ratio
    splines are pre-computed and cached for all parameter grid points
    that can occur during minimisation.
    """
    self._setup(exp)

    # Calculate splines for all grid values of the fit parameters.
    par_grid = dict()
    for par, val in self.params.items():
        # BUGFIX: dict.iteritems() exists only in Python 2; items() works
        # in both Python 2 and 3.
        # Create grid of all values that could come up due to boundaries;
        # use one more grid point below and above for gradient calculation.
        low, high = val[1]
        grid = np.arange(low - self._precision,
                         high + 2. * self._precision,
                         self._precision)
        par_grid[par] = grid

    # Fix the key order while iterating the cartesian product below.
    pars = list(par_grid.keys())
    for tup in product(*par_grid.values()):
        # Call spline function once per grid point to cache the spline.
        self._ratio_spline(mc, **dict([(p_i, self._around(t_i))
                                       for p_i, t_i in zip(pars, tup)]))

    # Create spatial splines of classic LLH class and effective area.
    super(WeightLLH, self).__call__(exp, mc, livetime, **par_grid)
    return
def __str__(self):
    r"""String representation.

    Extends the base-class representation with the weighting-histogram
    binning, value ranges, and the parameter-grid precision.
    """
    out_str = super(WeightLLH, self).__str__()
    out_str += "Weighting hist:\n"
    # One entry per histogram parameter: number of bins and value range.
    for p, b, r in zip(self.hist_pars, self._ndim_bins, self._ndim_range):
        out_str += "\t{0:11s} : {1:3d}\n".format(p + " bins", len(b)-1)
        out_str += "\t{0:11s} : {1:-4.2f} to {2:-4.2f}\n".format(
            p + " range", *r)
    out_str += "\tPrecision : {0:4.2f}\n".format(self._precision)
    out_str += 67*"~"+"\n"
    return out_str
def _around(self, value):
    r"""Snap a value onto the grid defined by the class precision.

    Parameters
    -----------
    value : array-like
        Value to round to the precision grid.

    Returns
    --------
    round : array-like
        The value rounded to the nearest multiple of the precision.
    """
    step = self._precision
    n_steps = np.around(float(value) / step)
    return n_steps * step
def _get_weights(self, **params):
    r"""Calculate weights using the given parameters.

    Abstract hook: subclasses must override this with a concrete
    Monte Carlo weighting scheme.

    Parameters
    -----------
    params : dict
        Dictionary containing the parameter values for the weighting.

    Returns
    --------
    weights : array-like
        Weights for each event

    Raises
    -------
    NotImplementedError
        Always, in this base class.
    """
    raise NotImplementedError("Weigthing not specified, using subclass")
def _hist(self, arr, weights=None):
    r"""Create histogram of data so that it is correctly normalized.

    Uses the binning and range configured at construction (and
    overwritten by *_setup* once experimental data is seen).
    """
    # NOTE(review): *normed* was renamed *density* and removed in newer
    # numpy versions -- confirm the supported numpy range before upgrading.
    h, binedges = np.histogramdd(arr, bins=self._ndim_bins,
                                 range=self._ndim_range,
                                 weights=weights, normed=True)
    if self._ndim_norm > 0:
        # Additionally normalise over the leading axes so each slice of
        # the trailing *_ndim_norm* dimensions sums to one; empty slices
        # are left untouched (divide by 1).
        norms = np.sum(h, axis=tuple(range(h.ndim - self._ndim_norm)))
        norms[norms == 0] = 1.
        h /= norms
    return h, binedges
def _ratio_spline(self, mc, **params):
    r"""Create the ratio of signal over background probabilities.

    With the same binning for both histograms, the bin hypervolume
    cancels out, ensuring correct normalisation of the histograms.

    Parameters
    -----------
    mc : recarray
        Monte Carlo events to use for spline creation.
    params : dict
        (Physics) parameters used for signal pdf calculation.

    Returns
    --------
    spline : scipy.interpolate.RegularGridInterpolator
        Linear interpolator of the log-ratio for parameter values
        *params* (also cached in *_w_spline_dict*).
    """
    # For the sinDec dimension, use the true declination of the MC events.
    mcvars = [mc[p] if not p == "sinDec" else np.sin(mc["trueDec"])
              for p in self.hist_pars]

    # Create the (smoothed) weighted MC histogram.
    wSh, wSb = self._hist(mcvars, weights=self._get_weights(mc, **params))
    wSh = kernel_func(wSh, self._XX)
    wSd = wSh > 0.

    # Calculate the ratio; bins outside either domain default to 1.
    # BUGFIX: np.float was removed in numpy>=1.24; use builtin float.
    ratio = np.ones_like(self._wB_hist, dtype=float)
    ratio[wSd & self._wB_domain] = (wSh[wSd & self._wB_domain]
                                    / self._wB_hist[wSd & self._wB_domain])

    # Values outside of the exp domain, but inside the MC one, are mapped
    # to the most signal-like value.
    min_ratio = np.percentile(ratio[ratio > 1.], _ratio_perc)
    np.copyto(ratio, min_ratio, where=wSd & ~self._wB_domain)

    binmids = [(wSb_i[1:] + wSb_i[:-1]) / 2. for wSb_i in wSb]
    # Extend the last dimension's outermost mids to the bin edges so the
    # interpolator covers the full data range.
    # BUGFIX: the original read *wSb_i* after the comprehension, relying
    # on the loop variable leaking out -- that only works in Python 2 and
    # raises NameError in Python 3. Use the last edge array explicitly.
    binmids[-1][[0, -1]] = wSb[-1][0], wSb[-1][-1]
    binmids = tuple(binmids)

    spline = scipy.interpolate.RegularGridInterpolator(
        binmids, np.log(ratio),
        method="linear",
        bounds_error=False,
        fill_value=0.)

    self._w_spline_dict[tuple(params.items())] = spline
    return spline
def _setup(self, exp):
    r"""Set up everything for weight calculation.

    Builds the (smoothed) background histogram from experimental data
    and locks the binning/range to the resulting bin edges.
    """
    # set up weights for background distribution, reset all cached values
    self._w_spline_dict = dict()

    expvars = [exp[p] for p in self.hist_pars]
    self._wB_hist, self._wB_bins = self._hist(expvars)
    # Smooth with the configured kernel (kernel_func is module-level).
    self._wB_hist = kernel_func(self._wB_hist, self._XX)
    self._wB_domain = self._wB_hist > 0

    # Overwrite bins/ranges so subsequent MC histograms use the exact
    # same binning as the experimental one.
    self._ndim_bins = self._wB_bins
    self._ndim_range = tuple([(wB_i[0], wB_i[-1])
                              for wB_i in self._wB_bins])
    return
def _spline_eval(self, spline, ev):
    r"""Evaluate *spline* at the event coordinates of the histogram
    parameters (one row of coordinates per event)."""
    coords = np.vstack([ev[p] for p in self.hist_pars]).T
    return spline(coords)
@property
def hist_pars(self):
    # Names of the observables used as histogram dimensions.
    return self._hist_pars

@hist_pars.setter
def hist_pars(self, val):
    # Store a list copy so later mutation of *val* has no effect here.
    self._hist_pars = list(val)
    return
def reset(self):
    r"""Energy weights are cached, reset all cached values.
    """
    # Re-bind the cache to the module-level template (_parab_cache).
    # NOTE(review): this assigns the shared module-level object, not a
    # copy -- confirm the cache is only ever replaced, never mutated.
    self._w_cache = _parab_cache
    return
def weight(self, ev, **params):
    r"""Evaluate the cached energy-weight parabola for given parameters.

    The log-ratio splines are evaluated on the gamma grid around the
    requested value and locally approximated by a parabola, whose
    coefficients are cached per grid point and event count.

    Parameters
    -----------
    ev : structured array
        Events to be evaluated.
    params : dict
        Parameters for evaluation; must contain "gamma".

    Returns
    --------
    val : array-like (N), N events
        Function value.
    grad : array-like (N, M), N events in M parameter dimensions
        Gradients at function value.
    """
    gamma = params["gamma"]

    # Evaluate on finite grid points in spectral index gamma.
    g1 = self._around(gamma)
    dg = self._precision

    # Re-use cached parabola coefficients if the grid point and the
    # number of events are unchanged.
    if (np.isfinite(self._g1)
            and g1 == self._g1
            and len(ev) == len(self._w_cache)):
        S1 = self._w_cache["S1"]
        a = self._w_cache["a"]
        b = self._w_cache["b"]
    else:
        # Evaluate neighbouring grid points and parametrise a parabola.
        g0 = self._around(g1 - dg)
        g2 = self._around(g1 + dg)

        S0 = self._spline_eval(self._w_spline_dict[(("gamma", g0), )], ev)
        S1 = self._spline_eval(self._w_spline_dict[(("gamma", g1), )], ev)
        S2 = self._spline_eval(self._w_spline_dict[(("gamma", g2), )], ev)

        # Finite-difference curvature and slope of the parabola.
        a = (S0 - 2. * S1 + S2) / (2. * dg**2)
        b = (S2 - S0) / (2. * dg)

        # Cache values for repeated evaluation at this grid point.
        # BUGFIX: np.float was removed in numpy>=1.24; use builtin float.
        self._g1 = g1
        self._w_cache = np.zeros((len(ev),),
                                 dtype=[("S1", float), ("a", float),
                                        ("b", float)])
        self._w_cache["S1"] = S1
        self._w_cache["a"] = a
        self._w_cache["b"] = b

    # Calculate value and gradient on the parabola.
    val = np.exp(a * (gamma - g1)**2 + b * (gamma - g1) + S1)
    grad = val * (2. * a * (gamma - g1) + b)

    return val, np.atleast_2d(grad)
class PowerLawLLH(WeightLLH):
    r"""Weighted LLH class assuming unbroken power-law spectra for weighting.

    Optional Parameters
    --------------------
    seed : float
        Seed for gamma parameter
    bounds : ndarray (len 2)
        Bounds for minimisation
    """
    def __init__(self, *args, **kwargs):
        # Build the gamma fit-parameter tuple (seed, bounds); defaults
        # come from the module-level _gamma_params table.
        params = dict(gamma=(
            kwargs.pop("seed", _gamma_params["gamma"][0]),
            deepcopy(kwargs.pop("bounds", deepcopy(_gamma_params["gamma"][1])))
        ))
        super(PowerLawLLH, self).__init__(params, *args, **kwargs)
        return

    def _effA(self, mc, livetime, **pars):
        r"""Calculate two dimensional spline of effective Area versus
        declination and spectral index for Monte Carlo.
        """
        gamma_vals = pars["gamma"]
        x = np.sin(mc["trueDec"])
        # One weighted sin(dec) histogram per gamma grid value; livetime
        # is converted from days to seconds (86400 s/day).
        hist = np.vstack([np.histogram(
            x, weights=self._get_weights(mc, gamma=gm) * livetime * 86400.,
            bins=self.sinDec_bins)[0] for gm in gamma_vals]).T
        # normalize bins by their binvolume, one dimension is the parameter
        # with width of *precision*
        bin_vol = np.diff(self.sinDec_bins)
        hist /= bin_vol[:, np.newaxis] * np.full_like(
            gamma_vals, self._precision)
        # NOTE(review): the x-coordinates below are sums of adjacent bin
        # edges, i.e. twice the bin midpoints (no division by 2, unlike
        # _ratio_spline) -- confirm this is intended before changing.
        self._spl_effA = scipy.interpolate.RectBivariateSpline(
            (self.sinDec_bins[1:] + self.sinDec_bins[:-1]), gamma_vals,
            np.log(hist), kx=2, ky=2, s=0)
        return

    @staticmethod
    def _get_weights(mc, **params):
        r"""Calculate per-event power-law weights ow * trueE**(-gamma).

        Parameters
        -----------
        params : dict
            Dictionary containing the parameter values for the weighting;
            must contain "gamma".

        Returns
        --------
        weights : array-like
            Weights for each event
        """
        return mc["ow"] * mc["trueE"]**(-params["gamma"])

    def effA(self, dec, **params):
        r"""Evaluate effective Area at declination and spectral index.

        Parameters
        -----------
        dec : float
            Declination.
        gamma : float
            Spectral index (passed via *params*).

        Returns
        --------
        effA : float
            Effective area at given point(s); 0 outside the declination
            binning.
        grad_effA : dict or None
            Gradient w.r.t. gamma at given point(s); None outside range.
        """
        if (np.sin(dec) < self.sinDec_bins[0]
                or np.sin(dec) > self.sinDec_bins[-1]):
            return 0., None
        gamma = params["gamma"]
        # The spline stores log(effA); exponentiate and use the chain
        # rule for the gamma gradient.
        val = np.exp(self._spl_effA(np.sin(dec), gamma, grid=False, dy=0.))
        grad = val * self._spl_effA(np.sin(dec), gamma, grid=False, dy=1.)
        return val, dict(gamma=grad)
class EnergyLLH(PowerLawLLH):
    r"""Likelihood using Energy Proxy and declination, where declination is
    used for normalisation to account for changing energy distributions.
    """
    def __init__(self, twodim_bins=_2dim_bins, twodim_range=None,
                 **kwargs):
        r"""Constructor

        Parameters
        -----------
        twodim_bins : int, ndarray
            Binning of the (logE, sinDec) histogram.
        twodim_range : ndarray, optional
            Value ranges of the (logE, sinDec) histogram.
        """
        # Histogram in (logE, sinDec); the trailing sinDec dimension is
        # used purely for normalisation (normed=1).
        super(EnergyLLH, self).__init__(["logE", "sinDec"],
                                        twodim_bins, range=twodim_range,
                                        normed=1,
                                        **kwargs)
        return
class EnergyDistLLH(PowerLawLLH):
    r"""Likelihood using Energy Proxy and starting distance for evaluation.

    Declination is not used for normalisation, assuming that the energy
    does not change rapidly with declination.
    """
    def __init__(self, twodim_bins=_2dim_bins, twodim_range=None,
                 **kwargs):
        r"""Constructor

        Parameters
        -----------
        twodim_bins : int, ndarray
            Binning of the (logE, dist) histogram.
        twodim_range : ndarray, optional
            Value ranges of the (logE, dist) histogram.
        """
        # Histogram in (logE, dist); no normalisation dimension is used.
        super(EnergyDistLLH, self).__init__(["logE", "dist"],
                                            twodim_bins, range=twodim_range,
                                            **kwargs)
        return
class EnergyLLHfixed(EnergyLLH):
    r"""Energy Likelihood that uses external data to create the splines;
    the splines are NOT re-created from the data handed to __call__.
    """
    def __init__(self, exp, mc, livetime, **kwargs):
        r"""Constructor

        Parameters
        -----------
        exp, mc : structured arrays
            Data used once to build the splines.
        livetime : float
            Livetime associated with the sample.
        """
        # call constructor of super-class, settings are set.
        super(EnergyLLHfixed, self).__init__(**kwargs)

        # do the call already, fixing the splines to (exp, mc, livetime)
        super(EnergyLLHfixed, self).__call__(exp, mc, livetime)
        return

    def __call__(self, exp, mc, livetime):
        r"""Call function not used here; splines stay fixed.
        """
        print("EnergyLLH with FIXED splines used here, call has no effect")
        return
| coenders/skylab | skylab/ps_model.py | Python | gpl-3.0 | 25,950 | [
"Gaussian"
] | d556d3b1b089b4da280fd96e02508d0fe45ae4e6fbbf62ba1667bdeb79d7f062 |
"""
CBMPy: fluxmodules decomposition test module
This module only exists for testing purposes
=====================
PySCeS Constraint Based Modelling (http://cbmpy.sourceforge.net)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Arne C. Reimers
Contact email: arne.c.reimers@gmail.com
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
#from __future__ import unicode_literals
import unittest
import os
from cbmpy import CBRead
from . import matroid
from .decomposition import Decomposition, Vertex
from .sparserationals import Matrix
class TestDecomposition(unittest.TestCase):
    """Unit tests for Decomposition on a small hand-written matroid."""

    def setUp(self):
        """ create test matroid """
        # 6x9 matrix; the nine columns are the matroid elements 1..9.
        mat = Matrix([[-2, -1, 1, 0, 0, 0, 0, 0, 0],
                      [1, 0, 0, -1, 0, 0, 0, 0, 0],
                      [0, 1, -1, 1, -1, -1, 0, 0, 0],
                      [1, 0, 0, 0, 1, 0, -1, 0, 0],
                      [0, 0, 0, 0, 0, 1, 1, -1, 1],
                      [0, 0, 0, 0, 0, 0, 0, 1, -1]])
        elems = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.matroid = matroid.fromMatrix(mat, elems)
        self.decomp = Decomposition(self.matroid)

    def tearDown(self):
        pass

    def testZeroModules(self):
        # Splitting off 0-modules from the root should yield exactly two
        # child vertices connected by empty interfaces.
        self.decomp.splitZeroModules(self.decomp.root)
        self.assertEqual(len(self.decomp.root.edges), 2)
        for e in self.decomp.root.edges.values():
            self.assertEqual(len(e), 0)
        for e in self.decomp.root.edges.keys():
            self.assertIsInstance(e, Vertex)
            self.assertEqual(len(e.edges), len(e.matroid.elems) + 1)
            # The split separates elements {8, 9} from the rest.
            if len(e.matroid.elems) == 2:
                self.assertSetEqual(set(e.matroid.elems), {8, 9})
            else:
                self.assertSetEqual(set(e.matroid.elems), {1, 2, 3, 4, 5, 6, 7})

    def testOneModules(self):
        for v in self.decomp.listNonLeaves():
            self.decomp.splitOneModules(v)
        for v in self.decomp.listNonLeaves():
            print(v)
            print(v.edges)
            print("")
        self.assertTrue(self.decomp.isParallelSeries())
        # Every edge must induce a separation of connectivity <= 1 whose
        # interface size equals that connectivity.
        for v in self.decomp.listNonLeaves():
            for (w, interface) in v.edges.items():
                sep = w.getLeaves(v)
                print(sep)
                conn = self.matroid.connectivity(sep)
                self.assertLessEqual(conn, 1)
                self.assertEqual(len(interface), conn)
class TestDecomposition2(unittest.TestCase):
    """Decomposition tests on the toy model of Maarleveld et al. (2014)."""

    def setUp(self):
        # Load the SBML level-3 toy model and build the matroid of its
        # stoichiometric matrix.
        cDir = os.path.dirname(__file__)
        model_dir = os.path.join(cDir, '../models')
        model_file = "toy_model_maarleveld2014.l3.xml"
        self.cmod = CBRead.readSBML3FBC(model_file, model_dir)
        matrix = Matrix()
        labels, _ = matrix.addMetabolicNetwork(self.cmod)
        self.matroid = matroid.fromMatrix(matrix, labels)
        print(self.matroid.basis)
        print(self.matroid.nbasis)
        print(self.matroid.rep.toNumpy())

    def tearDown(self):
        pass

    def _assert_groups_similar(self, sim, groups):
        # Helper: all distinct elements within each group must have
        # pairwise similarity (approximately) 1 in *sim*.
        # Extracted to remove the triplicated nested loop in the
        # testPoolmanDirect* methods.
        for group in groups:
            for e in group:
                for f in group:
                    if e != f:
                        ei = sim.elems.index(e)
                        fi = sim.elems.index(f)
                        self.assertAlmostEqual(
                            sim[ei, fi], 1,
                            msg="at %s(%d) %s(%d)" % (e, ei, f, fi))

    def testPoolmanDirectRows(self):
        decomp = Decomposition(self.matroid)
        sim = self.matroid.similarityMatrix("rows")
        # we expect coparallel elements to have similarity 1
        self._assert_groups_similar(sim, self.matroid.coparallel())
        decomp.poolmanMethod(decomp.root, sim)
        write(decomp.makeGraphViz(), "poolman_direct_rows.gv")

    def testPoolmanDirectCols(self):
        decomp = Decomposition(self.matroid)
        sim = self.matroid.similarityMatrix("cols")
        # we expect parallel elements to have similarity 1
        self._assert_groups_similar(sim, self.matroid.parallel())
        decomp.poolmanMethod(decomp.root, sim)
        write(decomp.makeGraphViz(), "poolman_direct_cols.gv")

    def testPoolmanDirectDirect(self):
        decomp = Decomposition(self.matroid)
        sim = self.matroid.similarityMatrix("direct")
        # we expect parallel elements to have similarity 1
        self._assert_groups_similar(sim, self.matroid.parallel())
        decomp.poolmanMethod(decomp.root, sim)
        write(decomp.makeGraphViz(), "poolman_direct_direct.gv")

    def testPoolmanDirectCombinded(self):
        # NOTE: the method name keeps the historical "Combinded" spelling
        # so the externally visible test id does not change.
        decomp = Decomposition(self.matroid)
        sim = self.matroid.similarityMatrix("combined")
        # we expect both coparallel and parallel elements to have
        # similarity 1
        self._assert_groups_similar(sim, self.matroid.coparallel())
        self._assert_groups_similar(sim, self.matroid.parallel())
        decomp.poolmanMethod(decomp.root, sim)
        write(decomp.makeGraphViz(), "poolman_direct_combined.gv")

    def testOneModules(self):
        decomp = Decomposition(self.matroid)
        decomp.splitOneModules(decomp.root)
        write(decomp.makeGraphViz(), "one_modules.gv")

    def testPoolmanRows(self):
        decomp = Decomposition(self.matroid)
        decomp.splitOneModules(decomp.root)
        sim = decomp.root.matroid.similarityMatrix("rows")
        decomp.poolmanMethod(decomp.root, sim)
        write(decomp.makeGraphViz(), "poolman_rows.gv")
# @unittest.skip("takes quite long")
class TestDecompositionITM686(unittest.TestCase):
def setUp(self):
cDir = os.path.dirname(__file__)
model_dir = os.path.join(cDir, '../models')
model_file = "iTM686_sbml3.xml"
self.cmod = CBRead.readSBML3FBC(model_file, model_dir)
matrix = Matrix()
labels, _ = matrix.addMetabolicNetwork(self.cmod)
self.matroid = matroid.fromMatrix(matrix, labels)
def tearDown(self):
pass
@unittest.skip("takes quite long")
def testPoolmanRows(self):
decomp = Decomposition(self.matroid)
decomp.splitSimple()
for v in decomp.listNonLeaves():
if not v.isSimple():
sim = v.matroid.similarityMatrix("rows")
decomp.poolmanMethod(v, sim)
write(decomp.makeGraphViz(), "iTM686_poolman_rows.gv")
self.assertTrue(decomp.verifyFullyBranched())
self.assertTrue(decomp.verifyEdgeWidth())
print("width rows = %d" % decomp.getWidth())
@unittest.skip("takes quite long")
def testPoolmanCols(self):
decomp = Decomposition(self.matroid)
decomp.splitSimple()
for v in decomp.listNonLeaves():
if not v.isSimple():
sim = v.matroid.similarityMatrix("cols")
decomp.poolmanMethod(v, sim)
write(decomp.makeGraphViz(), "iTM686_poolman_cols.gv")
# self.assertTrue(decomp.verifyFullyBranched())
# self.assertTrue(decomp.verifyEdgeWidth())
print("width cols = %d" % decomp.getWidth())
# @unittest.skip("takes quite long")
def testPoolmanCombined(self):
decomp = Decomposition(self.matroid)
decomp.splitSimple()
for v in decomp.listNonLeaves():
if not v.isSimple():
sim = v.matroid.similarityMatrix("combined")
decomp.poolmanMethod(v, sim)
write(decomp.makeGraphViz(), "iTM686_poolman_combined.gv")
# self.assertTrue(decomp.verifyFullyBranched())
# self.assertTrue(decomp.verifyEdgeWidth())
print("width combined = %d" % decomp.getWidth())
@unittest.skip("takes quite long")
def testPoolmanDirect(self):
decomp = Decomposition(self.matroid)
decomp.splitSimple()
sim = decomp.matroid.similarityMatrix("direct")
for v in decomp.listNonLeaves():
if not v.isSimple():
decomp.poolmanMethod(v, sim)
write(decomp.makeGraphViz(), "iTM686_poolman_direct.gv")
# self.assertTrue(decomp.verifyFullyBranched())
# self.assertTrue(decomp.verifyEdgeWidth())
print("width direct = %d" % decomp.getWidth())
class TestDecompositionEcore(unittest.TestCase):
    """Decomposition runs on the E. coli core model (COBRA SBML)."""

    def setUp(self):
        # Load the COBRA-dialect SBML model and build the matroid of its
        # stoichiometric matrix.
        cDir = os.path.dirname(__file__)
        model_dir = os.path.join(cDir, '../models')
        model_file = "ecoli_core_COBRA.xml"
        self.cmod = CBRead.readCOBRASBML(model_file, model_dir)
        matrix = Matrix()
        labels, _ = matrix.addMetabolicNetwork(self.cmod)
        self.matroid = matroid.fromMatrix(matrix, labels)

    def tearDown(self):
        pass

    def testPoolmanRows(self):
        decomp = Decomposition(self.matroid)
        decomp.splitSimple()
        for v in decomp.listNonLeaves():
            if not v.isSimple():
                sim = v.matroid.similarityMatrix("rows")
                decomp.poolmanMethod(v, sim)
        write(decomp.makeGraphViz(cmod=self.cmod), "ecoli_core_poolman_rows.gv")
        self.assertTrue(decomp.verifyFullyBranched())
        self.assertTrue(decomp.verifyEdgeWidth())
        print("width rows = %d" % decomp.getWidth())

    def testPoolmanCols(self):
        decomp = Decomposition(self.matroid)
        decomp.splitSimple()
        for v in decomp.listNonLeaves():
            if not v.isSimple():
                sim = v.matroid.similarityMatrix("cols")
                decomp.poolmanMethod(v, sim)
        write(decomp.makeGraphViz(cmod=self.cmod), "ecoli_core_poolman_cols.gv")
        self.assertTrue(decomp.verifyFullyBranched())
        self.assertTrue(decomp.verifyEdgeWidth())
        print("width cols = %d" % decomp.getWidth())

    def testPoolmanCombined(self):
        decomp = Decomposition(self.matroid)
        decomp.splitSimple()
        for v in decomp.listNonLeaves():
            if not v.isSimple():
                sim = v.matroid.similarityMatrix("combined")
                decomp.poolmanMethod(v, sim)
        write(decomp.makeGraphViz(cmod=self.cmod), "ecoli_core_poolman_combined.gv")
        self.assertTrue(decomp.verifyFullyBranched())
        self.assertTrue(decomp.verifyEdgeWidth())
        print("width combined = %d" % decomp.getWidth())

    def testPoolmanDirect(self):
        decomp = Decomposition(self.matroid)
        decomp.splitSimple()
        # NOTE(review): uses decomp.matroid for the similarity matrix,
        # unlike the per-vertex v.matroid above -- confirm intended.
        sim = decomp.matroid.similarityMatrix("direct")
        for v in decomp.listNonLeaves():
            if not v.isSimple():
                decomp.poolmanMethod(v, sim)
        write(decomp.makeGraphViz(cmod=self.cmod), "ecoli_core_poolman_direct.gv")
        self.assertTrue(decomp.verifyFullyBranched())
        self.assertTrue(decomp.verifyEdgeWidth())
        print("width direct = %d" % decomp.getWidth())
def write(result, filename):
    """Write *result* to *filename* in the package results directory.

    Parameters:
        result (str): text content to write (e.g. a GraphViz document)
        filename (str): file name, resolved relative to ``../../results``
            next to this module
    """
    cDir = os.path.dirname(__file__)
    result_dir = os.path.join(cDir, '../../results')
    result_file = os.path.join(result_dir, filename)
    # BUGFIX: use a context manager so the handle is closed even if the
    # write raises (the original leaked the handle on error).
    with open(result_file, mode='w') as fo:
        fo.write(result)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| SystemsBioinformatics/cbmpy | cbmpy/fluxmodules/TestDecomposition.py | Python | gpl-3.0 | 13,673 | [
"PySCeS"
] | f833dfc37250caeb34afd07dc31e75b91facf53b9022d2b29bd043d679f5e75c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to interface with the Materials Project REST
API v2 to enable the creation of data structures and pymatgen objects using
Materials Project data.
To make use of the Materials API, you need to be a registered user of the
Materials Project, and obtain an API key by going to your dashboard at
https://www.materialsproject.org/dashboard.
"""
import itertools
import json
import logging
import platform
import re
import sys
import warnings
from collections import defaultdict
from enum import Enum, unique
from time import sleep
import requests
from monty.json import MontyDecoder, MontyEncoder
from monty.serialization import dumpfn
from pymatgen.core import SETTINGS, SETTINGS_FILE, yaml
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_symmetrically_equivalent_miller_indices
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.entries.exp_entries import ExpEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.sequence import PBar, get_chunks
from pymatgen.core import __version__ as PMG_VERSION
logger = logging.getLogger(__name__)
@unique
class TaskType(Enum):
    """Task types available in MP (labels of calculation tasks)."""

    # Structure optimisations
    GGA_OPT = "GGA Structure Optimization"
    GGAU_OPT = "GGA+U Structure Optimization"
    SCAN_OPT = "SCAN Structure Optimization"
    # Non-self-consistent band-structure runs
    GGA_LINE = "GGA NSCF Line"
    GGAU_LINE = "GGA+U NSCF Line"
    GGA_UNIFORM = "GGA NSCF Uniform"
    GGAU_UNIFORM = "GGA+U NSCF Uniform"
    # Static runs
    GGA_STATIC = "GGA Static"
    GGAU_STATIC = "GGA+U Static"
    # Static dielectric runs
    GGA_STATIC_DIEL = "GGA Static Dielectric"
    GGAU_STATIC_DIEL = "GGA+U Static Dielectric"
    # Deformation (elasticity) runs
    GGA_DEF = "GGA Deformation"
    GGAU_DEF = "GGA+U Deformation"
    LDA_STATIC_DIEL = "LDA Static Dielectric"
class MPRester:
    """
    A class to conveniently interface with the Materials Project REST
    interface. The recommended way to use MPRester is with the "with" context
    manager to ensure that sessions are properly closed after usage::

        with MPRester("API_KEY") as m:
            do_something

    MPRester uses the "requests" package, which provides for HTTP connection
    pooling. All connections are made via https for security.

    For more advanced uses of the Materials API, please consult the API
    documentation at https://github.com/materialsproject/mapidoc.
    """

    # Property names accepted by the materials endpoints (see get_data).
    supported_properties = (
        "energy",
        "energy_per_atom",
        "volume",
        "formation_energy_per_atom",
        "nsites",
        "unit_cell_formula",
        "pretty_formula",
        "is_hubbard",
        "elements",
        "nelements",
        "e_above_hull",
        "hubbards",
        "is_compatible",
        "spacegroup",
        "task_ids",
        "band_gap",
        "density",
        "icsd_id",
        "icsd_ids",
        "cif",
        "total_magnetization",
        "material_id",
        "oxide_type",
        "tags",
        "elasticity",
    )

    # Subset of property names valid for the tasks endpoints
    # (see get_task_data).
    supported_task_properties = (
        "energy",
        "energy_per_atom",
        "volume",
        "formation_energy_per_atom",
        "nsites",
        "unit_cell_formula",
        "pretty_formula",
        "is_hubbard",
        "elements",
        "nelements",
        "e_above_hull",
        "hubbards",
        "is_compatible",
        "spacegroup",
        "band_gap",
        "density",
        "icsd_id",
        "cif",
    )
def __init__(
    self,
    api_key=None,
    endpoint=None,
    notify_db_version=True,
    include_user_agent=True,
):
    """
    Args:
        api_key (str): A String API key for accessing the MaterialsProject
            REST interface. Please obtain your API key at
            https://www.materialsproject.org/dashboard. If this is None,
            the code will check if there is a "PMG_MAPI_KEY" setting.
            If so, it will use that environment variable. This makes
            easier for heavy users to simply add this environment variable to
            their setups and MPRester can then be called without any arguments.
        endpoint (str): Url of endpoint to access the MaterialsProject REST
            interface. Defaults to the standard Materials Project REST
            address at "https://materialsproject.org/rest/v2", but
            can be changed to other urls implementing a similar interface.
        notify_db_version (bool): If True, the current MP database version will
            be retrieved and logged locally in the ~/.pmgrc.yaml. If the database
            version changes, you will be notified. The current database version is
            also printed on instantiation. These local logs are not sent to
            materialsproject.org and are not associated with your API key, so be
            aware that a notification may not be presented if you run MPRester
            from multiple computing environments.
        include_user_agent (bool): If True, will include a user agent with the
            HTTP request including information on pymatgen and system version
            making the API request. This helps MP support pymatgen users, and
            is similar to what most web browsers send with each page request.
            Set to False to disable the user agent.
    """
    # Resolve API key and endpoint from arguments or pymatgen SETTINGS.
    if api_key is not None:
        self.api_key = api_key
    else:
        self.api_key = SETTINGS.get("PMG_MAPI_KEY", "")
    if endpoint is not None:
        self.preamble = endpoint
    else:
        self.preamble = SETTINGS.get("PMG_MAPI_ENDPOINT", "https://materialsproject.org/rest/v2")

    if self.preamble != "https://materialsproject.org/rest/v2":
        warnings.warn("Non-default endpoint used: {}".format(self.preamble))

    # One pooled HTTPS session per rester; the key travels as a header.
    self.session = requests.Session()
    self.session.headers = {"x-api-key": self.api_key}
    if include_user_agent:
        pymatgen_info = "pymatgen/" + PMG_VERSION
        python_info = "Python/{}.{}.{}".format(
            sys.version_info.major, sys.version_info.minor, sys.version_info.micro
        )
        platform_info = "{}/{}".format(platform.system(), platform.release())
        self.session.headers["user-agent"] = "{} ({} {})".format(pymatgen_info, python_info, platform_info)

    if notify_db_version:
        db_version = self.get_database_version()
        logger.debug(f"Connection established to Materials Project database, version {db_version}.")

        # Load (or initialise) the local settings file used to track
        # which database versions have been accessed.
        try:
            with open(SETTINGS_FILE, "rt") as f:
                d = yaml.safe_load(f)
        except IOError:
            d = {}
        d = d if d else {}

        if "MAPI_DB_VERSION" not in d:
            d["MAPI_DB_VERSION"] = {"LOG": {}, "LAST_ACCESSED": None}

        # store a log of what database versions are being connected to
        if db_version not in d["MAPI_DB_VERSION"]["LOG"]:
            d["MAPI_DB_VERSION"]["LOG"][db_version] = 1
        else:
            d["MAPI_DB_VERSION"]["LOG"][db_version] += 1

        # alert user if db version changed
        last_accessed = d["MAPI_DB_VERSION"]["LAST_ACCESSED"]
        if last_accessed and last_accessed != db_version:
            print(
                f"This database version has changed from the database last accessed ({last_accessed}).\n"
                f"Please see release notes on materialsproject.org for information about what has changed."
            )
        d["MAPI_DB_VERSION"]["LAST_ACCESSED"] = db_version

        # write out new database log if possible
        # bare except is not ideal (perhaps a PermissionError, etc.) but this is not critical
        # and should be allowed to fail regardless of reason
        try:
            dumpfn(d, SETTINGS_FILE)
        except Exception:
            pass
def __enter__(self):
    """
    Support for "with" context.
    """
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    """
    Support for "with" context; closes the underlying requests session.
    """
    self.session.close()
def _make_request(self, sub_url, payload=None, method="GET", mp_decode=True):
    """Issue a GET/POST request against the REST endpoint.

    Args:
        sub_url (str): Path appended to the endpoint preamble.
        payload: Query parameters (GET) or form data (POST).
        method (str): "GET" or "POST".
        mp_decode (bool): If True, decode JSON with MontyDecoder so that
            pymatgen objects are reconstructed.

    Returns:
        The "response" field of the decoded JSON reply.

    Raises:
        MPRestError: On transport errors, unexpected status codes, or an
            invalid API response.
    """
    response = None
    url = self.preamble + sub_url
    try:
        if method == "POST":
            response = self.session.post(url, data=payload, verify=True)
        else:
            response = self.session.get(url, params=payload, verify=True)
        # 400 is included because the API reports errors in-band.
        if response.status_code in [200, 400]:
            if mp_decode:
                data = json.loads(response.text, cls=MontyDecoder)
            else:
                data = json.loads(response.text)
            if data["valid_response"]:
                if data.get("warning"):
                    warnings.warn(data["warning"])
                return data["response"]
            raise MPRestError(data["error"])

        raise MPRestError("REST query returned with error status code {}".format(response.status_code))

    except Exception as ex:
        # Include the raw body in the error when available.
        msg = "{}. Content: {}".format(str(ex), response.content) if hasattr(response, "content") else str(ex)
        raise MPRestError(msg)
def get_database_version(self):
    """Return the Materials Project database version string.

    The database is periodically updated; consolidated "material"
    documents may change between versions, while individual task records
    stay fixed and queryable via their task_id. Versions are dates in
    the format YYYY-MM-DD (the "-DD" part is optional), with a numerical
    suffix when several releases happen on the same day.

    Returns:
        str: The database version.
    """
    reply = self._make_request("/api_check")
    return reply["version"]["db"]
def get_materials_id_from_task_id(self, task_id):
    """Map a task id (possibly an old materials id) onto the current
    materials id.

    Args:
        task_id (str): A task id.

    Returns:
        str: The corresponding materials id.
    """
    sub_url = "/materials/mid_from_tid/%s" % task_id
    return self._make_request(sub_url)
def get_materials_id_references(self, material_id):
    """Fetch all literature references for a materials id.

    Args:
        material_id (str): A material id.

    Returns:
        str: References in BibTeX format.
    """
    refs_url = "/materials/%s/refs" % material_id
    return self._make_request(refs_url)
def get_data(self, chemsys_formula_id, data_type="vasp", prop=""):
    """Generic accessor for the Materials Project REST interface.

    The return value is always a list of dicts of the form
    ``[{"material_id": material_id, "property_name": value}, ...]``; the
    call maps to ``/rest/v2/materials/<id>/<data_type>[/<prop>]``. See
    https://github.com/materialsproject/mapidoc for details.

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        data_type (str): Type of data to return; currently either
            "vasp" or "exp".
        prop (str): One of MPRester.supported_task_properties, or the
            empty string for a general list of useful properties.

    Returns:
        list[dict]: Decoded REST response.
    """
    url_path = "/materials/%s/%s" % (chemsys_formula_id, data_type)
    if prop:
        url_path = url_path + "/" + prop
    return self._make_request(url_path)
def get_materials_ids(self, chemsys_formula):
    """List every materials id matching a formula or chemical system.

    Args:
        chemsys_formula (str): A chemical system (e.g., Li-Fe-O),
            or formula (e.g., Fe2O3).

    Returns:
        list[str]: All matching materials ids.
    """
    mids_url = "/materials/%s/mids" % chemsys_formula
    return self._make_request(mids_url, mp_decode=False)
def get_doc(self, materials_id):
    """Download the full data document for one materials id. Use this
    judiciously.

    REST Endpoint: https://www.materialsproject.org/materials/<mp-id>/doc.

    Args:
        materials_id (str): E.g., mp-1143 for Al2O3

    Returns:
        dict: JSON document of all data that is displayed on a materials
        details page.
    """
    doc_url = "/materials/%s/doc" % materials_id
    return self._make_request(doc_url, mp_decode=False)
def get_xas_data(self, material_id, absorbing_element):
    """Get X-ray absorption spectroscopy data for an absorbing element in
    the structure of *material_id*. Only K-edge XANES (X-ray Absorption
    Near Edge Structure) is supported.

    REST Endpoint:
    https://www.materialsproject.org/materials/<mp-id>/xas/<absorbing_element>.

    Args:
        material_id (str): E.g., mp-1143 for Al2O3
        absorbing_element (str): The absorbing element in the
            corresponding structure. E.g., Al in Al2O3

    Raises:
        ValueError: If the element is not part of the structure.
    """
    # Guard: the element must actually occur in the material.
    contained = self.get_data(material_id, prop="elements")[0]["elements"]
    if absorbing_element not in contained:
        raise ValueError(
            "{} element not contained in corresponding structure with "
            "mp_id: {}".format(absorbing_element, material_id)
        )
    xas_url = "/materials/{}/xas/{}".format(material_id, absorbing_element)
    records = self._make_request(xas_url, mp_decode=False)
    return records[0]
def get_task_data(self, chemsys_formula_id, prop=""):
    """Flexible method to get any task data via the Materials Project REST
    interface.

    Unlike :func:`get_data`, this method queries the task collection for
    specific run information. The REST return is *always* a list of dicts:
    [{"material_id": material_id, "property_name" : value}, ...]

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        prop (str): Property to be obtained. Should be one of the
            MPRester.supported_properties. Leave as empty string for a
            general list of useful properties.
    """
    url = "/tasks/{}".format(chemsys_formula_id)
    if prop:
        url = "{}/{}".format(url, prop)
    return self._make_request(url)
def get_structures(self, chemsys_formula_id, final=True):
    """Get a list of Structures for a chemical system, formula, or
    materials_id.

    Args:
        chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
            or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
        final (bool): Whether to get the final structure, or the initial
            (pre-relaxation) structure. Defaults to True.

    Returns:
        List of Structure objects.
    """
    key = "final_structure" if final else "initial_structure"
    # Each REST record carries the structure under the requested property key.
    return [record[key] for record in self.get_data(chemsys_formula_id, prop=key)]
def find_structure(self, filename_or_structure):
    """
    Finds matching structures on the Materials Project site.

    Args:
        filename_or_structure: filename or Structure object

    Returns:
        A list of matching materials project ids for structure.

    Raises:
        MPRestError: on invalid input, a rejected query, or any transport
            failure (all failures are re-wrapped, see note below).
    """
    try:
        if isinstance(filename_or_structure, str):
            s = Structure.from_file(filename_or_structure)
        elif isinstance(filename_or_structure, Structure):
            s = filename_or_structure
        else:
            raise MPRestError("Provide filename or Structure object.")
        # MontyEncoder serializes the Structure dict into REST-safe JSON.
        payload = {"structure": json.dumps(s.as_dict(), cls=MontyEncoder)}
        response = self.session.post("{}/find_structure".format(self.preamble), data=payload)
        # A 400 still carries a JSON body with "valid_response"/"error" fields.
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                return resp["response"]
            raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): re-wraps *every* failure (including the MPRestErrors
        # raised above) into a fresh MPRestError, discarding the original
        # exception chain.
        raise MPRestError(str(ex))
def get_entries(
    self,
    chemsys_formula_id_criteria,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
    sort_by_e_above_hull=False,
):
    """
    Get a list of ComputedEntries or ComputedStructureEntries corresponding
    to a chemical system, formula, or materials_id or full criteria.

    Args:
        chemsys_formula_id_criteria (str/dict): A chemical system
            (e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id
            (e.g., mp-1234) or full Mongo-style dict criteria.
        compatible_only (bool): Whether to return only "compatible"
            entries. Compatible entries are entries that have been
            processed using the MaterialsProject2020Compatibility class,
            which performs adjustments to allow mixing of GGA and GGA+U
            calculations for more accurate phase diagrams and reaction
            energies.
        inc_structure (str): If None, entries returned are
            ComputedEntries. If inc_structure="initial",
            ComputedStructureEntries with initial structures are returned.
            Otherwise, ComputedStructureEntries with final structures
            are returned.
        property_data (list): Specify additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell
        sort_by_e_above_hull (bool): Whether to sort the list of entries by
            e_above_hull (will query e_above_hull as a property_data if True).

    Returns:
        List of ComputedEntry or ComputedStructureEntry objects.
    """
    # TODO: This is a very hackish way of doing this. It should be fixed
    # on the REST end.
    # Run parameters needed to reconstruct entries (and apply
    # compatibility corrections) client-side.
    params = [
        "run_type",
        "is_hubbard",
        "pseudo_potential",
        "hubbards",
        "potcar_symbols",
        "oxide_type",
    ]
    props = ["energy", "unit_cell_formula", "task_id"] + params
    if sort_by_e_above_hull:
        # e_above_hull must be queried so the final sort key exists.
        # NOTE(review): mutates the caller's property_data list in place.
        if property_data and "e_above_hull" not in property_data:
            property_data.append("e_above_hull")
        elif not property_data:
            property_data = ["e_above_hull"]
    if property_data:
        props += property_data
    if inc_structure:
        if inc_structure == "initial":
            props.append("initial_structure")
        else:
            props.append("structure")
    if not isinstance(chemsys_formula_id_criteria, dict):
        criteria = MPRester.parse_criteria(chemsys_formula_id_criteria)
    else:
        criteria = chemsys_formula_id_criteria
    data = self.query(criteria, props)
    entries = []
    for d in data:
        # Rebuild "functional label" strings expected by the compatibility
        # processors, e.g. "PBE Fe_pv".
        d["potcar_symbols"] = [
            "%s %s" % (d["pseudo_potential"]["functional"], l) for l in d["pseudo_potential"]["labels"]
        ]
        # NOTE: rebinding of the loop-external name `data` — harmless here
        # because the iterator over the original list is already active.
        data = {"oxide_type": d["oxide_type"]}
        if property_data:
            data.update({k: d[k] for k in property_data})
        if not inc_structure:
            e = ComputedEntry(
                d["unit_cell_formula"],
                d["energy"],
                parameters={k: d[k] for k in params},
                data=data,
                entry_id=d["task_id"],
            )
        else:
            prim = d["initial_structure"] if inc_structure == "initial" else d["structure"]
            if conventional_unit_cell:
                # Scale the energy by the cell-size ratio so it stays
                # consistent with the (possibly larger) conventional cell.
                s = SpacegroupAnalyzer(prim).get_conventional_standard_structure()
                energy = d["energy"] * (len(s) / len(prim))
            else:
                s = prim.copy()
                energy = d["energy"]
            e = ComputedStructureEntry(
                s,
                energy,
                parameters={k: d[k] for k in params},
                data=data,
                entry_id=d["task_id"],
            )
        entries.append(e)
    if compatible_only:
        from pymatgen.entries.compatibility import MaterialsProject2020Compatibility

        # suppress the warning about missing oxidation states
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="Failed to guess oxidation states.*")
            # clean=True strips pre-existing energy adjustments before
            # re-applying the 2020 compatibility scheme.
            entries = MaterialsProject2020Compatibility().process_entries(entries, clean=True)
    if sort_by_e_above_hull:
        entries = sorted(entries, key=lambda entry: entry.data["e_above_hull"])
    return entries
def get_pourbaix_entries(self, chemsys, solid_compat="MaterialsProject2020Compatibility"):
    """
    A helper function to get all entries necessary to generate
    a pourbaix diagram from the rest interface.

    Args:
        chemsys (str or [str]): Chemical system string comprising element
            symbols separated by dashes, e.g., "Li-Fe-O" or List of element
            symbols, e.g., ["Li", "Fe", "O"].
        solid_compat: Compatiblity scheme used to pre-process solid DFT energies prior to applying aqueous
            energy adjustments. May be passed as a class (e.g. MaterialsProject2020Compatibility) or an instance
            (e.g., MaterialsProject2020Compatibility()). If None, solid DFT energies are used as-is.
            Default: MaterialsProject2020Compatibility

    Returns:
        List of PourbaixEntry objects (both ion and solid entries).

    Raises:
        ValueError: for an unrecognized solid_compat or a missing
            reference solid.
    """
    # imports are not top-level due to expense
    from pymatgen.analysis.phase_diagram import PhaseDiagram
    from pymatgen.analysis.pourbaix_diagram import IonEntry, PourbaixEntry
    from pymatgen.core.ion import Ion
    from pymatgen.entries.compatibility import (
        Compatibility,
        MaterialsProjectAqueousCompatibility,
        MaterialsProject2020Compatibility,
        MaterialsProjectCompatibility,
    )

    # Resolve the compatibility scheme; strings name the two built-ins,
    # otherwise any Compatibility instance is accepted.
    if solid_compat == "MaterialsProjectCompatibility":
        self.solid_compat = MaterialsProjectCompatibility()
    elif solid_compat == "MaterialsProject2020Compatibility":
        self.solid_compat = MaterialsProject2020Compatibility()
    elif isinstance(solid_compat, Compatibility):
        self.solid_compat = solid_compat
    else:
        raise ValueError(
            "Solid compatibility can only be 'MaterialsProjectCompatibility', "
            "'MaterialsProject2020Compatibility', or an instance of a Compatability class"
        )
    pbx_entries = []
    if isinstance(chemsys, str):
        chemsys = chemsys.split("-")
    # Get ion entries first, because certain ions have reference
    # solids that aren't necessarily in the chemsys (Na2SO4)
    url = "/pourbaix_diagram/reference_data/" + "-".join(chemsys)
    ion_data = self._make_request(url)
    ion_ref_comps = [Composition(d["Reference Solid"]) for d in ion_data]
    ion_ref_elts = list(itertools.chain.from_iterable(i.elements for i in ion_ref_comps))
    # O and H are always included since Pourbaix diagrams are aqueous.
    ion_ref_entries = self.get_entries_in_chemsys(
        list(set([str(e) for e in ion_ref_elts] + ["O", "H"])),
        property_data=["e_above_hull"],
        compatible_only=False,
    )
    # suppress the warning about supplying the required energies; they will be calculated from the
    # entries we get from MPRester
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="You did not provide the required O2 and H2O energies.",
        )
        compat = MaterialsProjectAqueousCompatibility(solid_compat=self.solid_compat)
    # suppress the warning about missing oxidation states
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message="Failed to guess oxidation states.*")
        ion_ref_entries = compat.process_entries(ion_ref_entries)
    ion_ref_pd = PhaseDiagram(ion_ref_entries)

    # position the ion energies relative to most stable reference state
    for n, i_d in enumerate(ion_data):
        ion = Ion.from_formula(i_d["Name"])
        refs = [e for e in ion_ref_entries if e.composition.reduced_formula == i_d["Reference Solid"]]
        if not refs:
            raise ValueError("Reference solid not contained in entry list")
        stable_ref = sorted(refs, key=lambda x: x.data["e_above_hull"])[0]
        rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
        # Offset between our computed formation energy and the tabulated
        # reference solid energy, scaled per formula unit.
        solid_diff = ion_ref_pd.get_form_energy(stable_ref) - i_d["Reference solid energy"] * rf
        elt = i_d["Major_Elements"][0]
        correction_factor = ion.composition[elt] / stable_ref.composition[elt]
        energy = i_d["Energy"] + solid_diff * correction_factor
        ion_entry = IonEntry(ion, energy)
        pbx_entries.append(PourbaixEntry(ion_entry, "ion-{}".format(n)))
    # Construct the solid pourbaix entries from filtered ion_ref entries
    extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} - {Element("H"), Element("O")}
    for entry in ion_ref_entries:
        entry_elts = set(entry.composition.elements)
        # Ensure no OH chemsys or extraneous elements from ion references
        if not (entry_elts <= {Element("H"), Element("O")} or extra_elts.intersection(entry_elts)):
            # Create new computed entry
            form_e = ion_ref_pd.get_form_energy(entry)
            new_entry = ComputedEntry(entry.composition, form_e, entry_id=entry.entry_id)
            pbx_entry = PourbaixEntry(new_entry)
            pbx_entries.append(pbx_entry)
    return pbx_entries
def get_structure_by_material_id(self, material_id, final=True, conventional_unit_cell=False):
    """
    Get a Structure corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).
        final (bool): Whether to get the final structure, or the initial
            (pre-relaxation) structure. Defaults to True.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        Structure object.

    Raises:
        MPRestError: if the id is neither a known material nor a task id
            mappable to one.
    """
    prop = "final_structure" if final else "initial_structure"
    data = self.get_data(material_id, prop=prop)
    if not data:
        # Empty result: the id may be a *task* id rather than a canonical
        # materials id — try to map it before giving up.
        try:
            new_material_id = self.get_materials_id_from_task_id(material_id)
            if new_material_id:
                warnings.warn(
                    "The calculation task {} is mapped to canonical mp-id {}, "
                    "so structure for {} returned. "
                    "This is not an error, see documentation. "
                    "If original task data for {} is required, "
                    "use get_task_data(). To find the canonical mp-id from a task id "
                    "use get_materials_id_from_task_id().".format(
                        material_id, new_material_id, new_material_id, material_id
                    )
                )
                # NOTE(review): recursive retry drops the caller's `final`
                # and `conventional_unit_cell` flags (defaults are used).
                return self.get_structure_by_material_id(new_material_id)
        except MPRestError:
            raise MPRestError(
                "material_id {} unknown, if this seems like "
                "an error please let us know at "
                "matsci.org/materials-project".format(material_id)
            )
    if conventional_unit_cell:
        data[0][prop] = SpacegroupAnalyzer(data[0][prop]).get_conventional_standard_structure()
    return data[0][prop]
def get_entry_by_material_id(
    self,
    material_id,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
):
    """Get a single ComputedEntry corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).
        compatible_only (bool): Whether to return only "compatible"
            entries. Compatible entries are entries that have been
            processed using the MaterialsProject2020Compatibility class,
            which performs adjustments to allow mixing of GGA and GGA+U
            calculations for more accurate phase diagrams and reaction
            energies.
        inc_structure (str): If None, entries returned are
            ComputedEntries. If inc_structure="final",
            ComputedStructureEntries with final structures are returned.
            Otherwise, ComputedStructureEntries with initial structures
            are returned.
        property_data (list): Specify additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        ComputedEntry or ComputedStructureEntry object.
    """
    # Delegate to get_entries and keep the first (only expected) match.
    matches = self.get_entries(
        material_id,
        compatible_only=compatible_only,
        inc_structure=inc_structure,
        property_data=property_data,
        conventional_unit_cell=conventional_unit_cell,
    )
    return matches[0]
def get_dos_by_material_id(self, material_id):
    """Get a Dos corresponding to a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/dos

    Args:
        material_id (str): Materials Project material_id (a string,
            e.g., mp-1234).

    Returns:
        A Dos object.
    """
    # Single-record response; unwrap the "dos" field directly.
    return self.get_data(material_id, prop="dos")[0]["dos"]
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
    """Get a BandStructure corresponding to a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
    https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform

    Args:
        material_id (str): Materials Project material_id.
        line_mode (bool): If True, fetch a BandStructureSymmLine object
            (default). If False, return the uniform band structure.

    Returns:
        A BandStructure object.
    """
    if line_mode:
        prop = "bandstructure"
    else:
        prop = "bandstructure_uniform"
    return self.get_data(material_id, prop=prop)[0][prop]
def get_phonon_dos_by_material_id(self, material_id):
    """Get phonon density of states data corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        CompletePhononDos: A phonon DOS object.
    """
    return self._make_request("/materials/%s/phonondos" % material_id)
def get_phonon_bandstructure_by_material_id(self, material_id):
    """Get phonon dispersion data corresponding to a material_id.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        PhononBandStructureSymmLine: A phonon band structure.
    """
    return self._make_request("/materials/%s/phononbs" % material_id)
def get_phonon_ddb_by_material_id(self, material_id):
    """Get ABINIT Derivative Data Base (DDB) output for phonon calculations.

    Args:
        material_id (str): Materials Project material_id.

    Returns:
        str: ABINIT DDB file as a string.
    """
    return self._make_request("/materials/%s/abinit_ddb" % material_id)
def get_entries_in_chemsys(
    self,
    elements,
    compatible_only=True,
    inc_structure=None,
    property_data=None,
    conventional_unit_cell=False,
):
    """Helper method to get a list of ComputedEntries in a chemical system.

    For example, elements = ["Li", "Fe", "O"] will return a list of all
    entries in the Li-Fe-O chemical system, i.e., all LixOy,
    FexOy, LixFey, LixFeyOz, Li, Fe and O phases. Extremely useful for
    creating phase diagrams of entire chemical systems.

    Args:
        elements (str or [str]): Chemical system string comprising element
            symbols separated by dashes, e.g., "Li-Fe-O" or List of element
            symbols, e.g., ["Li", "Fe", "O"].
        compatible_only (bool): Whether to return only "compatible"
            entries. Compatible entries are entries that have been
            processed using the MaterialsProject2020Compatibility class,
            which performs adjustments to allow mixing of GGA and GGA+U
            calculations for more accurate phase diagrams and reaction
            energies.
        inc_structure (str): If None, entries returned are
            ComputedEntries. If inc_structure="final",
            ComputedStructureEntries with final structures are returned.
            Otherwise, ComputedStructureEntries with initial structures
            are returned.
        property_data (list): Specify additional properties to include in
            entry.data. If None, no data. Should be a subset of
            supported_properties.
        conventional_unit_cell (bool): Whether to get the standard
            conventional unit cell

    Returns:
        List of ComputedEntries.
    """
    elts = elements.split("-") if isinstance(elements, str) else elements
    # Enumerate every non-empty sub-chemsys (alphabetically normalized),
    # e.g. Li-O -> ["Li", "O", "Li-O"], so phase diagrams get all phases.
    all_chemsyses = [
        "-".join(sorted(combo))
        for size in range(1, len(elts) + 1)
        for combo in itertools.combinations(elts, size)
    ]
    return self.get_entries(
        {"chemsys": {"$in": all_chemsyses}},
        compatible_only=compatible_only,
        inc_structure=inc_structure,
        property_data=property_data,
        conventional_unit_cell=conventional_unit_cell,
    )
def get_exp_thermo_data(self, formula):
    """Get a list of ThermoData objects associated with a formula.

    Uses the Materials Project REST interface with the experimental
    ("exp") data type.

    Args:
        formula (str): A formula to search for.

    Returns:
        List of ThermoData objects.
    """
    exp_records = self.get_data(formula, data_type="exp")
    return exp_records
def get_exp_entry(self, formula):
    """Return an ExpEntry object for a formula.

    An ExpEntry is the experimental equivalent of a ComputedEntry and can
    be used for analyses using experimental data.

    Args:
        formula (str): A formula to search for.

    Returns:
        An ExpEntry object.
    """
    thermo_data = self.get_exp_thermo_data(formula)
    return ExpEntry(Composition(formula), thermo_data)
def query(
    self,
    criteria,
    properties,
    chunk_size=500,
    max_tries_per_chunk=5,
    mp_decode=True,
):
    r"""
    Performs an advanced query using MongoDB-like syntax for directly
    querying the Materials Project database. This allows one to perform
    queries which are otherwise too cumbersome to perform using the standard
    convenience methods.
    Please consult the Materials API documentation at
    https://github.com/materialsproject/mapidoc, which provides a
    comprehensive explanation of the document schema used in the Materials
    Project (supported criteria and properties) and guidance on how best to
    query for the relevant information you need.
    For queries that request data on more than CHUNK_SIZE materials at once,
    this method will chunk a query by first retrieving a list of material
    IDs that satisfy CRITERIA, and then merging the criteria with a
    restriction to one chunk of materials at a time of size CHUNK_SIZE. You
    can opt out of this behavior by setting CHUNK_SIZE=0. To guard against
    intermittent server errors in the case of many chunks per query,
    possibly-transient server errors will result in re-trying a give chunk
    up to MAX_TRIES_PER_CHUNK times.

    Args:
        criteria (str/dict): Criteria of the query as a string or
            mongo-style dict.
            If string, it supports a powerful but simple string criteria.
            E.g., "Fe2O3" means search for materials with reduced_formula
            Fe2O3. Wild cards are also supported. E.g., "\\*2O" means get
            all materials whose formula can be formed as \\*2O, e.g.,
            Li2O, K2O, etc.
            Other syntax examples:
            mp-1234: Interpreted as a Materials ID.
            Fe2O3 or *2O3: Interpreted as reduced formulas.
            Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
            You can mix and match with spaces, which are interpreted as
            "OR". E.g. "mp-1234 FeO" means query for all compounds with
            reduced formula FeO or with materials_id mp-1234.
            Using a full dict syntax, even more powerful queries can be
            constructed. For example, {"elements":{"$in":["Li",
            "Na", "K"], "$all": ["O"]}, "nelements":2} selects all Li, Na
            and K oxides. {"band_gap": {"$gt": 1}} selects all materials
            with band gaps greater than 1 eV.
        properties (list): Properties to request for as a list. For
            example, ["formula", "formation_energy_per_atom"] returns
            the formula and formation energy per atom.
        chunk_size (int): Number of materials for which to fetch data at a
            time. More data-intensive properties may require smaller chunk
            sizes. Use chunk_size=0 to force no chunking -- this is useful
            when fetching only properties such as 'material_id'.
        max_tries_per_chunk (int): How many times to re-try fetching a given
            chunk when the server gives a 5xx error (e.g. a timeout error).
        mp_decode (bool): Whether to do a decoding to a Pymatgen object
            where possible. In some cases, it might be useful to just get
            the raw python dict, i.e., set to False.

    Returns:
        List of results. E.g.,
        [{u'formula': {u'O': 1, u'Li': 2.0}},
        {u'formula': {u'Na': 2.0, u'O': 2.0}},
        {u'formula': {u'K': 1, u'O': 3.0}},
        ...]
    """
    if not isinstance(criteria, dict):
        criteria = self.parse_criteria(criteria)
    payload = {
        "criteria": json.dumps(criteria),
        "properties": json.dumps(properties),
    }
    # chunk_size == 0: single un-chunked request, no count round-trip.
    if chunk_size == 0:
        return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)

    # First ask only for the result count to decide whether to chunk.
    count_payload = payload.copy()
    count_payload["options"] = json.dumps({"count_only": True})
    num_results = self._make_request("/query", payload=count_payload, method="POST")
    if num_results <= chunk_size:
        return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)

    data = []
    # Fetch all matching ids cheaply, then re-query one id-chunk at a time.
    mids = [d["material_id"] for d in self.query(criteria, ["material_id"], chunk_size=0)]
    chunks = get_chunks(mids, size=chunk_size)
    progress_bar = PBar(total=len(mids))
    for chunk in chunks:
        chunk_criteria = criteria.copy()
        chunk_criteria.update({"material_id": {"$in": chunk}})
        num_tries = 0
        while num_tries < max_tries_per_chunk:
            try:
                # Recursive call with chunk_size=0 issues one raw request.
                data.extend(
                    self.query(
                        chunk_criteria,
                        properties,
                        chunk_size=0,
                        mp_decode=mp_decode,
                    )
                )
                break
            except MPRestError as e:
                # pylint: disable=E1101
                # Only 5xx (server-side / possibly transient) errors are
                # retried; any other parsed status code re-raises at once.
                match = re.search(r"error status code (\d+)", str(e))
                if match:
                    if not match.group(1).startswith("5"):
                        raise e
                    num_tries += 1
                    print(
                        "Unknown server error. Trying again in five "
                        "seconds (will try at most {} times)...".format(max_tries_per_chunk)
                    )
                    sleep(5)
        progress_bar.update(len(chunk))
    return data
def submit_structures(
    self,
    structures,
    authors,
    projects=None,
    references="",
    remarks=None,
    data=None,
    histories=None,
    created_at=None,
):
    """
    Submits a list of structures to the Materials Project as SNL files.
    The argument list mirrors the arguments for the StructureNL object,
    except that a list of structures with the same metadata is used as an
    input.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        structures: A list of Structure objects
        authors (list): List of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        data ([dict]): A list of free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. The length of data should be the same as the list of
            structures if not None.
        histories: List of list of dicts - [[{'name':'', 'url':'',
            'description':{}}], ...] The length of histories should be the
            same as the list of structures if not None.
        created_at (datetime): A datetime object

    Returns:
        A list of inserted submission ids.
    """
    from pymatgen.util.provenance import StructureNL

    snl_list = StructureNL.from_structures(
        structures,
        authors,
        projects,
        references,
        remarks,
        data,
        histories,
        created_at,
    )
    # Bug fix: the docstring promises the inserted submission ids, but the
    # return value of submit_snl was previously discarded (the call had no
    # `return`), so callers always got None.
    return self.submit_snl(snl_list)
def submit_snl(self, snl):
    """
    Submits a list of StructureNL to the Materials Project site.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl (StructureNL/[StructureNL]): A single StructureNL, or a list
            of StructureNL objects

    Returns:
        A list of inserted submission ids.

    Raises:
        MPRestError
    """
    try:
        # Normalize a single SNL to a one-element list.
        snl = snl if isinstance(snl, list) else [snl]
        jsondata = [s.as_dict() for s in snl]
        payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
        response = self.session.post("{}/snl/submit".format(self.preamble), data=payload)
        # A 400 still carries a JSON body with "valid_response"/"error".
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp["inserted_ids"]
            raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): re-wraps all failures (including the MPRestErrors
        # raised above), losing the original exception chain.
        raise MPRestError(str(ex))
def delete_snl(self, snl_ids):
    """
    Delete earlier submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        snl_ids: List of SNL ids.

    Returns:
        The decoded server response dict on success.

    Raises:
        MPRestError
    """
    try:
        payload = {"ids": json.dumps(snl_ids)}
        response = self.session.post("{}/snl/delete".format(self.preamble), data=payload)
        # A 400 still carries a JSON body with "valid_response"/"error".
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp
            raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): re-wraps all failures, losing the original chain.
        raise MPRestError(str(ex))
def query_snl(self, criteria):
    """
    Query for submitted SNLs.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        criteria (dict): Query criteria.

    Returns:
        A dict, with a list of submitted SNLs in the "response" key.

    Raises:
        MPRestError
    """
    try:
        payload = {"criteria": json.dumps(criteria)}
        response = self.session.post("{}/snl/query".format(self.preamble), data=payload)
        if response.status_code in [200, 400]:
            # NOTE(review): unlike the sibling SNL methods, this decodes
            # without MontyDecoder — presumably intentional (raw dicts);
            # confirm before changing.
            resp = json.loads(response.text)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp["response"]
            raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): re-wraps all failures, losing the original chain.
        raise MPRestError(str(ex))
def submit_vasp_directory(
    self,
    rootdir,
    authors,
    projects=None,
    references="",
    remarks=None,
    master_data=None,
    master_history=None,
    created_at=None,
    ncpus=None,
):
    """
    Assimilates all vasp run directories beneath a particular
    directory using BorgQueen to obtain structures, and then submits them
    to the Materials Project as SNL files. VASP related meta data like
    initial structure and final energies are automatically incorporated.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        rootdir (str): Rootdir to start assimilating VASP runs from.
        authors: *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors. The same
            list of authors should apply to all runs.
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        master_data (dict): A free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. This data is added to all structures detected in the
            directory, in addition to other vasp data on a per structure
            basis.
        master_history: A master history to be added to all entries.
        created_at (datetime): A datetime object
        ncpus (int): Number of cpus to use in using BorgQueen to
            assimilate. Defaults to None, which means serial.
    """
    from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
    from pymatgen.apps.borg.queen import BorgQueen

    # Crawl rootdir for VASP runs; each assimilated entry carries its
    # structure plus the requested extra data fields.
    drone = VaspToComputedEntryDrone(inc_structure=True, data=["filename", "initial_structure"])
    queen = BorgQueen(drone, number_of_drones=ncpus)
    queen.parallel_assimilate(rootdir)
    structures = []
    metadata = []
    histories = []
    for e in queen.get_data():
        structures.append(e.structure)
        m = {
            "_vasp": {
                "parameters": e.parameters,
                "final_energy": e.energy,
                "final_energy_per_atom": e.energy_per_atom,
                "initial_structure": e.data["initial_structure"].as_dict(),
            }
        }
        # NOTE(review): histories only grows for runs that carry a
        # "history"; if only some do, its length will not match
        # `structures` — presumably from_structures handles/expects
        # all-or-none. Confirm before relying on mixed inputs.
        if "history" in e.parameters:
            histories.append(e.parameters["history"])
        if master_data is not None:
            m.update(master_data)
        metadata.append(m)
    if master_history is not None:
        # List repetition: every structure gets the same master history.
        histories = master_history * len(structures)
    return self.submit_structures(
        structures,
        authors,
        projects=projects,
        references=references,
        remarks=remarks,
        data=metadata,
        histories=histories,
        created_at=created_at,
    )
def get_stability(self, entries):
    """
    Returns the stability of all entries.

    Args:
        entries: Entries to be submitted for server-side phase-diagram
            stability analysis (serialized via MontyEncoder).

    Returns:
        The decoded "response" payload from the server.

    Raises:
        MPRestError
    """
    try:
        payload = {"entries": json.dumps(entries, cls=MontyEncoder)}
        response = self.session.post(
            "{}/phase_diagram/calculate_stability".format(self.preamble),
            data=payload,
        )
        # A 400 still carries a JSON body with "valid_response"/"error".
        if response.status_code in [200, 400]:
            resp = json.loads(response.text, cls=MontyDecoder)
            if resp["valid_response"]:
                if resp.get("warning"):
                    warnings.warn(resp["warning"])
                return resp["response"]
            raise MPRestError(resp["error"])
        raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
    except Exception as ex:
        # NOTE(review): re-wraps all failures, losing the original chain.
        raise MPRestError(str(ex))
def get_cohesive_energy(self, material_id, per_atom=False):
    """Get the cohesive energy for a material (eV per formula unit).

    Cohesive energy is defined as the difference between the bulk energy
    and the sum of total DFT energy of isolated atoms for atom elements
    in the bulk.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        per_atom (bool): Whether or not to return cohesive energy per atom

    Returns:
        Cohesive energy (eV).
    """
    entry = self.get_entry_by_material_id(material_id)
    # Normalize the bulk energy to one integer formula unit.
    formula_units = entry.composition.get_integer_formula_and_factor()[1]
    bulk_energy = entry.energy / formula_units
    reduced = entry.composition.reduced_composition.as_dict()
    isolated_sum = 0
    atom_count = 0
    for element, amount in reduced.items():
        task = self._make_request(
            "/element/%s/tasks/isolated_atom" % (element), mp_decode=False
        )[0]
        isolated_sum += task["output"]["final_energy_per_atom"] * amount
        atom_count += amount
    cohesive = isolated_sum - bulk_energy
    return cohesive / atom_count if per_atom else cohesive
def get_reaction(self, reactants, products):
    """Get a reaction from the Materials Project.

    Args:
        reactants ([str]): List of formulas
        products ([str]): List of formulas

    Returns:
        rxn
    """
    payload = {"reactants[]": reactants, "products[]": products}
    return self._make_request("/reaction", payload=payload, mp_decode=False)
def get_substrates(self, material_id, number=50, orient=None):
    """Get a substrate list for a material id.

    The list is in order of increasing elastic energy if an elastic tensor
    is available for the material_id. Otherwise the list is in order of
    increasing matching area.

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        number (int): number of substrates to return; n=0 returns all
            available matches
        orient (list): substrate orientation to look for

    Returns:
        list of dicts with substrate matches
    """
    endpoint = "/materials/{}/substrates?n={}".format(material_id, number)
    if orient:
        # Miller indices are passed space-separated, e.g. "1 1 0".
        endpoint += "&orient={}".format(" ".join(str(i) for i in orient))
    return self._make_request(endpoint)
def get_all_substrates(self):
    """Get the list of all possible substrates considered in the
    Materials Project substrate database.

    Returns:
        list of material_ids corresponding to possible substrates
    """
    endpoint = "/materials/all_substrate_ids"
    return self._make_request(endpoint)
def get_surface_data(self, material_id, miller_index=None, inc_structures=False):
    """
    Gets surface data for a material. Useful for Wulff shapes.

    Reference for surface data:
    Tran, R., Xu, Z., Radhakrishnan, B., Winston, D., Sun, W., Persson, K.
    A., & Ong, S. P. (2016). Data Descriptor: Surface energies of elemental
    crystals. Scientific Data, 3(160080), 1–13.
    http://dx.doi.org/10.1038/sdata.2016.80

    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
        miller_index (list of integer): The miller index of the surface.
            e.g., [3, 2, 1]. If miller_index is provided, only one dictionary
            of this specific plane will be returned.
        inc_structures (bool): Include final surface slab structures.
            These are unnecessary for Wulff shape construction.

    Returns:
        Surface data for material. Energies are given in SI units (J/m^2).

    Raises:
        ValueError: if miller_index matches no surface of the material.
    """
    req = "/materials/{}/surfaces".format(material_id)
    if inc_structures:
        req += "?include_structures=true"
    if miller_index:
        surf_data_dict = self._make_request(req)
        surf_list = surf_data_dict["surfaces"]
        # Match against all symmetrically equivalent indices of the
        # conventional cell, not just the literal index given.
        ucell = self.get_structure_by_material_id(material_id, conventional_unit_cell=True)
        eq_indices = get_symmetrically_equivalent_miller_indices(ucell, miller_index)
        for one_surf in surf_list:
            if tuple(one_surf["miller_index"]) in eq_indices:
                return one_surf
        raise ValueError("Bad miller index.")
    return self._make_request(req)
def get_wulff_shape(self, material_id):
    """
    Constructs a Wulff shape for a material.
    Args:
        material_id (str): Materials Project material_id, e.g. 'mp-123'.
    Returns:
        pymatgen.analysis.wulff.WulffShape
    """
    from pymatgen.analysis.wulff import WulffShape
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

    structure = self.get_structure_by_material_id(material_id)
    conv = SpacegroupAnalyzer(structure).get_conventional_standard_structure()
    # Collect one surface energy per Miller index, preferring reconstructed
    # surfaces since those have lower surface energies.
    energy_by_miller = {}
    for entry in self.get_surface_data(material_id)["surfaces"]:
        hkl = tuple(entry["miller_index"])
        if entry["is_reconstructed"] or hkl not in energy_by_miller:
            energy_by_miller[hkl] = entry["surface_energy"]
    millers, energies = zip(*energy_by_miller.items())
    return WulffShape(conv.lattice, millers, energies)
def get_gb_data(
    self,
    material_id=None,
    pretty_formula=None,
    chemsys=None,
    sigma=None,
    gb_plane=None,
    rotation_axis=None,
    include_work_of_separation=False,
):
    """
    Gets grain boundary data for a material.
    Args:
        material_id (str): Materials Project material_id, e.g., 'mp-129'.
        pretty_formula (str): The formula of metals. e.g., 'Fe'
        chemsys (str): The chemical system of the material, e.g., 'Fe-O'.
        sigma(int): The sigma value of a certain type of grain boundary
        gb_plane(list of integer): The Miller index of grain
            boundary plane. e.g., [1, 1, 1]
        rotation_axis(list of integer): The Miller index of rotation
            axis. e.g., [1, 0, 0], [1, 1, 0], and [1, 1, 1]
            Sigma value is determined by the combination of rotation axis and
            rotation angle. The five degrees of freedom (DOF) of one grain boundary
            include: rotation axis (2 DOFs), rotation angle (1 DOF), and grain
            boundary plane (2 DOFs).
        include_work_of_separation (bool): whether to include the work of separation
            (in unit of (J/m^2)). If you want to query the work of separation, please
            specify the material_id.
    Returns:
        A list of grain boundaries that satisfy the query conditions (sigma, gb_plane).
        Energies are given in SI units (J/m^2).
    """
    # The REST endpoint expects Miller indices as comma-separated strings.
    if gb_plane:
        gb_plane = ",".join([str(i) for i in gb_plane])
    if rotation_axis:
        rotation_axis = ",".join([str(i) for i in rotation_axis])
    payload = {
        "material_id": material_id,
        "pretty_formula": pretty_formula,
        "chemsys": chemsys,
        "sigma": sigma,
        "gb_plane": gb_plane,
        "rotation_axis": rotation_axis,
    }
    if include_work_of_separation and material_id:
        list_of_gbs = self._make_request("/grain_boundaries", payload=payload)
        for i, gb_dict in enumerate(list_of_gbs):
            gb_energy = gb_dict["gb_energy"]
            gb_plane_int = gb_dict["gb_plane"]
            surface_energy = self.get_surface_data(material_id=material_id, miller_index=gb_plane_int)[
                "surface_energy"
            ]
            # Work of separation = energy to cleave the boundary into two
            # free surfaces: W_sep = 2 * gamma_surface - gamma_gb.
            wsep = 2 * surface_energy - gb_energy  # calculate the work of separation
            gb_dict["work_of_separation"] = wsep
        return list_of_gbs
    return self._make_request("/grain_boundaries", payload=payload)
def get_interface_reactions(
    self,
    reactant1,
    reactant2,
    open_el=None,
    relative_mu=None,
    use_hull_energy=False,
):
    """Query the critical reactions between two reactants.

    "Critical" reactions are the kinks in the mixing ratio at which the set
    of reaction products changes; see the
    `pymatgen.analysis.interface_reactions` module for details.

    Args:
        reactant1 (str): Chemical formula for reactant
        reactant2 (str): Chemical formula for reactant
        open_el (str): Element in reservoir available to system
        relative_mu (float): Relative chemical potential of element in
            reservoir with respect to pure substance. Must be non-positive.
        use_hull_energy (bool): Whether to use the convex hull energy for a
            given composition for the reaction energy calculation. If false,
            the energy of the ground state structure will be preferred; if a
            ground state can not be found for a composition, the convex hull
            energy will be used with a warning message.

    Returns:
        list: list of dicts of form {ratio,energy,rxn} where `ratio` is the
            reactant mixing ratio, `energy` is the reaction energy
            in eV/atom, and `rxn` is a
            `pymatgen.analysis.reaction_calculator.Reaction`.
    """
    request_body = {
        "reactants": " ".join([reactant1, reactant2]),
        "open_el": open_el,
        "relative_mu": relative_mu,
        "use_hull_energy": use_hull_energy,
    }
    return self._make_request("/interface_reactions", payload=request_body, method="POST")
def get_download_info(self, material_ids, task_types=None, file_patterns=None):
    """
    get a list of URLs to retrieve raw VASP output files from the NoMaD repository
    Args:
        material_ids (list): list of material identifiers (mp-id's)
        task_types (list): list of task types to include in download (see TaskType Enum class)
        file_patterns (list): list of wildcard file names to include for each task
    Returns:
        a tuple of 1) a dictionary mapping material_ids to task_ids and
        task_types, and 2) a list of URLs to download zip archives from
        NoMaD repository. Each zip archive will contain a manifest.json with
        metadata info, e.g. the task/external_ids that belong to a directory
    Raises:
        ValueError: if none of the requested materials have matching tasks.
    """
    # task_id's correspond to NoMaD external_id's
    # Normalize to a list of task-type value strings; non-TaskType entries
    # are silently dropped.
    task_types = [t.value for t in task_types if isinstance(t, TaskType)] if task_types else []
    meta = defaultdict(list)
    for doc in self.query({"material_id": {"$in": material_ids}}, ["material_id", "blessed_tasks"]):
        for task_type, task_id in doc["blessed_tasks"].items():
            if task_types and task_type not in task_types:
                continue
            meta[doc["material_id"]].append({"task_id": task_id, "task_type": task_type})
    if not meta:
        raise ValueError("No tasks found.")
    # return a list of URLs for NoMaD Downloads containing the list of files
    # for every external_id in `task_ids`
    prefix = "http://labdev-nomad.esc.rzg.mpg.de/fairdi/nomad/mp/api/raw/query?"
    if file_patterns is not None:
        for file_pattern in file_patterns:
            prefix += f"file_pattern={file_pattern}&"
    prefix += "external_id="
    # NOTE: IE has 2kb URL char limit
    # Chunk the ids so each URL stays under ~2000 chars; every id occupies
    # len("mp-<7-digit>,") = 11 characters.
    nmax = int((2000 - len(prefix)) / 11)  # mp-<7-digit> + , = 11
    task_ids = [t["task_id"] for tl in meta.values() for t in tl]
    chunks = get_chunks(task_ids, size=nmax)
    urls = [prefix + ",".join(tids) for tids in chunks]
    return meta, urls
@staticmethod
def parse_criteria(criteria_string):
    """
    Parses a powerful and simple string criteria and generates a proper
    mongo syntax criteria.
    Args:
        criteria_string (str): A string representing a search criteria.
            Also supports wild cards. E.g.,
            something like "*2O" gets converted to
            {'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}
            Other syntax examples:
            mp-1234: Interpreted as a Materials ID.
            Fe2O3 or *2O3: Interpreted as reduced formulas.
            Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
            You can mix and match with spaces, which are interpreted as
            "OR". E.g., "mp-1234 FeO" means query for all compounds with
            reduced formula FeO or with materials_id mp-1234.
    Returns:
        A mongo query dict.
    """
    toks = criteria_string.split()

    def parse_sym(sym):
        # Expand one symbol token: "*" -> every element, "{Fe,Mn}" -> the
        # listed symbols, anything else -> the literal symbol.
        if sym == "*":
            return [el.symbol for el in Element]
        m = re.match(r"\{(.*)\}", sym)
        if m:
            return [s.strip() for s in m.group(1).split(",")]
        return [sym]

    def parse_tok(t):
        # <word>-<digits> is a Materials ID (queried as task_id).
        if re.match(r"\w+-\d+", t):
            return {"task_id": t}
        if "-" in t:
            # Dash-separated tokens are chemical systems; expand wildcards
            # and keep only combinations of distinct, valid elements.
            elements = [parse_sym(sym) for sym in t.split("-")]
            chemsyss = []
            for cs in itertools.product(*elements):
                if len(set(cs)) == len(cs):
                    # Check for valid symbols
                    cs = [Element(s).symbol for s in cs]
                    chemsyss.append("-".join(sorted(cs)))
            return {"chemsys": {"$in": chemsyss}}
        # Otherwise the token is a (possibly wildcarded) formula.
        all_formulas = set()
        explicit_els = []
        wild_card_els = []
        for sym in re.findall(r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
            if ("*" in sym) or ("{" in sym):
                wild_card_els.append(sym)
            else:
                m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
                explicit_els.append(m.group(1))
        # Candidate formulas must contain exactly this many distinct
        # elements to be accepted below.
        nelements = len(wild_card_els) + len(set(explicit_els))
        parts = re.split(r"(\*|\{.*\})", t)
        parts = [parse_sym(s) for s in parts if s != ""]
        for f in itertools.product(*parts):
            c = Composition("".join(f))
            if len(c) == nelements:
                # Check for valid Elements in keys.
                for e in c.keys():
                    Element(e.symbol)
                all_formulas.add(c.reduced_formula)
        return {"pretty_formula": {"$in": list(all_formulas)}}

    if len(toks) == 1:
        return parse_tok(toks[0])
    # Multiple space-separated tokens are OR-ed together.
    return {"$or": list(map(parse_tok, toks))}
class MPRestError(Exception):
    """Exception raised by the MPRest adaptor when a query fails,
    e.g. because the query format is bad."""
| gmatteo/pymatgen | pymatgen/ext/matproj.py | Python | mit | 67,879 | [
"ABINIT",
"VASP",
"pymatgen"
] | 1a1c25d400a27434d923289e410875e083860d483f895721bd1e03c8835a355b |
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import absolute_import, print_function
import os
import argparse
import ujson
from sys import stdout, exit
import socket
import numpy as np
import mdtraj as md
import time, random
import simtk.unit as u
from simtk.openmm import Platform, XmlSerializer
from simtk.openmm.app import PDBFile, Simulation, DCDReporter, StateDataReporter
def get_xml(xml_file):
    """Read and deserialize an OpenMM XML file, retrying on read failures.

    Shared filesystems can briefly expose partially written files, which
    makes ``XmlSerializer.deserialize`` raise ``ValueError``; such failures
    are retried up to 500 times with a short randomized back-off.

    Returns:
        tuple: (raw xml string, deserialized OpenMM object)
    """
    # TODO file access control
    if not xml_file.endswith('.xml'):
        raise IOError("{} must end in '.xml' for reading as XML file".format(xml_file))
    retries = 500
    attempt = 0
    while True:
        try:
            with open(xml_file) as handle:
                contents = handle.read()
            return contents, XmlSerializer.deserialize(contents)
        except ValueError as err:
            if attempt >= retries:
                raise err
            attempt += 1
            time.sleep(5 * random.random())
def get_platform(platform_name):
    """Resolve an OpenMM Platform by name.

    ``'fastest'`` yields ``None`` so that OpenMM chooses the quickest
    available platform itself. Failed lookups are retried up to 500 times
    with a randomized back-off.

    NOTE(review): only ``IndexError`` is retried here -- confirm that this
    is the exception ``Platform.getPlatformByName`` actually raises.
    """
    if platform_name == 'fastest':
        return None
    # TODO file access control
    retries = 500
    attempt = 0
    while True:
        try:
            return Platform.getPlatformByName(platform_name)
        except IndexError as err:
            if attempt >= retries:
                raise err
            attempt += 1
            time.sleep(5 * random.random())
def get_pdbfile(topology_pdb):
    """Load a PDB topology file, retrying on read failures.

    Retries an ``IndexError`` (raised by OpenMM's parser on a partially
    written file) up to 500 times with a randomized back-off.
    """
    # TODO file access control
    if not topology_pdb.endswith('.pdb'):
        raise IOError("{} must end in '.pdb' for reading as PDB file".format(topology_pdb))
    retries = 500
    attempt = 0
    while True:
        try:
            return PDBFile(topology_pdb)
        except IndexError as err:
            if attempt >= retries:
                raise err
            attempt += 1
            time.sleep(5 * random.random())
def read_input(platform, pdbfile, system, integrator):
    """Load the four simulation inputs, reading the files in random order.

    Randomizing the read order spreads simultaneous file access from many
    concurrently starting workers across the different input files.

    Returns:
        list: [platform, pdb, (system_xml, system), (integrator_xml, integrator)]
    """
    loaders = {'get_platform': (get_platform, platform),
               'get_pdbfile': (get_pdbfile, pdbfile),
               'get_system': (get_xml, system),
               'get_integrator': (get_xml, integrator)}
    order = list(loaders.keys())
    random.shuffle(order)
    results = dict()
    for op_name in order:
        loader, argument = loaders[op_name]
        results[op_name] = loader(argument)
    return [results[key] for key in
            ('get_platform', 'get_pdbfile', 'get_system', 'get_integrator')]
if __name__ == '__main__':
    # ---- command-line definition -------------------------------------
    # Platform-specific OpenMM properties that may be supplied either on
    # the command line or through equally named environment variables.
    # add further auto options here
    platform_properties = {
        'CUDA': ['Cuda_Device_Index', 'Cuda_Precision', 'Cuda_Use_Cpu_Pme',
                 'Cuda_Cuda_Compiler', 'Cuda_Temp_Directory', 'Cuda_Use_Blocking_Sync',
                 'Cuda_Deterministic_Forces'],
        'OpenCL': ['OpenCL_Device_Index', 'OpenCL_Precision', 'OpenCL_Use_Cpu_Pme',
                   'OpenCL_OpenCL_Platform_Index'],
        'CPU': ['CPU_Threads'],
        'Reference': []
    }
    platform_names = [
        Platform.getPlatform(no_platform).getName()
        for no_platform in range(Platform.getNumPlatforms())]
    parser = argparse.ArgumentParser(
        description='Run an MD simulation using OpenMM')
    parser.add_argument(
        'output',
        metavar='output/',
        help='the output directory',
        type=str)
    parser.add_argument(
        '-l', '--length', dest='length',
        type=int, default=100, nargs='?',
        help='the number of frames to be simulated')
    parser.add_argument(
        '--store-interval', dest='interval_store',
        type=int, default=1, nargs='?',
        help='store every nth interval')
    parser.add_argument(
        '--report-interval', dest='interval_report',
        type=int, default=1, nargs='?',
        help='report every nth interval')
    parser.add_argument(
        '-s', '--system', dest='system_xml',
        type=str, default='system.xml', nargs='?',
        help='the path to the system.xml file')
    parser.add_argument(
        '--restart', dest='restart',
        type=str, default='', nargs='?',
        help='the path to the restart file. If given the coordinates in the topology file '
             'will be ignored.')
    parser.add_argument(
        '-i', '--integrator', dest='integrator_xml',
        type=str, default='integrator.xml', nargs='?',
        help='the path to the integrator.xml file')
    parser.add_argument(
        '-t', '--topology', dest='topology_pdb',
        type=str, default='topology.pdb', nargs='?',
        help='the path to the topology.pdb file')
    parser.add_argument(
        '-v', '--verbose',
        dest='verbose', action='store_true',
        default=False,
        help='if set then text output is send to the ' +
             'console.')
    parser.add_argument(
        '--types', dest='types',
        type=str, default='', nargs='?',
        help='alternative definition for output files and strides')
    for p in platform_properties:
        for v in platform_properties[p]:
            p_name = (p + '_' + v)
            parser.add_argument(
                '--' + p_name.lower().replace('_', '-'),
                dest=v.lower(), type=str,
                default="",
                # FIX: the '[NOT INSTALLED!]' marker was previously appended
                # as `... + marker if cond else ''`; since the conditional
                # expression binds looser than `+`, the WHOLE help text was
                # replaced by '' for installed platforms. Parenthesize so
                # only the marker itself is conditional.
                help=(
                    'This will set the platform property `%s`. ' % p_name.replace('_', '') +
                    'If not set the environment variable '
                    '`%s` will be used instead. ' % p_name.upper()
                ) + ('[NOT INSTALLED!]' if p not in platform_names else '')
            )
    parser.add_argument(
        '-r', '--report',
        dest='report', action='store_true',
        default=False,
        help='if set then a report is send to STDOUT')
    parser.add_argument(
        '-p', '--platform', dest='platform',
        type=str, default='fastest', nargs='?',
        help=('used platform. Currently allowed choices are ' +
              ', '.join(['`%s`' % p if p in platform_names else '`(%s)`' % p
                         for p in platform_properties.keys()]) +
              ' but are machine and installation dependend'))
    parser.add_argument(
        '--temperature',
        type=int, default=300,
        help='temperature if not given in integrator xml')
    args = parser.parse_args()
    print('GO...')
    # ---- assemble platform properties --------------------------------
    properties = None
    if args.platform in platform_properties:
        properties = {}
        props = platform_properties[args.platform]
        for v in props:
            p_name = args.platform + '_' + v
            # environment variable is the fallback value
            value = os.environ.get(p_name.upper(), None)
            # FIX: argparse stores these options under dest=v.lower(); the
            # previous check `hasattr(args, p_name.lower())` queried a name
            # that never exists on `args`, so command-line values could
            # never take effect. A non-empty command-line value now
            # overrides the environment.
            cli_value = getattr(args, v.lower(), "")
            if cli_value:
                value = cli_value
            if value:
                properties[
                    args.platform + '_' + v.replace('_', '')
                ] = value
    # Randomizes the order of file reading to
    # alleviate traffic from synchronization
    platform, pdb, (system_xml, system), (integrator_xml, integrator) \
        = read_input(args.platform, args.topology_pdb,
                     args.system_xml, args.integrator_xml)
    print('Done')
    print('Initialize Simulation')
    try:
        simulation = Simulation(
            pdb.topology,
            system,
            integrator,
            platform,
            properties
        )
        print("SIMULATION: ", simulation)
    except Exception:
        print('EXCEPTION', (socket.gethostname()))
        raise
    print('Done.')
    print('# platform used:', simulation.context.getPlatform().getName())
    if args.verbose:
        print('# platforms available')
        for no_platform in range(Platform.getNumPlatforms()):
            # noinspection PyCallByClass,PyTypeChecker
            print('(%d) %s' % (no_platform, Platform.getPlatform(no_platform).getName()))
        print(os.environ)
        print(Platform.getPluginLoadFailures())
        print(Platform.getDefaultPluginsDirectory())
    # ---- initial conditions ------------------------------------------
    if args.restart:
        # restart file provides positions, velocities and box vectors
        arr = np.load(args.restart)
        simulation.context.setPositions(arr['positions'] * u.nanometers)
        simulation.context.setVelocities(arr['velocities'] * u.nanometers / u.picosecond)
        simulation.context.setPeriodicBoxVectors(*arr['box_vectors'] * u.nanometers)
    else:
        simulation.context.setPositions(pdb.positions)
        pbv = system.getDefaultPeriodicBoxVectors()
        simulation.context.setPeriodicBoxVectors(*pbv)
        # set velocities to temperature in integrator
        try:
            temperature = integrator.getTemperature()
        except AttributeError:
            # integrator without a temperature (e.g. Verlet): use CLI value
            assert args.temperature > 0
            temperature = args.temperature * u.kelvin
        print('# temperature:', temperature)
        simulation.context.setVelocitiesToTemperature(temperature)
    output = args.output
    # ---- reporters ---------------------------------------------------
    types = None
    if args.types:
        # seems like we have JSON
        types_str = args.types.replace("'", '"')
        print(types_str)
        types = ujson.loads(types_str)
        if isinstance(types, dict):
            for name, opts in types.items():
                if 'filename' in opts and 'stride' in opts:
                    output_file = os.path.join(output, opts['filename'])
                    selection = opts['selection']
                    if selection is not None:
                        mdtraj_topology = md.Topology.from_openmm(pdb.topology)
                        atom_subset = mdtraj_topology.select(selection)
                    else:
                        atom_subset = None
                    simulation.reporters.append(
                        md.reporters.DCDReporter(
                            output_file, opts['stride'], atomSubset=atom_subset))
                    print('Writing stride %d to file `%s` with selection `%s`' % (
                        opts['stride'], opts['filename'], opts['selection']))
    else:
        # use defaults from arguments
        output_file = os.path.join(output, 'output.dcd')
        simulation.reporters.append(
            DCDReporter(output_file, args.interval_store))
    if not args.restart:
        # if not a restart write first frame
        state = simulation.context.getState(getPositions=True)
        for r in simulation.reporters:
            r.report(simulation, state)
    if args.report and args.verbose:
        output_stride = args.interval_store
        if types:
            output_stride = min([oty['stride'] for oty in types.values()])
        simulation.reporters.append(
            StateDataReporter(
                stdout,
                output_stride,
                step=True,
                potentialEnergy=True,
                temperature=True,
                speed=True,
                separator=" || ",
            ))
    restart_file = os.path.join(output, 'restart.npz')
    print('START SIMULATION')
    simulation.step(args.length)
    print('DONE')
    # ---- persist restart information ---------------------------------
    state = simulation.context.getState(getPositions=True, getVelocities=True)
    pbv = state.getPeriodicBoxVectors(asNumpy=True)
    vel = state.getVelocities(asNumpy=True)
    pos = state.getPositions(asNumpy=True)
    np.savez(restart_file, positions=pos, box_vectors=pbv, velocities=vel, index=args.length)
    print('Written to directory `%s`' % args.output)
    exit(0)
| jrossyra/adaptivemd | adaptivemd/engine/openmm/openmmrun.py | Python | lgpl-2.1 | 12,440 | [
"MDTraj",
"OpenMM"
] | 3a043db568cc6604de2ff06378905e1413bd3d959bc16851ebd498d627a37ee9 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from cinder.api import xmlutil
from cinder import test
class SelectorTest(test.TestCase):
    """Unit tests for ``xmlutil.Selector`` and ``xmlutil.ConstantSelector``."""

    # Shared fixture: nested structure exercising dict, list and
    # sub-dict ("attrs") access paths.
    obj_for_test = {'test': {'name': 'test',
                             'values': [1, 2, 3],
                             'attrs': {'foo': 1,
                                       'bar': 2,
                                       'baz': 3, }, }, }

    def test_empty_selector(self):
        # A selector with no chain acts as the identity function.
        sel = xmlutil.Selector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)

    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])

    def test_datum_selector(self):
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')

    def test_list_selector(self):
        # Integer chain entries index into lists.
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)

    def test_items_selector(self):
        # get_items as the last chain element yields (key, value) pairs.
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)

    def test_missing_key_selector(self):
        # Missing keys yield None unless do_raise=True is passed.
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(sel(self.obj_for_test))
        self.assertRaises(KeyError, sel, self.obj_for_test, True)

    def test_constant_selector(self):
        # A ConstantSelector ignores its argument entirely.
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
    """Unit tests for ``xmlutil.TemplateElement`` covering attribute
    handling, selectors, child management, rendering and application."""

    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        # (keyword arguments win over the attrib dict for key 'c').
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)

    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)

    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set:
        # None -> selector on the attribute name, str -> selector on the
        # string, Selector -> stored as-is.
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])

    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)

    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)

    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)

    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')

    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)

    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)

    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')

    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)

    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertIn('child', elem)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)

    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [xmlutil.TemplateElement('child4'),
                     xmlutil.TemplateElement('child1'), ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        # (extend must be atomic: a rejected batch adds nothing).
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')

    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)

    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        # (same tag as an existing child but a distinct object).
        child = xmlutil.TemplateElement('child2')
        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertNotIn('child2', elem)

        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')

    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)

    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        # (selector values are stringified on application).
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)

    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)

    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
                       xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render:
        # master and slave attributes are merged onto one element.
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')

    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object
        # (render returns (element, datum) pairs).
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])

    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)

    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)

    def test_dyntag(self):
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        # (the selector supplies the tag name at render time).
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
def test_wrap(self):
    # These are strange methods, but they make things easier
    root = xmlutil.TemplateElement('test')
    template = xmlutil.Template(root)
    self.assertEqual(template.unwrap(), root)
    self.assertEqual(template.wrap(), template)
def test__siblings(self):
    # A bare template has exactly one sibling: its own root element.
    root = xmlutil.TemplateElement('test')
    template = xmlutil.Template(root)
    siblings = template._siblings()
    self.assertEqual(len(siblings), 1)
    self.assertEqual(siblings[0], root)
def test__splitTagName(self):
    # Tags split on ':' except inside '{...}' namespace clarks.
    cases = (
        ('a', ['a']),
        ('a:b', ['a', 'b']),
        ('{http://test.com}a:b', ['{http://test.com}a', 'b']),
        ('a:b{http://test.com}:c', ['a', 'b{http://test.com}', 'c']),
    )
    for tag, expected in cases:
        self.assertEqual(expected,
                         xmlutil.TemplateElement._splitTagName(tag))
def test__nsmap(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
# Check out that we get the right namespace dictionary
nsmap = tmpl._nsmap()
self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
def test_master_attach(self):
# Set up a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1)
# Make sure it has a root but no slaves
self.assertEqual(tmpl.root, elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid slave
bad_elem = xmlutil.TemplateElement('test2')
self.assertRaises(ValueError, tmpl.attach, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid and a valid slave
good_elem = xmlutil.TemplateElement('test')
self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an inapplicable template
class InapplicableTemplate(xmlutil.Template):
def apply(self, master):
return False
inapp_tmpl = InapplicableTemplate(good_elem)
tmpl.attach(inapp_tmpl)
self.assertEqual(len(tmpl.slaves), 0)
# Now try attaching an applicable template
tmpl.attach(good_elem)
self.assertEqual(len(tmpl.slaves), 1)
self.assertEqual(tmpl.slaves[0].root, good_elem)
def test_master_copy(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
# Give it a slave
slave = xmlutil.TemplateElement('test')
tmpl.attach(slave)
# Construct a copy
copy = tmpl.copy()
# Check to see if we actually managed a copy
self.assertNotEqual(tmpl, copy)
self.assertEqual(tmpl.root, copy.root)
self.assertEqual(tmpl.version, copy.version)
self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
self.assertEqual(len(tmpl.slaves), len(copy.slaves))
self.assertEqual(tmpl.slaves[0], copy.slaves[0])
def test_slave_apply(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
master = xmlutil.MasterTemplate(elem, 3)
# Construct a slave template with applicable minimum version
slave = xmlutil.SlaveTemplate(elem, 2)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with equal minimum version
slave = xmlutil.SlaveTemplate(elem, 3)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with inapplicable minimum version
slave = xmlutil.SlaveTemplate(elem, 4)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with applicable version range
slave = xmlutil.SlaveTemplate(elem, 2, 4)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with low version range
slave = xmlutil.SlaveTemplate(elem, 1, 2)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with high version range
slave = xmlutil.SlaveTemplate(elem, 4, 5)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with matching version range
slave = xmlutil.SlaveTemplate(elem, 3, 3)
self.assertEqual(slave.apply(master), True)
def test__serialize(self):
# Our test object to serialize
obj = {'test': {'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {'a': 1,
'b': 2,
'c': 3,
'd': 4, },
'image': {'name': 'image_foobar', 'id': 42, }, }, }
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
value.text = xmlutil.Selector()
attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
key=0, value=1)
master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
# Set up our slave template
root_slave = xmlutil.TemplateElement('test', selector='test')
image = xmlutil.SubTemplateElement(root_slave, 'image',
selector='image', id='id')
image.text = xmlutil.Selector('name')
slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
# Attach the slave to the master...
master.attach(slave)
# Try serializing our object
siblings = master._siblings()
nsmap = master._nsmap()
result = master._serialize(None, obj, siblings, nsmap)
# Now we get to manually walk the element tree...
self.assertEqual(result.tag, 'test')
self.assertEqual(len(result.nsmap), 2)
self.assertEqual(result.nsmap['f'], 'foo')
self.assertEqual(result.nsmap['b'], 'bar')
self.assertEqual(result.get('name'), obj['test']['name'])
for idx, val in enumerate(obj['test']['values']):
self.assertEqual(result[idx].tag, 'value')
self.assertEqual(result[idx].text, str(val))
idx += 1
self.assertEqual(result[idx].tag, 'attrs')
for attr in result[idx]:
self.assertEqual(attr.tag, 'attr')
self.assertEqual(attr.get('value'),
str(obj['test']['attrs'][attr.get('key')]))
idx += 1
self.assertEqual(result[idx].tag, 'image')
self.assertEqual(result[idx].get('id'),
str(obj['test']['image']['id']))
self.assertEqual(result[idx].text, obj['test']['image']['name'])
def test_serialize_with_delimiter(self):
# Our test object to serialize
obj = {'test': {'scope0:key1': 'Value1',
'scope0:scope1:key2': 'Value2',
'scope0:scope1:scope2:key3': 'Value3'
}}
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test')
key1 = xmlutil.SubTemplateElement(root, 'scope0:key1',
selector='scope0:key1')
key1.text = xmlutil.Selector()
key2 = xmlutil.SubTemplateElement(root, 'scope0:scope1:key2',
selector='scope0:scope1:key2')
key2.text = xmlutil.Selector()
key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3',
selector='scope0:scope1:scope2:key3')
key3.text = xmlutil.Selector()
serializer = xmlutil.MasterTemplate(root, 1)
xml_list = []
xml_list.append("<?xmlversion='1.0'encoding='UTF-8'?><test>")
xml_list.append("<scope0><key1>Value1</key1><scope1>")
xml_list.append("<key2>Value2</key2><scope2><key3>Value3</key3>")
xml_list.append("</scope2></scope1></scope0></test>")
expected_xml = ''.join(xml_list)
result = serializer.serialize(obj)
result = result.replace('\n', '').replace(' ', '')
self.assertEqual(result, expected_xml)
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a version-1 master template for the tests."""
    def construct(self):
        root = xmlutil.TemplateElement('test')
        return xmlutil.MasterTemplate(root, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder producing a version-1 slave template for the tests."""
    def construct(self):
        root = xmlutil.TemplateElement('test')
        return xmlutil.SlaveTemplate(root, 1)
class TemplateBuilderTest(test.TestCase):
    """Tests for the caching behavior of TemplateBuilder subclasses."""

    def test_master_template_builder(self):
        """Master builders cache the template but return fresh copies."""
        # Make sure the template hasn't been built yet
        self.assertIsNone(MasterTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)

    def test_slave_template_builder(self):
        """Slave builders cache the template and hand back the cached one."""
        # Make sure the template hasn't been built yet
        self.assertIsNone(SlaveTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
    """Tests for stand-alone helpers in xmlutil."""

    def test_make_flat_dict(self):
        """make_flat_dict() serializes a flat dict into simple XML tags."""
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        flat_root = xmlutil.make_flat_dict('wrapper')
        master = xmlutil.MasterTemplate(flat_root, 1)
        serialized = master.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(serialized, expected_xml)
| saeki-masaki/cinder | cinder/tests/unit/api/test_xmlutil.py | Python | apache-2.0 | 27,333 | [
"VisIt"
] | 2d43c5821ce2ecb03abcd5d7ccf593c3b160040dbab25ab6b5ac16adc4658414 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Dummy setup.py file solely for the purposes of getting an on-the-fly
computed version number into the conda recipe.
"""
import sys
from distutils.core import setup
def version_func():
    """Return the long version string reported by psi4/versioner.py."""
    import subprocess

    cmd = 'python psi4/versioner.py --formatonly --format={versionlong}'
    proc = subprocess.Popen(cmd.split(), shell=False, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    # The pipe yields bytes on Python 3; decode so callers get a str.
    if sys.version_info >= (3, 0):
        return out.decode('utf-8').strip()
    return out.strip()
# Invoke distutils with only the dynamically computed version string; this
# setup.py exists solely so the conda recipe can query the package version
# (see the module docstring above).
setup(
    version=version_func(),
)
| jH0ward/psi4 | conda/_conda_vers.py | Python | lgpl-3.0 | 1,480 | [
"Psi4"
] | 8d6656a374a61898b4583768d38b3408913f8a4d1ec2fbf37fe2801adcd9d350 |
'''Connection weight figures.
.. currentmodule:: noisefigs.plotters.connections
Classes
-------
.. autosummary::
WeightExamplesHists
WeightOutE2IPlotter
WeightOutI2EPlotter
WeightInE2IPlotter
WeightInI2EPlotter
WeightGridPlotter
Burak2009ConnectionPlotter
'''
from __future__ import absolute_import, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ti
from matplotlib.transforms import Bbox
from matplotlib.gridspec import GridSpec
from grid_cell_model.parameters.param_space import JobTrialSpace2D, DataSpace
import grid_cell_model.plotting.connections as pconn
from grid_cell_model.analysis.image import Position2D
from simtools.plotting.plotters import FigurePlotter
from ..EI_plotting import aggregate as aggr
# Short alias for convenient access to DataSpace helpers below.
DS = DataSpace

# Public plotter classes exported by this module.
__all__ = [
    'WeightExamplesHists',
    'WeightOutE2IPlotter',
    'WeightOutI2EPlotter',
    'WeightInE2IPlotter',
    'WeightInI2EPlotter',
    'WeightGridPlotter',
    'Burak2009ConnectionPlotter',
]

# Parameter-space iteration variables (E and I total conductances).
iterList = ['g_AMPA_total', 'g_GABA_total']
def plotEToI(sp, gIdx, neuronIdx, trialNum=0, **kw):
    '''Histogram of E-->I synaptic weights onto a single I cell.

    The x-axis runs from 0 to the g_E value at column ``gIdx`` of the
    parameter space and is labelled symbolically with ``$g_E$``.
    '''
    title = kw.pop('title', 'I cell')
    ylim = kw.pop('ylim', None)

    gE, _ = aggr.computeYX(sp, iterList)
    weights = sp[0][gIdx][trialNum].data['g_IE'][neuronIdx, :]
    ax = pconn.plotConnHistogram(weights,
                                 title=title, **kw)

    gMax = gE[0, gIdx]
    if gMax == int(gMax):
        gMax = int(gMax)
    ax.set_xlim([0, gMax])
    ax.set_xticks([0, gMax])
    ax.xaxis.set_ticklabels([0, '$g_E$'])
    ax.set_ylim(ylim)
def plotIToE(sp, gIdx, neuronIdx, trialNum=0, **kw):
    '''Histogram of I-->E synaptic weights onto a single E cell.

    Annotates the axes with the g_I value taken from column ``gIdx`` of the
    parameter space.
    '''
    title = kw.pop('title', 'E cell')

    _, gI = aggr.computeYX(sp, iterList)
    weights = sp[0][gIdx][trialNum].data['g_EI'][neuronIdx, :]
    ax = pconn.plotConnHistogram(weights,
                                 title=title, **kw)

    gMax = gI[0, gIdx]
    if gMax == int(gMax):
        gMax = int(gMax)
    ann = '$g_I$ = {0} nS'.format(gMax)
    ax.text(0.95, 0.9, ann, ha='right', va='bottom', fontsize='x-small',
            transform=ax.transAxes)
    ax.set_xlim([0, gMax])
    ax.set_xticks([0, gMax])
def plotIToEBrokenAxis(sp, gIdx, neuronIdx, trialNum=0, axBoundaries=None,
                       axesProportions=(0.5, 0.5), bottomLimits=None,
                       topLimits=None, **kw):
    '''Histogram of I-->E weights onto one E cell, drawn on a broken y-axis.

    The same histogram is rendered twice, into a bottom axis (y-range
    ``bottomLimits``) and a top axis (y-range ``topLimits``); diagonal marks
    indicate the break between the two.

    Parameters
    ----------
    sp : parameter space
        2D parameter space holding the connection data (indexed as
        ``sp[0][gIdx][trialNum]``).
    gIdx : int
        Column index of the g_I value in the parameter space.
    neuronIdx : int
        Index of the post-synaptic E cell whose incoming weights are shown.
    trialNum : int
        Trial number to take the data from.
    axBoundaries : sequence of 4 floats, or None
        (left, bottom, right, top) of the combined axes in figure
        coordinates; defaults to the whole figure.
    axesProportions : pair of floats
        Fractions of the total height assigned to the bottom and top axes.
    bottomLimits, topLimits : pair, or None
        y-limits of the bottom and top axes, respectively.

    Returns
    -------
    axBottom, axTop : matplotlib Axes
        The two axes making up the broken axis.
    '''
    if axBoundaries is None:
        axBoundaries = [0, 0, 1, 1]
    left, bottom, right, top = axBoundaries
    title = kw.pop('title', 'E cell')
    fig = kw.pop('fig', plt.gcf())
    h = top - bottom
    w = right - left
    hBottom = h*axesProportions[0]
    hTop = h*axesProportions[1]

    # Two stacked axes sharing the x-axis; the top axis shows the clipped
    # high-count range, the bottom one the main range.
    axBottom = fig.add_axes(Bbox.from_extents(left, bottom, right, bottom +
                                              hBottom))
    axTop = fig.add_axes(Bbox.from_extents(left, top - hTop, right, top),
                         sharex=axBottom)

    _, gI = aggr.computeYX(sp, iterList)
    M = sp[0][gIdx][trialNum].data['g_EI']
    conns = M[neuronIdx, :]

    # Draw the same histogram into both axes; each axis later shows only
    # its own y-range.
    pconn.plotConnHistogram(conns, title=title, ax=axBottom, **kw)
    kw['ylabel'] = ''
    pconn.plotConnHistogram(conns, title=title, ax=axTop, **kw)
    annG = gI[0, gIdx]
    if annG - int(annG) == 0:
        annG = int(annG)
    #ann = '$g_I$ = {0} nS'.format(annG)
    #fig.text(left+0.95*w, bottom+0.9*h, ann, ha='right', va='bottom',
    #         fontsize='x-small')
    axBottom.set_xlim([0, annG])
    axBottom.set_xticks([0, annG])
    axBottom.xaxis.set_ticklabels([0, '$g_I$'])
    axBottom.set_ylim(bottomLimits)
    axBottom.set_yticks(bottomLimits)
    axBottom.yaxis.set_minor_locator(ti.NullLocator())
    axTop.set_ylim(topLimits)
    axTop.set_yticks([topLimits[1]])
    axTop.xaxis.set_visible(False)
    axTop.spines['bottom'].set_visible(False)

    # Diagonal break marks between the two axes, drawn in figure
    # coordinates so they straddle the axis edges.
    divLen = 0.07
    d = .015
    kwargs = dict(transform=fig.transFigure, color='k', clip_on=False)
    axBottom.plot((left-divLen*w, left+divLen*w), (bottom+hBottom + d,
                                                   bottom+hBottom - d),
                  **kwargs)
    axTop.plot((left-divLen*w, left+divLen*w), (top-hTop + d, top-hTop - d),
               **kwargs)

    return axBottom, axTop
class WeightExamplesHists(FigurePlotter):
    '''Example histograms of E-->I and I-->E synaptic weights.'''

    # Bounding box (figure fractions) used for the shared y-label position.
    left = 0.35
    bottom = 0.32
    right = 0.95
    top = 0.85
    # Index of the neuron whose weights are plotted.
    neuronIdx = 0

    def __init__(self, *args, **kwargs):
        super(WeightExamplesHists, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Plot and save E-->I and I-->E weight histograms for two examples.'''
        ps = self.env.ps
        output_dir = self.config['output_dir']

        exampleFigSize = (1.6, 1.4)
        exLeft = 0.4
        exBottom = 0.32
        exRight = 0.95
        exTop = 0.85
        # (row, column) coordinates of the two examples in parameter space.
        exampleRC = ( (5, 15), (15, 5) )
        for exIdx, example in enumerate(exampleRC):
            kw = dict()
            if exIdx == 1:
                kw['xlabel'] = ''

            # E-->I histogram for this example.
            fig = self._get_final_fig(exampleFigSize)
            ax = fig.add_axes(Bbox.from_extents(exLeft, exBottom, exRight, exTop))
            plotEToI(ps.conn, example[0], self.neuronIdx, ylabel='', title='',
                     rwidth=0.8,
                     linewidth=0,
                     **kw)
            ax.yaxis.set_minor_locator(ti.NullLocator())
            ax.set_xlabel(ax.xaxis.get_label_text(), labelpad=-5)
            fname = output_dir + "/figure_connections_examples_E2I{0}.pdf"
            plt.savefig(fname.format(exIdx), dpi=300, transparent=True)
            plt.close()

            # I-->E histogram for this example, drawn on a broken y-axis.
            fig = self._get_final_fig(exampleFigSize)
            axBoundaries = (exLeft, exBottom, exRight, exTop)
            axBottom, axTop = plotIToEBrokenAxis(ps.conn, example[1],
                                                 self.neuronIdx,
                                                 ylabel='', title='',
                                                 axBoundaries=axBoundaries,
                                                 axesProportions=(0.75, 0.2),
                                                 bottomLimits=(0, 60),
                                                 topLimits=(800, 900),
                                                 rwidth=0.8,
                                                 linewidth=0,
                                                 **kw)
            axBottom.set_xlabel(axBottom.xaxis.get_label_text(), labelpad=-5)
            # Shared y-label spanning the two stacked axes.
            fig.text(exLeft - 0.27, 0.5*(self.bottom+self.top), 'Count',
                     rotation=90, ha='center', va='center')
            fname = output_dir + "/figure_connections_examples_I2E{0}.pdf"
            plt.savefig(fname.format(exIdx), dpi=300, transparent=True)
            plt.close()
class WeightPlotter(FigurePlotter):
    '''Color plots of incoming/outgoing synaptic weights of a single neuron.

    Base class for the concrete weight plotters below; provides the common
    weight-matrix extraction and 2D plotting.
    '''

    def __init__(self, *args, **kwargs):
        super(WeightPlotter, self).__init__(*args, **kwargs)

    def _get_plotting_kwargs(self):
        # Common plotting options; the plotter's config may override labels.
        return {
            'xlabel': self.myc.get('xlabel', 'Neuron #'),
            'ylabel': self.myc.get('ylabel', 'Neuron #'),
            'use_title': self.myc.get('use_title', True),
        }

    def plotOutgoing(self, gIdx, type, neuronIdx, trialNum=0, **kw):
        '''Plot outgoing weights from a single neuron to all other neurons.

        Parameters
        ----------
        gIdx : int
            Index into the connection parameter space.
        type : str
            'E': weights of an E cell onto the I population ('g_IE');
            'I': weights of an I cell onto the E population ('g_EI').
        neuronIdx : int
            Index of the pre-synaptic neuron.
        trialNum : int
            Trial number to take the data from.
        '''
        use_title = kw.pop('use_title', True)
        data = self.env.ps.conn[0][gIdx][trialNum].data
        if type == 'E':
            var = 'g_IE'
            Nx = DS.getNetParam(data, 'Ni_x')
            Ny = DS.getNetParam(data, 'Ni_y')
            kw['title'] = 'E cell $\\rightarrow$ I cells'
        elif type == 'I':
            var = 'g_EI'
            Nx = DS.getNetParam(data, 'Ne_x')
            Ny = DS.getNetParam(data, 'Ne_y')
            kw['title'] = 'I cell $\\rightarrow$ E cells'
        else:
            # Fail loudly; previously an unknown type fell through and
            # crashed later with a NameError on ``var``.
            raise ValueError("type must be 'E' or 'I', got %r" % (type,))
        if not use_title:
            kw['title'] = ''

        # Column of the weight matrix = all outgoing weights of the neuron,
        # reshaped onto the 2D sheet of the target population.
        conns = np.reshape(data[var][:, neuronIdx], (Ny, Nx))
        pconn.plot2DWeightMatrix(conns, **kw)

    def plotIncoming(self, gIdx, type, neuronIdx, trialNum=0, **kw):
        '''Plot incoming weights of a single neuron from all other neurons.

        Parameters
        ----------
        gIdx : int
            Index into the connection parameter space.
        type : str
            'I': weights received by an I cell from the E population ('g_IE');
            'E': weights received by an E cell from the I population ('g_EI').
        neuronIdx : int
            Index of the post-synaptic neuron.
        trialNum : int
            Trial number to take the data from.
        '''
        use_title = kw.pop('use_title', True)
        data = self.env.ps.conn[0][gIdx][trialNum].data
        if type == 'I':
            var = 'g_IE'
            Nx = DS.getNetParam(data, 'Ne_x')
            Ny = DS.getNetParam(data, 'Ne_y')
            kw['title'] = 'E cells $\\rightarrow$ I cell'
        elif type == 'E':
            var = 'g_EI'
            Nx = DS.getNetParam(data, 'Ni_x')
            Ny = DS.getNetParam(data, 'Ni_y')
            kw['title'] = 'I cells $\\rightarrow$ E cell'
        else:
            # Same explicit validation as in plotOutgoing().
            raise ValueError("type must be 'E' or 'I', got %r" % (type,))
        if not use_title:
            kw['title'] = ''

        # Row of the weight matrix = all incoming weights of the neuron,
        # reshaped onto the 2D sheet of the source population.
        conns = np.reshape(data[var][neuronIdx, :], (Ny, Nx))
        pconn.plot2DWeightMatrix(conns, **kw)
class WeightOutE2IPlotter(WeightPlotter):
    '''Outgoing E-->I connections.'''

    def __init__(self, *args, **kwargs):
        super(WeightOutE2IPlotter, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Render the outgoing E-->I weight map and save it as a PDF.'''
        fig = plt.figure(figsize=self.myc['fig_size'])
        self.plotOutgoing(self.myc['g_idx'], "E", self.myc['neuron_idx'],
                          **self._get_plotting_kwargs())
        fig.tight_layout()
        out_path = self.get_fname("/connections_pcolor_out_E2I.pdf")
        plt.savefig(out_path, dpi=300, transparent=True)
        plt.close()
class WeightOutI2EPlotter(WeightPlotter):
    '''Outgoing I-->E connections.'''

    def __init__(self, *args, **kwargs):
        super(WeightOutI2EPlotter, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Render the outgoing I-->E weight map and save it as a PDF.'''
        fig = plt.figure(figsize=self.myc['fig_size'])
        self.plotOutgoing(self.myc['g_idx'], "I", self.myc['neuron_idx'],
                          **self._get_plotting_kwargs())
        fig.tight_layout()
        out_path = self.get_fname("/connections_pcolor_out_I2E.pdf")
        plt.savefig(out_path, dpi=300, transparent=True)
        plt.close()
class WeightInE2IPlotter(WeightPlotter):
    '''Incoming E-->I connections.'''

    def __init__(self, *args, **kwargs):
        super(WeightInE2IPlotter, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Render the incoming E-->I weight map and save it as a PDF.'''
        fig_size = self.myc['fig_size']
        g_idx = self.myc['g_idx']
        neuron_idx = self.myc['neuron_idx']

        # Out of curiosity: plot the weights to one neuron (incoming)
        # E-->I
        fig = plt.figure('g_in_E2I', figsize=fig_size)
        self.plotIncoming(g_idx, "I", neuron_idx,
                          **self._get_plotting_kwargs())
        fig.tight_layout()
        fname = self.get_fname("/connections_pcolor_in_E2I.pdf")
        plt.savefig(fname, dpi=300, transparent=True)
        # Close after saving, consistent with the outgoing-weight plotters;
        # the named figure would otherwise persist across calls.
        plt.close()
class WeightInI2EPlotter(WeightPlotter):
    '''Incoming I-->E connections.'''

    def __init__(self, *args, **kwargs):
        super(WeightInI2EPlotter, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Render the incoming I-->E weight map and save it as a PDF.'''
        fig_size = self.myc['fig_size']
        g_idx = self.myc['g_idx']
        neuron_idx = self.myc['neuron_idx']

        # I-->E
        fig = plt.figure('g_in_I2E', figsize=fig_size)
        self.plotIncoming(g_idx, "E", neuron_idx,
                          **self._get_plotting_kwargs())
        fig.tight_layout()
        fname = self.get_fname("/connections_pcolor_in_I2E.pdf")
        plt.savefig(fname, dpi=300, transparent=True)
        # Close after saving, consistent with the outgoing-weight plotters;
        # the named figure would otherwise persist across calls.
        plt.close()
class WeightGridPlotter(WeightPlotter):
    '''Color plots of weights of selected neurons in a grid.'''
    def __init__(self, *args, **kwargs):
        super(WeightGridPlotter, self).__init__(*args, **kwargs)

    def plot(self, *args, **kwargs):
        '''Plot a 2x2 grid of outgoing/incoming weight maps plus a colorbar.

        The grid and the colorbar are saved as two separate PDF files.
        '''
        g_idx = self.myc['g_idx']
        neuron_idx = self.myc['neuron_idx']
        l, b, r, t = self.myc['bbox_rect']

        fig = self._get_final_fig(self.myc['fig_size'])
        gs = GridSpec(2, 2)
        gs.update(left=l, right=r, bottom=b, top=t, hspace=0)

        # E-->I outgoing
        ax = fig.add_subplot(gs[0, 0])
        self.plotOutgoing(g_idx, "E", neuron_idx, ax=ax, xlabel='', ylabel='',
                          use_title=False)
        ax.set_xticks([])

        # I-->E input
        ax = fig.add_subplot(gs[0, 1])
        self.plotIncoming(g_idx, "E", neuron_idx, ax=ax, ylabel='', xlabel='',
                          use_title=False)
        ax.set_xticks([])
        ax.set_yticks([])

        # I-->E outgoing
        ax = fig.add_subplot(gs[1, 0])
        self.plotOutgoing(g_idx, "I", neuron_idx, ax=ax, use_title=False,
                          xlabel='', ylabel='')

        # E-->I input
        ax = fig.add_subplot(gs[1, 1])
        self.plotIncoming(g_idx, "I", neuron_idx, ax=ax, xlabel='', ylabel='',
                          use_title=False)
        ax.set_yticks([])

        fname = self.get_fname("/connections_pcolor_grid.pdf")
        plt.savefig(fname, dpi=300, transparent=True)
        plt.close()

        # Add an extra colorbar in its own small figure, with symbolic
        # tick labels (0 to g_E/I).
        fig = self._get_final_fig(self.myc['cbar_fig_size'])
        ax_cbar = fig.add_axes([0.05, 0.80, 0.8, 0.15])
        cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=mpl.cm.jet,
                                         norm=mpl.colors.Normalize(vmin=0,
                                                                   vmax=1),
                                         ticks=[0, 1],
                                         orientation='horizontal')
        ax_cbar.xaxis.set_ticklabels(['0', '$g_{E/I}$'])
        fname_cbar = self.get_fname("/connections_pcolor_grid_colorbar.pdf")
        plt.savefig(fname_cbar, dpi=300, transparent=True)
        plt.close()
class Burak2009ConnectionPlotter(FigurePlotter):
    '''Outgoing weight profiles of the Burak and Fiete (2009) model.

    Plots four panels, one per preferred direction (up, down, left, right),
    each showing the outgoing weight profile of a neuron at the origin.
    '''

    def __init__(self, *args, **kwargs):
        super(Burak2009ConnectionPlotter, self).__init__(*args, **kwargs)

    def compute_weights(self, X1, X2, a, gamma, beta, l, pref_theta):
        '''Compute the outgoing weights between neurons at positions specified
        by ``X1`` and ``X2``. See Burak and Fiete (2009).

        Parameters
        ----------
        X1, X2 : Position2D
            Positions of the source and target neurons; ``X2`` components may
            be arrays, in which case the result is an array of weights.
        a, gamma, beta : float
            Amplitude and width parameters of the difference-of-Gaussians
            profile.
        l : float
            Magnitude of the center-surround shift along ``pref_theta``.
        pref_theta : Position2D
            Unit vector of the neuron's preferred direction.
        '''
        X_sq = ((X1.x - X2.x - l * pref_theta.x)**2 +
                (X1.y - X2.y - l * pref_theta.y)**2)
        return a * np.exp(-gamma * X_sq) - np.exp(-beta * X_sq)

    def plot(self, *args, **kwargs):
        '''Plot the four shifted weight profiles and save them as one PDF.'''
        # Parameter values as used in the original code.
        lambda_net = 20.
        a = 1.
        beta = 3. / lambda_net**2
        gamma = 1.05 * beta
        l = 5.

        # Source neuron at the origin; targets on a square grid around it.
        X1 = Position2D(0., 0.)
        n_range = 30
        X2_x, X2_y = np.meshgrid(np.arange(-n_range, n_range),
                                 np.arange(-n_range, n_range))
        X2 = Position2D(X2_x, X2_y)

        fig = self._get_final_fig(self.myc['fig_size'])

        # One panel per preferred direction: up, down, left, right.
        pref_dirs = [Position2D(0, -1), Position2D(0, 1),
                     Position2D(1, 0), Position2D(-1, 0)]
        for panel_idx, pref_theta in enumerate(pref_dirs):
            ax = fig.add_subplot(2, 2, panel_idx + 1)
            w = self.compute_weights(X1, X2, a, gamma, beta, l, pref_theta)
            ax.pcolor(X2.x, X2.y, w, rasterized=True)
            ax.set_xticks([])
            ax.set_yticks([])

        fig.tight_layout()
        fname = self.config['output_dir'] + "/intro_burak2009_conn_weights.pdf"
        fig.savefig(fname, dpi=300, transparent=True)
        plt.close()
| MattNolanLab/ei-attractor | noisefigs/noisefigs/plotters/connections.py | Python | gpl-3.0 | 16,318 | [
"NEURON"
] | 910a86fc0dc2df2a26335160a37628e1a48d2593f00f19e7ccc0eb5278489c5b |
"""Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower und upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
from inspect import signature
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..base import clone
from ..utils.validation import _num_samples
import warnings
from sklearn.exceptions import ConvergenceWarning
def _check_length_scale(X, length_scale):
    """Validate ``length_scale`` against the data ``X``.

    Squeezes the value to a float scalar or 1-D float array and checks that
    an anisotropic (1-D) length-scale has one entry per feature of ``X``.
    """
    scale = np.squeeze(length_scale).astype(float)
    ndim = np.ndim(scale)
    if ndim > 1:
        raise ValueError("length_scale cannot be of dimension greater than 1")
    if ndim == 1 and scale.shape[0] != X.shape[1]:
        raise ValueError("Anisotropic kernel must have the same number of "
                         "dimensions as data (%d!=%d)"
                         % (scale.shape[0], X.shape[1]))
    return scale
class Hyperparameter(namedtuple('Hyperparameter',
                                ('name', 'value_type', 'bounds',
                                 'n_elements', 'fixed'))):
    """Specification of a kernel hyperparameter, stored as a namedtuple.

    .. versionadded:: 0.18

    Attributes
    ----------
    name : str
        Name of the hyperparameter. A kernel using a hyperparameter named
        "x" must expose the attributes ``self.x`` and ``self.x_bounds``.

    value_type : str
        Type of the hyperparameter; only "numeric" is currently supported.

    bounds : pair of floats >= 0 or "fixed"
        Lower and upper bound on the parameter. For ``n_elements > 1`` a
        pair of 1-D arrays with ``n_elements`` entries each may be given
        instead. Passing the string "fixed" freezes the hyperparameter's
        value.

    n_elements : int, default=1
        Number of elements of the hyperparameter value: 1 means a scalar;
        larger values describe vector-valued hyperparameters such as
        anisotropic length-scales.

    fixed : bool, default=None
        Whether the hyperparameter is excluded from tuning. When None, this
        is derived from ``bounds`` (fixed iff ``bounds == "fixed"``).
    """

    # Subclassing a namedtuple would normally reintroduce a per-instance
    # __dict__; an empty __slots__ keeps instances as compact as the raw
    # namedtuple, since this subclass adds no extra attributes.
    __slots__ = ()

    def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
        if not (isinstance(bounds, str) and bounds == "fixed"):
            bounds = np.atleast_2d(bounds)
            if n_elements > 1:  # vector-valued parameter
                # A single (low, high) pair is broadcast to every element.
                if bounds.shape[0] == 1:
                    bounds = np.repeat(bounds, n_elements, 0)
                elif bounds.shape[0] != n_elements:
                    raise ValueError("Bounds on %s should have either 1 or "
                                     "%d dimensions. Given are %d"
                                     % (name, n_elements, bounds.shape[0]))

        if fixed is None:
            fixed = isinstance(bounds, str) and bounds == "fixed"
        return super(Hyperparameter, cls).__new__(
            cls, name, value_type, bounds, n_elements, fixed)

    def __eq__(self, other):
        # Mainly a testing utility: field-wise comparison where the bounds
        # array is compared element-wise.
        return (self.name == other.name
                and self.value_type == other.value_type
                and np.all(self.bounds == other.bounds)
                and self.n_elements == other.n_elements
                and self.fixed == other.fixed)
class Kernel(metaclass=ABCMeta):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
    """Get parameters of this kernel.

    Parameters
    ----------
    deep : bool, default=True
        If True, will return the parameters for this estimator and
        contained subobjects that are estimators.
        NOTE: accepted for estimator-API compatibility; the value is not
        used in this implementation.

    Returns
    -------
    params : dict
        Parameter names mapped to their values.
    """
    params = dict()

    # introspect the constructor arguments to find the model parameters
    # to represent
    cls = self.__class__
    # 'deprecated_original' points at the pre-wrapping __init__ when a
    # deprecation wrapper is installed; otherwise use __init__ directly.
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    init_sign = signature(init)
    args, varargs = [], []
    for parameter in init_sign.parameters.values():
        # Every named constructor parameter (except self and **kwargs) is
        # considered a kernel parameter; *args is collected for the check
        # below.
        if (parameter.kind != parameter.VAR_KEYWORD and
            parameter.name != 'self'):
            args.append(parameter.name)
        if parameter.kind == parameter.VAR_POSITIONAL:
            varargs.append(parameter.name)

    # Kernels with *args in __init__ cannot be introspected reliably.
    if len(varargs) != 0:
        raise RuntimeError("scikit-learn kernels should always "
                           "specify their parameters in the signature"
                           " of their __init__ (no varargs)."
                           " %s doesn't follow this convention."
                           % (cls, ))

    # Parameter values are read back from same-named instance attributes.
    for arg in args:
        params[arg] = getattr(self, arg)

    return params
def set_params(self, **params):
    """Set the parameters of this kernel.

    The method works on simple kernels as well as on nested kernels.
    The latter have parameters of the form ``<component>__<parameter>``
    so that it's possible to update each component of a nested object.

    Returns
    -------
    self
    """
    if not params:
        # Simple optimisation to gain speed (inspect is slow)
        return self
    valid_params = self.get_params(deep=True)
    for key, value in params.items():
        # Split off at most one '__' level; the remainder is delegated.
        split = key.split('__', 1)
        if len(split) > 1:
            # nested objects case
            name, sub_name = split
            if name not in valid_params:
                raise ValueError('Invalid parameter %s for kernel %s. '
                                 'Check the list of available parameters '
                                 'with `kernel.get_params().keys()`.' %
                                 (name, self))
            sub_object = valid_params[name]
            # Recurse into the sub-kernel with the remaining key part.
            sub_object.set_params(**{sub_name: value})
        else:
            # simple objects case
            if key not in valid_params:
                # NOTE(review): this message formats the kernel via its
                # class name, whereas the nested branch above interpolates
                # the kernel object itself — confirm whether this
                # inconsistency is intentional.
                raise ValueError('Invalid parameter %s for kernel %s. '
                                 'Check the list of available parameters '
                                 'with `kernel.get_params().keys()`.' %
                                 (key, self.__class__.__name__))
            setattr(self, key, value)
    return self
def clone_with_theta(self, theta):
    """Returns a clone of self with given hyperparameters theta.

    Parameters
    ----------
    theta : ndarray of shape (n_dims,)
        The hyperparameters
    """
    new_kernel = clone(self)
    new_kernel.theta = theta
    return new_kernel
@property
def n_dims(self):
    """Returns the number of non-fixed hyperparameters of the kernel."""
    # theta is a 1-D array, so its length equals shape[0].
    return len(self.theta)
@property
def hyperparameters(self):
    """Returns a list of all hyperparameter specifications."""
    # Hyperparameters are exposed as attributes named
    # "hyperparameter_<name>"; collect them in dir() order.
    return [getattr(self, name) for name in dir(self)
            if name.startswith("hyperparameter_")]
@property
def theta(self):
    """Returns the (flattened, log-transformed) non-fixed hyperparameters.

    Note that theta are typically the log-transformed values of the
    kernel's hyperparameters as this representation of the search space
    is more amenable for hyperparameter search, as hyperparameters like
    length-scales naturally live on a log-scale.

    Returns
    -------
    theta : ndarray of shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel
    """
    theta = []
    params = self.get_params()
    # Collect free hyperparameter values in the order in which the
    # ``hyperparameters`` property lists them (dir() order); the setter
    # below relies on the same ordering.
    for hyperparameter in self.hyperparameters:
        if not hyperparameter.fixed:
            theta.append(params[hyperparameter.name])
    if len(theta) > 0:
        # hstack flattens vector-valued hyperparameters into one vector.
        return np.log(np.hstack(theta))
    else:
        return np.array([])
@theta.setter
def theta(self, theta):
    """Sets the (flattened, log-transformed) non-fixed hyperparameters.

    Parameters
    ----------
    theta : ndarray of shape (n_dims,)
        The non-fixed, log-transformed hyperparameters of the kernel
    """
    params = self.get_params()
    i = 0  # read position within the flat theta vector
    for hyperparameter in self.hyperparameters:
        if hyperparameter.fixed:
            continue
        if hyperparameter.n_elements > 1:
            # vector-valued parameter: consume n_elements entries at once
            params[hyperparameter.name] = np.exp(
                theta[i:i + hyperparameter.n_elements])
            i += hyperparameter.n_elements
        else:
            # scalar parameter; exp undoes the log-transform of the getter
            params[hyperparameter.name] = np.exp(theta[i])
            i += 1

    # All entries of theta must have been consumed exactly.
    if i != len(theta):
        raise ValueError("theta has not the correct number of entries."
                         " Should be %d; given are %d"
                         % (i, len(theta)))
    self.set_params(**params)
@property
def bounds(self):
    """The log-transformed bounds on theta.

    Returns
    -------
    bounds : ndarray of shape (n_dims, 2)
        The log-transformed bounds on the kernel's hyperparameters theta.
    """
    free = [hp.bounds for hp in self.hyperparameters if not hp.fixed]
    if not free:
        return np.array([])
    return np.log(np.vstack(free))
def __add__(self, b):
    """Return the Sum kernel ``self + b``; scalars are wrapped as constants."""
    other = b if isinstance(b, Kernel) else ConstantKernel(b)
    return Sum(self, other)
def __radd__(self, b):
    """Return the Sum kernel ``b + self``; scalars are wrapped as constants."""
    other = b if isinstance(b, Kernel) else ConstantKernel(b)
    return Sum(other, self)
def __mul__(self, b):
    """Return the Product kernel ``self * b``; scalars become constants."""
    other = b if isinstance(b, Kernel) else ConstantKernel(b)
    return Product(self, other)
def __rmul__(self, b):
    """Return the Product kernel ``b * self``; scalars become constants."""
    other = b if isinstance(b, Kernel) else ConstantKernel(b)
    return Product(other, self)
def __pow__(self, b):
    """Return the Exponentiation kernel ``self ** b``."""
    return Exponentiation(self, b)
def __eq__(self, b):
    """Kernels compare equal iff they share a type and all parameter values."""
    if type(self) != type(b):
        return False
    params_a = self.get_params()
    params_b = b.get_params()
    # Compare over the union of keys; np.any copes with array-valued params.
    keys = set(params_a) | set(params_b)
    return not any(np.any(params_a.get(k) != params_b.get(k)) for k in keys)
def __repr__(self):
    """Render as ``ClassName(t1, t2, ...)`` with theta to 3 significant digits."""
    formatted = ", ".join("{0:.3g}".format(value) for value in self.theta)
    return "{0}({1})".format(self.__class__.__name__, formatted)
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
    """Evaluate the kernel.

    Abstract; concrete kernels return k(X, Y) (k(X, X) when Y is None)
    and, when ``eval_gradient`` is True, also the gradient of k(X, X)
    with respect to the log-transformed hyperparameters.
    """
@abstractmethod
def diag(self, X):
    """Returns the diagonal of the kernel k(X, X).

    Abstract; concrete kernels must implement this. The result of this
    method is identical to np.diag(self(X)); however, it can be evaluated
    more efficiently since only the diagonal is evaluated.

    Parameters
    ----------
    X : array-like of shape (n_samples,)
        Left argument of the returned kernel k(X, Y)

    Returns
    -------
    K_diag : ndarray of shape (n_samples_X,)
        Diagonal of kernel k(X, X)
    """
@abstractmethod
def is_stationary(self):
    """Returns whether the kernel is stationary.

    A stationary kernel depends only on the difference X - Y, not on the
    absolute locations (see StationaryKernelMixin).
    """
@property
def requires_vector_input(self):
    """Whether the kernel only works on fixed-length feature vectors.

    True by default for backward compatibility; kernels that operate on
    generic objects (see GenericKernelMixin) override this to False.
    """
    return True
def _check_bounds_params(self):
    """Called after fitting to warn if bounds may have been too tight."""
    list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
    idx = 0
    for hyp in self.hyperparameters:
        if hyp.fixed:
            continue
        for dim in range(hyp.n_elements):
            # At most one warning per dimension; the lower bound is
            # checked first and takes precedence over the upper one.
            if list_close[idx, 0]:
                side, action, bound = "lower", "Decreasing", hyp.bounds[dim][0]
            elif list_close[idx, 1]:
                side, action, bound = "upper", "Increasing", hyp.bounds[dim][1]
            else:
                idx += 1
                continue
            warnings.warn("The optimal value found for dimension %s of "
                          "parameter %s is close to the specified %s "
                          "bound %s. %s the bound and calling fit again "
                          "may find a better value."
                          % (dim, hyp.name, side, bound, action),
                          ConvergenceWarning)
            idx += 1
class NormalizedKernelMixin:
    """Mixin for kernels which are normalized: k(X, X) = 1.

    .. versionadded:: 0.18
    """

    def diag(self, X):
        """Return the diagonal of the kernel k(X, X).

        Because the kernel is normalized, the diagonal is a vector of
        ones; this avoids evaluating the full kernel matrix, although the
        result is identical to np.diag(self(X)).

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return np.ones(X.shape[0])
class StationaryKernelMixin:
    """Mixin for kernels which are stationary: k(X, Y) = f(X - Y).

    .. versionadded:: 0.18
    """

    def is_stationary(self):
        """Return True: stationary kernels depend only on X - Y."""
        return True
class GenericKernelMixin:
    """Mixin for kernels which operate on generic objects such as
    variable-length sequences, trees, and graphs.

    .. versionadded:: 0.22
    """

    @property
    def requires_vector_input(self):
        """False: the kernel accepts arbitrary objects, not only vectors."""
        return False
class CompoundKernel(Kernel):
    """Kernel which is composed of a set of other kernels.

    Evaluating the compound kernel returns the results of all constituent
    kernels stacked along an additional trailing axis.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernels : list of Kernels
        The other kernels

    Examples
    --------
    >>> from sklearn.gaussian_process.kernels import WhiteKernel
    >>> from sklearn.gaussian_process.kernels import RBF
    >>> from sklearn.gaussian_process.kernels import CompoundKernel
    >>> kernel = CompoundKernel(
    ...     [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
    >>> print(kernel.bounds)
    [[-11.51292546  11.51292546]
     [-11.51292546  11.51292546]]
    >>> print(kernel.n_dims)
    2
    >>> print(kernel.theta)
    [1.09861229 0.69314718]
    """

    def __init__(self, kernels):
        self.kernels = kernels

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            Present for estimator-API compatibility; the kernel list is
            returned as a single ``kernels`` entry either way.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        return dict(kernels=self.kernels)

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.hstack([kernel.theta for kernel in self.kernels])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        # BUG FIX: CompoundKernel has no `k1` attribute (that belongs to
        # KernelOperator), so the former `self.k1.n_dims` raised
        # AttributeError.  The per-kernel dimensionality comes from the
        # kernel list itself.  NOTE(review): this still assumes every
        # sub-kernel exposes the same number of hyperparameters.
        k_dims = self.kernels[0].n_dims
        for i, kernel in enumerate(self.kernels):
            kernel.theta = theta[i * k_dims:(i + 1) * k_dims]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return np.vstack([kernel.bounds for kernel in self.kernels])

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Note that this compound kernel returns the results of all simple
        kernels stacked along an additional axis.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_X, n_features) or list of object, \
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
            Kernel k(X, Y)
        K_gradient : ndarray of shape \
                (n_samples_X, n_samples_X, n_dims, n_kernels), optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        if eval_gradient:
            K = []
            K_grad = []
            for kernel in self.kernels:
                K_single, K_grad_single = kernel(X, Y, eval_gradient)
                K.append(K_single)
                # Give each gradient a trailing kernel axis to concatenate on.
                K_grad.append(K_grad_single[..., np.newaxis])
            return np.dstack(K), np.concatenate(K_grad, 3)
        else:
            return np.dstack([kernel(X, Y, eval_gradient)
                              for kernel in self.kernels])

    def __eq__(self, b):
        """Equal iff same type and pairwise-equal kernel lists."""
        if type(self) != type(b) or len(self.kernels) != len(b.kernels):
            return False
        return np.all([self.kernels[i] == b.kernels[i]
                       for i in range(len(self.kernels))])

    def is_stationary(self):
        """Returns whether the kernel is stationary (all sub-kernels are)."""
        return np.all([kernel.is_stationary() for kernel in self.kernels])

    @property
    def requires_vector_input(self):
        """True if any sub-kernel requires fixed-length feature vectors."""
        return np.any([kernel.requires_vector_input
                       for kernel in self.kernels])

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to `np.diag(self(X))`;
        however, it can be evaluated more efficiently since only the
        diagonal is evaluated.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X, n_kernels)
            Diagonal of kernel k(X, X)
        """
        return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
    """Base class for all kernel operators.

    A kernel operator combines two base kernels ``k1`` and ``k2`` and
    exposes their hyperparameters as one flattened parameter space.

    .. versionadded:: 0.18
    """

    def __init__(self, k1, k2):
        self.k1 = k1
        self.k2 = k2

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, also include the parameters of the two base kernels,
            prefixed with ``k1__`` / ``k2__``.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict(k1=self.k1, k2=self.k2)
        if deep:
            for prefix, kernel in (("k1__", self.k1), ("k2__", self.k2)):
                for name, value in kernel.get_params().items():
                    params[prefix + name] = value
        return params

    @property
    def hyperparameters(self):
        """All hyperparameter specifications, prefixed with ``k1__``/``k2__``."""
        specs = []
        for prefix, kernel in (("k1__", self.k1), ("k2__", self.k2)):
            for hp in kernel.hyperparameters:
                specs.append(Hyperparameter(prefix + hp.name, hp.value_type,
                                            hp.bounds, hp.n_elements))
        return specs

    @property
    def theta(self):
        """The (flattened, log-transformed) non-fixed hyperparameters.

        Concatenation of k1's theta followed by k2's theta.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel.
        """
        return np.append(self.k1.theta, self.k2.theta)

    @theta.setter
    def theta(self, theta):
        """Set the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel.
        """
        # The first k1.n_dims entries belong to k1, the rest to k2.
        split = self.k1.n_dims
        self.k1.theta = theta[:split]
        self.k2.theta = theta[split:]

    @property
    def bounds(self):
        """The log-transformed bounds on theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta.
        """
        b1, b2 = self.k1.bounds, self.k2.bounds
        if b1.size == 0:
            return b2
        if b2.size == 0:
            return b1
        return np.vstack((b1, b2))

    def __eq__(self, b):
        """Equal iff same type and equal base kernels (in either order)."""
        if type(self) != type(b):
            return False
        same_order = self.k1 == b.k1 and self.k2 == b.k2
        swapped = self.k1 == b.k2 and self.k2 == b.k1
        return same_order or swapped

    def is_stationary(self):
        """Stationary iff both base kernels are stationary."""
        return self.k1.is_stationary() and self.k2.is_stationary()

    @property
    def requires_vector_input(self):
        """True if either base kernel requires fixed-length feature vectors."""
        return (self.k1.requires_vector_input or
                self.k2.requires_vector_input)
class Sum(KernelOperator):
    """The `Sum` kernel combines two kernels :math:`k_1` and :math:`k_2` via

    .. math::
        k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)

    Because `__add__` is overridden on kernels, ``RBF() + RBF()`` is
    equivalent to ``Sum(RBF(), RBF())``.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the sum-kernel
    k2 : Kernel
        The second base-kernel of the sum-kernel
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_X, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Whether to also return the gradient with respect to the
            log-transformed kernel hyperparameters.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X); only returned when
            `eval_gradient` is True.
        """
        if not eval_gradient:
            return self.k1(X, Y) + self.k2(X, Y)
        K1, K1_grad = self.k1(X, Y, eval_gradient=True)
        K2, K2_grad = self.k2(X, Y, eval_gradient=True)
        # Gradients of the two summands are stacked along the last axis.
        return K1 + K2, np.dstack((K1_grad, K2_grad))

    def diag(self, X):
        """Diagonal of k(X, X), computed without the full kernel matrix.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) + self.k2.diag(X)

    def __repr__(self):
        return f"{self.k1} + {self.k2}"
class Product(KernelOperator):
    """The `Product` kernel combines two kernels :math:`k_1` and :math:`k_2` via

    .. math::
        k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)

    Because `__mul__` is overridden on kernels, ``RBF() * RBF()`` is
    equivalent to ``Product(RBF(), RBF())``.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel
        The first base-kernel of the product-kernel
    k2 : Kernel
        The second base-kernel of the product-kernel
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_Y, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Whether to also return the gradient with respect to the
            log-transformed kernel hyperparameters.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X); only returned when
            `eval_gradient` is True.
        """
        if not eval_gradient:
            return self.k1(X, Y) * self.k2(X, Y)
        K1, K1_grad = self.k1(X, Y, eval_gradient=True)
        K2, K2_grad = self.k2(X, Y, eval_gradient=True)
        # Product rule: each factor's gradient is scaled by the other factor.
        return K1 * K2, np.dstack((K1_grad * K2[:, :, np.newaxis],
                                   K2_grad * K1[:, :, np.newaxis]))

    def diag(self, X):
        """Diagonal of k(X, X), computed without the full kernel matrix.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) * self.k2.diag(X)

    def __repr__(self):
        return f"{self.k1} * {self.k2}"
class Exponentiation(Kernel):
    """Exponentiate a base kernel by a scalar parameter :math:`p`:

    .. math::
        k_{exp}(X, Y) = k(X, Y) ^p

    Because `__pow__` is overridden on kernels, ``RBF() ** 2`` is
    equivalent to ``Exponentiation(RBF(), 2)``.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : Kernel
        The base kernel
    exponent : float
        The exponent for the base kernel
    """

    def __init__(self, kernel, exponent):
        self.kernel = kernel
        self.exponent = exponent

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : bool, default=True
            If True, also include the parameters of the wrapped kernel,
            prefixed with ``kernel__``.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        params = dict(kernel=self.kernel, exponent=self.exponent)
        if deep:
            for name, value in self.kernel.get_params().items():
                params["kernel__" + name] = value
        return params

    @property
    def hyperparameters(self):
        """Hyperparameters of the wrapped kernel, prefixed with ``kernel__``."""
        return [Hyperparameter("kernel__" + hp.name, hp.value_type,
                               hp.bounds, hp.n_elements)
                for hp in self.kernel.hyperparameters]

    @property
    def theta(self):
        """The (flattened, log-transformed) non-fixed hyperparameters.

        Delegates to the wrapped kernel; the exponent itself is not tuned.

        Returns
        -------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel.
        """
        return self.kernel.theta

    @theta.setter
    def theta(self, theta):
        """Set the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : ndarray of shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel.
        """
        self.kernel.theta = theta

    @property
    def bounds(self):
        """The log-transformed bounds on theta.

        Returns
        -------
        bounds : ndarray of shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta.
        """
        return self.kernel.bounds

    def __eq__(self, b):
        """Equal iff same type, equal base kernel and equal exponent."""
        return (type(self) == type(b)
                and self.kernel == b.kernel
                and self.exponent == b.exponent)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_Y, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Whether to also return the gradient with respect to the
            log-transformed kernel hyperparameters.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X); only returned when
            `eval_gradient` is True.
        """
        if not eval_gradient:
            K = self.kernel(X, Y, eval_gradient=False)
            return K ** self.exponent
        K, K_gradient = self.kernel(X, Y, eval_gradient=True)
        # Chain rule: d(K**p)/dtheta = p * K**(p-1) * dK/dtheta.
        K_gradient *= \
            self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
        return K ** self.exponent, K_gradient

    def diag(self, X):
        """Diagonal of k(X, X), computed without the full kernel matrix.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.kernel.diag(X) ** self.exponent

    def __repr__(self):
        return f"{self.kernel} ** {self.exponent}"

    def is_stationary(self):
        """Stationary iff the wrapped kernel is stationary."""
        return self.kernel.is_stationary()

    @property
    def requires_vector_input(self):
        """True if the wrapped kernel requires fixed-length feature vectors."""
        return self.kernel.requires_vector_input
class ConstantKernel(StationaryKernelMixin, GenericKernelMixin,
                     Kernel):
    """Constant kernel.

    Can be used as part of a product-kernel where it scales the magnitude
    of the other factor (kernel) or as part of a sum-kernel, where it
    modifies the mean of the Gaussian process.

    .. math::
        k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2

    Adding a constant kernel is equivalent to adding a constant::

        kernel = RBF() + ConstantKernel(constant_value=2)

    is the same as::

        kernel = RBF() + 2

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    constant_value : float, default=1.0
        The constant value which defines the covariance:
        k(x_1, x_2) = constant_value
    constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on `constant_value`.
        If set to "fixed", `constant_value` cannot be changed during
        hyperparameter tuning.
    """

    def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
        self.constant_value = constant_value
        self.constant_value_bounds = constant_value_bounds

    @property
    def hyperparameter_constant_value(self):
        """Hyperparameter specification for ``constant_value``."""
        return Hyperparameter(
            "constant_value", "numeric", self.constant_value_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_X, n_features) or list of object, \
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Whether to also return the gradient with respect to the
            log-transformed hyperparameters. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X); only returned when
            eval_gradient is True.

        Raises
        ------
        ValueError
            If eval_gradient is True while Y is not None.
        """
        if Y is None:
            Y = X
        elif eval_gradient:
            raise ValueError("Gradient can only be evaluated when Y is None.")
        value = self.constant_value
        # Preserve the dtype of the configured constant (e.g. int vs float).
        dtype = np.array(value).dtype
        n_x, n_y = _num_samples(X), _num_samples(Y)
        K = np.full((n_x, n_y), value, dtype=dtype)
        if not eval_gradient:
            return K
        if self.hyperparameter_constant_value.fixed:
            # Fixed hyperparameter: gradient has an empty trailing axis.
            return K, np.empty((n_x, n_x, 0))
        return K, np.full((n_x, n_x, 1), value, dtype=dtype)

    def diag(self, X):
        """Diagonal of k(X, X), computed without the full kernel matrix.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        value = self.constant_value
        return np.full(_num_samples(X), value, dtype=np.array(value).dtype)

    def __repr__(self):
        return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, GenericKernelMixin,
                  Kernel):
    """White kernel.

    The main use-case of this kernel is as part of a sum-kernel where it
    explains the noise of the signal as independently and identically
    normally-distributed. The parameter noise_level equals the variance
    of this noise.

    .. math::
        k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    noise_level : float, default=1.0
        Parameter controlling the noise level (variance)
    noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'noise_level'.
        If set to "fixed", 'noise_level' cannot be changed during
        hyperparameter tuning.
    """

    def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
        self.noise_level = noise_level
        self.noise_level_bounds = noise_level_bounds

    @property
    def hyperparameter_noise_level(self):
        """Hyperparameter specification for ``noise_level``."""
        return Hyperparameter(
            "noise_level", "numeric", self.noise_level_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Left argument of the returned kernel k(X, Y)
        Y : array-like of shape (n_samples_X, n_features) or list of object,\
                default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Whether to also return the gradient with respect to the
            log-transformed hyperparameters. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X); only returned when
            eval_gradient is True.

        Raises
        ------
        ValueError
            If eval_gradient is True while Y is not None.
        """
        if Y is not None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            # Distinct sample sets never coincide, so the block is all zeros.
            return np.zeros((_num_samples(X), _num_samples(Y)))
        n = _num_samples(X)
        K = self.noise_level * np.eye(n)
        if not eval_gradient:
            return K
        if self.hyperparameter_noise_level.fixed:
            # Fixed hyperparameter: gradient has an empty trailing axis.
            return K, np.empty((n, n, 0))
        return K, self.noise_level * np.eye(n)[:, :, np.newaxis]

    def diag(self, X):
        """Diagonal of k(X, X), computed without the full kernel matrix.

        Parameters
        ----------
        X : array-like of shape (n_samples_X, n_features) or list of object
            Argument to the kernel.

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return np.full(_num_samples(X), self.noise_level,
                       dtype=np.array(self.noise_level).dtype)

    def __repr__(self):
        return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
                                                 self.noise_level)
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length scale
parameter :math:`l>0`, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
.. math::
k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)
where :math:`l` is the length scale of the kernel and
:math:`d(\\cdot,\\cdot)` is the Euclidean distance.
For advice on how to set the length scale parameter, see e.g. [1]_.
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
.. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.8354..., 0.03228..., 0.1322...],
[0.7906..., 0.0652..., 0.1441...]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
    """Store the length scale (scalar or per-feature vector) and its bounds."""
    self.length_scale = length_scale
    self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
    """True when length_scale is a vector with more than one entry."""
    ls = self.length_scale
    return np.iterable(ls) and len(ls) > 1
@property
def hyperparameter_length_scale(self):
    """Hyperparameter spec for ``length_scale`` (vector-valued if anisotropic)."""
    if self.anisotropic:
        return Hyperparameter("length_scale", "numeric",
                              self.length_scale_bounds,
                              len(self.length_scale))
    return Hyperparameter("length_scale", "numeric",
                          self.length_scale_bounds)
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        # Broadcast/validate length_scale against X's feature dimension.
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # pdist gives the condensed (vector) form of the pairwise squared
            # distances of the rescaled inputs.
            dists = pdist(X / length_scale, metric='sqeuclidean')
            K = np.exp(-.5 * dists)
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            # squareform leaves zeros on the diagonal; k(x, x) = 1 here.
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale,
                          metric='sqeuclidean')
            K = np.exp(-.5 * dists)
        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                return K, np.empty((X.shape[0], X.shape[0], 0))
            elif not self.anisotropic or length_scale.shape[0] == 1:
                # Isotropic: `dists` already holds d(x,y)^2 / l^2, so this is
                # K * d^2 / l^2, the gradient w.r.t. the log of length_scale.
                K_gradient = \
                    (K * squareform(dists))[:, :, np.newaxis]
                return K, K_gradient
            elif self.anisotropic:
                # We need to recompute the pairwise dimension-wise distances
                K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
                    / (length_scale ** 2)
                K_gradient *= K[..., np.newaxis]
                return K, K_gradient
        else:
            return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
class Matern(RBF):
    """ Matern kernel.

    The class of Matern kernels is a generalization of the :class:`RBF`.
    It has an additional parameter :math:`\\nu` which controls the
    smoothness of the resulting function. The smaller :math:`\\nu`,
    the less smooth the approximated function is.
    As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
    the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
    becomes identical to the absolute exponential kernel.
    Important intermediate values are
    :math:`\\nu=1.5` (once differentiable functions)
    and :math:`\\nu=2.5` (twice differentiable functions).

    The kernel is given by:

    .. math::
        k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
        \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )
        \\Bigg)^\\nu K_\\nu\\Bigg(
        \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg)

    where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
    :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
    :math:`\\Gamma(\\cdot)` is the gamma function.
    See [1]_, Chapter 4, Section 4.2, for details regarding the different
    variants of the Matern kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float or ndarray of shape (n_features,), default=1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.
    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.
    nu : float, default=1.5
        The parameter nu controlling the smoothness of the learned function.
        The smaller nu, the less smooth the approximated function is.
        For nu=inf, the kernel becomes equivalent to the RBF kernel and for
        nu=0.5 to the absolute exponential kernel. Important intermediate
        values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require to evaluate the modified
        Bessel function. Furthermore, in contrast to l, nu is kept fixed to
        its initial value and not optimized.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import Matern
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9866...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8513..., 0.0368..., 0.1117...],
            [0.8086..., 0.0693..., 0.1220...]])
    """
    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
                 nu=1.5):
        # nu is deliberately not exposed as a tunable hyperparameter: it is
        # stored as-is and kept fixed during optimization (see docstring).
        super().__init__(length_scale, length_scale_bounds)
        self.nu = nu
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Condensed (vector) form of the pairwise distances.
            dists = pdist(X / length_scale, metric='euclidean')
        else:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale,
                          metric='euclidean')
        # Closed forms for the cheap special cases of nu; the final `else`
        # falls back to the full Bessel-function expression.
        if self.nu == 0.5:
            K = np.exp(-dists)
        elif self.nu == 1.5:
            K = dists * math.sqrt(3)
            K = (1. + K) * np.exp(-K)
        elif self.nu == 2.5:
            K = dists * math.sqrt(5)
            K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
        elif self.nu == np.inf:
            # nu -> inf limit recovers the RBF kernel.
            K = np.exp(-dists ** 2 / 2.0)
        else:  # general case; expensive to evaluate
            K = dists
            K[K == 0.0] += np.finfo(float).eps  # strict zeros result in nan
            tmp = (math.sqrt(2 * self.nu) * K)
            K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
            K *= tmp ** self.nu
            K *= kv(self.nu, tmp)
        if Y is None:
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)
        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                K_gradient = np.empty((X.shape[0], X.shape[0], 0))
                return K, K_gradient
            # We need to recompute the pairwise dimension-wise distances
            if self.anisotropic:
                D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
                    / (length_scale ** 2)
            else:
                D = squareform(dists**2)[:, :, np.newaxis]
            if self.nu == 0.5:
                K_gradient = K[..., np.newaxis] * D \
                    / np.sqrt(D.sum(2))[:, :, np.newaxis]
                # Diagonal (zero-distance) entries divide 0/0 above.
                K_gradient[~np.isfinite(K_gradient)] = 0
            elif self.nu == 1.5:
                K_gradient = \
                    3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
            elif self.nu == 2.5:
                tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
                K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
            elif self.nu == np.inf:
                K_gradient = D * K[..., np.newaxis]
            else:
                # approximate gradient numerically
                def f(theta):  # helper function
                    return self.clone_with_theta(theta)(X, Y)
                return K, _approx_fprime(self.theta, f, 1e-10)
            if not self.anisotropic:
                # Collapse per-dimension contributions to a single gradient.
                return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
            else:
                return K, K_gradient
        else:
            return K
    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}], nu={2:.3g})".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
                self.nu)
        else:
            return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
                self.__class__.__name__, np.ravel(self.length_scale)[0],
                self.nu)
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    """Rational Quadratic kernel.

    The RationalQuadratic kernel can be seen as a scale mixture (an infinite
    sum) of RBF kernels with different characteristic length scales. It is
    parameterized by a length scale parameter :math:`l>0` and a scale
    mixture parameter :math:`\\alpha>0`. Only the isotropic variant
    where length_scale :math:`l` is a scalar is supported at the moment.
    The kernel is given by:

    .. math::
        k(x_i, x_j) = \\left(
        1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha  l^2}\\right)^{-\\alpha}

    where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
    the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
    Euclidean distance.
    For advice on how to set the parameters, see e.g. [1]_.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float > 0, default=1.0
        The length scale of the kernel.
    alpha : float > 0, default=1.0
        Scale mixture parameter
    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.
    alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'alpha'.
        If set to "fixed", 'alpha' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `David Duvenaud (2014). "The Kernel Cookbook:
        Advice on Covariance functions".
        <https://www.cs.toronto.edu/~duvenaud/cookbook/>`_

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import RationalQuadratic
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8881..., 0.0566..., 0.05518...],
            [0.8678..., 0.0707... , 0.0614...]])
    """
    def __init__(self, length_scale=1.0, alpha=1.0,
                 length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
        # Parameters stored verbatim; no validation here (estimator convention).
        self.length_scale = length_scale
        self.alpha = alpha
        self.length_scale_bounds = length_scale_bounds
        self.alpha_bounds = alpha_bounds
    @property
    def hyperparameter_length_scale(self):
        """Hyperparameter descriptor for ``length_scale``."""
        return Hyperparameter(
            "length_scale", "numeric", self.length_scale_bounds)
    @property
    def hyperparameter_alpha(self):
        """Hyperparameter descriptor for ``alpha``."""
        return Hyperparameter("alpha", "numeric", self.alpha_bounds)
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if len(np.atleast_1d(self.length_scale)) > 1:
            raise AttributeError(
                "RationalQuadratic kernel only supports isotropic version, "
                "please use a single scalar for length_scale")
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric='sqeuclidean'))
            tmp = dists / (2 * self.alpha * self.length_scale ** 2)
            base = (1 + tmp)
            K = base ** -self.alpha
            np.fill_diagonal(K, 1)
        else:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric='sqeuclidean')
            K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
                ** -self.alpha
        if eval_gradient:
            # NOTE: `base` and `dists` come from the Y-is-None branch; this is
            # safe because eval_gradient with Y set raises above.
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = \
                    dists * K / (self.length_scale ** 2 * base)
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # l is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to alpha
            if not self.hyperparameter_alpha.fixed:
                alpha_gradient = \
                    K * (-self.alpha * np.log(base)
                         + dists / (2 * self.length_scale ** 2 * base))
                alpha_gradient = alpha_gradient[:, :, np.newaxis]
            else:  # alpha is kept fixed
                alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # Stacking order (alpha first) matches the theta ordering.
            return K, np.dstack((alpha_gradient, length_scale_gradient))
        else:
            return K
    def __repr__(self):
        return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
            self.__class__.__name__, self.alpha, self.length_scale)
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
    r"""Exp-Sine-Squared kernel (aka periodic kernel).

    The ExpSineSquared kernel allows one to model functions which repeat
    themselves exactly. It is parameterized by a length scale
    parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
    Only the isotropic variant where :math:`l` is a scalar is
    supported at the moment. The kernel is given by:

    .. math::
        k(x_i, x_j) = \text{exp}\left(-
        \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right)

    where :math:`l` is the length scale of the kernel, :math:`p` the
    periodicity of the kernel and :math:`d(\cdot,\cdot)` is the
    Euclidean distance.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    length_scale : float > 0, default=1.0
        The length scale of the kernel.
    periodicity : float > 0, default=1.0
        The periodicity of the kernel.
    length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'length_scale'.
        If set to "fixed", 'length_scale' cannot be changed during
        hyperparameter tuning.
    periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'periodicity'.
        If set to "fixed", 'periodicity' cannot be changed during
        hyperparameter tuning.

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import ExpSineSquared
    >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
    >>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
    >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.0144...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
    """
    def __init__(self, length_scale=1.0, periodicity=1.0,
                 length_scale_bounds=(1e-5, 1e5),
                 periodicity_bounds=(1e-5, 1e5)):
        # Parameters stored verbatim; no validation here (estimator convention).
        self.length_scale = length_scale
        self.periodicity = periodicity
        self.length_scale_bounds = length_scale_bounds
        self.periodicity_bounds = periodicity_bounds
    @property
    def hyperparameter_length_scale(self):
        """Returns the length scale"""
        return Hyperparameter(
            "length_scale", "numeric", self.length_scale_bounds)
    @property
    def hyperparameter_periodicity(self):
        """Hyperparameter descriptor for ``periodicity``."""
        return Hyperparameter(
            "periodicity", "numeric", self.periodicity_bounds)
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
                optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is None:
            dists = squareform(pdist(X, metric='euclidean'))
            arg = np.pi * dists / self.periodicity
            sin_of_arg = np.sin(arg)
            K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
        else:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            dists = cdist(X, Y, metric='euclidean')
            K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
                              / self.length_scale) ** 2)
        if eval_gradient:
            # NOTE: `arg`/`sin_of_arg` come from the Y-is-None branch; this is
            # safe because eval_gradient with Y set raises above.
            cos_of_arg = np.cos(arg)
            # gradient with respect to length_scale
            if not self.hyperparameter_length_scale.fixed:
                length_scale_gradient = \
                    4 / self.length_scale**2 * sin_of_arg**2 * K
                length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
            else:  # length_scale is kept fixed
                length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
            # gradient with respect to p
            if not self.hyperparameter_periodicity.fixed:
                periodicity_gradient = \
                    4 * arg / self.length_scale**2 * cos_of_arg \
                    * sin_of_arg * K
                periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
            else:  # p is kept fixed
                periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
            return K, np.dstack((length_scale_gradient, periodicity_gradient))
        else:
            return K
    def __repr__(self):
        return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
            self.__class__.__name__, self.length_scale, self.periodicity)
class DotProduct(Kernel):
    r"""Dot-Product kernel.

    The DotProduct kernel is non-stationary and can be obtained from linear
    regression by putting :math:`N(0, 1)` priors on the coefficients
    of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
    on the bias. The DotProduct kernel is invariant to a rotation of
    the coordinates about the origin, but not translations.
    It is parameterized by a parameter sigma_0 :math:`\sigma_0`
    which controls the inhomogenity of the kernel. For :math:`\sigma_0^2 =0`,
    the kernel is called the homogeneous linear kernel, otherwise
    it is inhomogeneous. The kernel is given by

    .. math::
        k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j

    The DotProduct kernel is commonly combined with exponentiation.
    See [1]_, Chapter 4, Section 4.2, for further details regarding the
    DotProduct kernel.

    Read more in the :ref:`User Guide <gp_kernels>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    sigma_0 : float >= 0, default=1.0
        Parameter controlling the inhomogenity of the kernel. If sigma_0=0,
        the kernel is homogenous.
    sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'sigma_0'.
        If set to "fixed", 'sigma_0' cannot be changed during
        hyperparameter tuning.

    References
    ----------
    .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
        "Gaussian Processes for Machine Learning". The MIT Press.
        <http://www.gaussianprocess.org/gpml/>`_

    Examples
    --------
    >>> from sklearn.datasets import make_friedman2
    >>> from sklearn.gaussian_process import GaussianProcessRegressor
    >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
    >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
    >>> kernel = DotProduct() + WhiteKernel()
    >>> gpr = GaussianProcessRegressor(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpr.score(X, y)
    0.3680...
    >>> gpr.predict(X[:2,:], return_std=True)
    (array([653.0..., 592.1...]), array([316.6..., 316.6...]))
    """
    def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
        # Parameters stored verbatim; no validation here.
        self.sigma_0 = sigma_0
        self.sigma_0_bounds = sigma_0_bounds
    @property
    def hyperparameter_sigma_0(self):
        """Hyperparameter descriptor for ``sigma_0``."""
        return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        X = np.atleast_2d(X)
        if Y is not None and eval_gradient:
            raise ValueError(
                "Gradient can only be evaluated when Y is None.")
        right = X if Y is None else Y
        K = np.inner(X, right) + self.sigma_0 ** 2
        if not eval_gradient:
            return K
        if self.hyperparameter_sigma_0.fixed:
            # sigma_0 is not tunable: empty gradient along the last axis.
            return K, np.empty((X.shape[0], X.shape[0], 0))
        # d k / d log(sigma_0) = 2 * sigma_0^2, constant over all pairs.
        K_gradient = np.empty((K.shape[0], K.shape[1], 1))
        K_gradient[..., 0] = 2 * self.sigma_0 ** 2
        return K, K_gradient
    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y).

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X).
        """
        return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
    def is_stationary(self):
        """Returns whether the kernel is stationary. """
        return False
    def __repr__(self):
        formatted_sigma = "{0:.3g}".format(self.sigma_0)
        return "{0}(sigma_0={1})".format(
            self.__class__.__name__, formatted_sigma)
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
class PairwiseKernel(Kernel):
    """Wrapper for kernels in sklearn.metrics.pairwise.

    A thin wrapper around the functionality of the kernels in
    sklearn.metrics.pairwise.
    Note: Evaluation of eval_gradient is not analytic but numeric and all
    kernels support only isotropic distances. The parameter gamma is
    considered to be a hyperparameter and may be optimized. The other
    kernel parameters are set directly at initialization and are kept
    fixed.

    .. versionadded:: 0.18

    Parameters
    ----------
    gamma : float, default=1.0
        Parameter gamma of the pairwise kernel specified by metric. It should
        be positive.
    gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
        The lower and upper bound on 'gamma'.
        If set to "fixed", 'gamma' cannot be changed during
        hyperparameter tuning.
    metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
            "rbf", "laplacian", "sigmoid", "cosine"} or callable, \
            default="linear"
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    pairwise_kernels_kwargs : dict, default=None
        All entries of this dict (if any) are passed as keyword arguments to
        the pairwise kernel function.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.gaussian_process import GaussianProcessClassifier
    >>> from sklearn.gaussian_process.kernels import PairwiseKernel
    >>> X, y = load_iris(return_X_y=True)
    >>> kernel = PairwiseKernel(metric='rbf')
    >>> gpc = GaussianProcessClassifier(kernel=kernel,
    ...         random_state=0).fit(X, y)
    >>> gpc.score(X, y)
    0.9733...
    >>> gpc.predict_proba(X[:2,:])
    array([[0.8880..., 0.05663..., 0.05532...],
            [0.8676..., 0.07073..., 0.06165...]])
    """
    def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
                 pairwise_kernels_kwargs=None):
        # Parameters stored verbatim; no validation here.
        self.gamma = gamma
        self.gamma_bounds = gamma_bounds
        self.metric = metric
        self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
    @property
    def hyperparameter_gamma(self):
        """Hyperparameter descriptor for ``gamma``."""
        return Hyperparameter("gamma", "numeric", self.gamma_bounds)
    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)
        Y : ndarray of shape (n_samples_Y, n_features), default=None
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
        eval_gradient : bool, default=False
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : ndarray of shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)
        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
                optional
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when `eval_gradient`
            is True.
        """
        kernel_kwargs = self.pairwise_kernels_kwargs
        if kernel_kwargs is None:
            kernel_kwargs = {}
        X = np.atleast_2d(X)
        K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
                             filter_params=True,
                             **kernel_kwargs)
        if not eval_gradient:
            return K
        if self.hyperparameter_gamma.fixed:
            return K, np.empty((X.shape[0], X.shape[0], 0))

        # approximate gradient numerically
        def kernel_at(log_gamma):
            # theta holds log-transformed hyperparameters, hence exp().
            return pairwise_kernels(
                X, Y, metric=self.metric, gamma=np.exp(log_gamma),
                filter_params=True, **kernel_kwargs)

        return K, _approx_fprime(self.theta, kernel_at, 1e-10)
    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : ndarray of shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : ndarray of shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        # We have to fall back to slow way of computing diagonal
        return np.apply_along_axis(self, 1, X).ravel()
    def is_stationary(self):
        """Returns whether the kernel is stationary. """
        return self.metric in ["rbf"]
    def __repr__(self):
        return "{0}(gamma={1}, metric={2})".format(
            self.__class__.__name__, self.gamma, self.metric)
| bnaul/scikit-learn | sklearn/gaussian_process/kernels.py | Python | bsd-3-clause | 84,172 | [
"Gaussian"
] | 82d279f2856f16638d1e53d3fd96156c03f1e593f8803b960434fb2b52f41cf6 |
"""
Simple wrapper class for a Basic GP.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from ..utils.models import printable
from ..likelihoods import Gaussian
from ..kernels import SE, Matern
from .exact import ExactGP
__all__ = ['BasicGP']
@printable
class BasicGP(ExactGP):
    """
    Basic GP frontend which assumes an ARD kernel and a Gaussian likelihood
    (and hence performs exact inference).
    """
    def __init__(self, sn, sf, ell, mu=0, ndim=None, kernel='se'):
        """Build the likelihood and kernel from the simple scalar
        hyperparameters.

        sn : noise standard deviation; sf : signal standard deviation;
        ell : length scale(s); mu : constant mean; ndim : input dimension;
        kernel : one of 'se', 'matern1', 'matern3', 'matern5'.
        """
        likelihood = Gaussian(sn)
        kernel = (
            SE(sf, ell, ndim) if (kernel == 'se') else
            Matern(sf, ell, 1, ndim) if (kernel == 'matern1') else
            Matern(sf, ell, 3, ndim) if (kernel == 'matern3') else
            Matern(sf, ell, 5, ndim) if (kernel == 'matern5') else None)
        if kernel is None:
            raise ValueError('Unknown kernel type')
        super(BasicGP, self).__init__(likelihood, kernel, mu)
    def _params(self):
        # replace the parameters for the base GP model with a simplified
        # structure and rename the likelihood's sigma parameter to sn (ie its
        # the sigma corresponding to the noise).
        params = [('sn', 1, True)]
        params += self._kernel._params()
        params += [('mu', 1, False)]
        return params
    @classmethod
    def from_gp(cls, gp):
        """Construct a BasicGP mirroring an existing GP's hyperparameters
        (and data, if any). Raises ValueError for unsupported likelihoods
        or kernels."""
        if not isinstance(gp._likelihood, Gaussian):
            raise ValueError('BasicGP instances must have Gaussian likelihood')
        if isinstance(gp._kernel, SE):
            kernel = 'se'
        elif isinstance(gp._kernel, Matern):
            kernel = 'matern%d' % gp._kernel._d
        else:
            raise ValueError('BasicGP instances must have a SE/Matern kernel')
        # get the relevant parameters.
        sn = np.sqrt(gp._likelihood.s2)
        sf = np.exp(gp._kernel._logsf)
        ell = np.exp(gp._kernel._logell)
        mu = gp._mean
        # create the new gp and maybe add data.
        # BUG FIX: `kernel` was previously computed above but never forwarded,
        # so a Matern gp was silently rebuilt with the default SE kernel.
        newgp = cls(sn, sf, ell, mu, kernel=kernel)
        if gp.ndata > 0:
            X, y = gp.data
            newgp.add_data(X, y)
        return newgp
| mwhoffman/pygp | pygp/inference/basic.py | Python | bsd-2-clause | 2,189 | [
"Gaussian"
] | 5a2bba8d0c638a9dce4155feb3e1549756a3d487573ed3eaf9514ec0e4a97ccd |
#!/usr/bin/env python
import os
import sys
import argparse
from src.ipr import read_ipr
from src.sprot import read_sprot
from src.sprot import get_fasta_info
from src.sprot import get_blast_info
from src.sprot import get_gff_info
from src.annotation import write_annotations
from src.fix import fix_anno
def main(args):
    """Command-line entry point: merge IPRScan and/or BLAST-vs-SwissProt
    results into a single annotation table.

    Note: `args` receives sys.argv from the caller but is not used directly;
    argparse reads sys.argv itself. The signature is kept for the existing
    ``main(sys.argv)`` call site.
    """
    parser = argparse.ArgumentParser(
        epilog="""
    Docs at http://genomeannotation.github.io/annie/
    Bugs and feature requests at https://github.com/genomeannotation/annie/issues
    """,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('-ipr', '--iprscan', help="IPRScan output file, tab-separated")
    parser.add_argument('-b', '--blast-output')
    parser.add_argument('-g', '--gff', help="GFF3 file corresponding to assembly")
    parser.add_argument('-db', '--blast-database', help="The fasta file against which BLAST was run")
    parser.add_argument('--blacklist')
    parser.add_argument('--whitelist')
    parser.add_argument('-o', '--output')
    parser.add_argument('--fix_bad_products', action='store_true',
                        help="Attempt to fix annotations that violate NCBI guidelines")
    args = parser.parse_args()
    # Make sure we got enough args
    ipr = bool(args.iprscan)
    sprot = bool(args.blast_output and args.gff and args.blast_database)
    if not (ipr or sprot):
        sys.stderr.write("Error: must provide --iprscan OR --blast-output, --gff and --blast-database\n\n")
        parser.print_help()
        # FIX: exit nonzero on a usage error (was sys.exit() -> status 0).
        sys.exit(1)
    # Output file name (opened only once all inputs were read successfully,
    # so a failed run no longer truncates an existing output file).
    out = args.output if args.output else "annie_output.tsv"
    # Accumulated Annotation objects from all requested sources.
    annotations = []
    # Add IPRScan results if requested
    if ipr:
        try:
            ipr_file = open(args.iprscan, 'r')
        except IOError:
            print("Sorry, Annie says either one of the files doesn't exist or it could not be read.")
            # FIX: was exit() -> status 0; report failure to the shell.
            sys.exit(1)
        with ipr_file:  # ensures closure even if read_ipr raises
            if args.whitelist:
                # obtain whitelist and get rid of lowercase and whitespace padding
                with open(args.whitelist, 'r') as whitelist_file:
                    whitelist = [word.strip().lower() for word in whitelist_file]
            else:
                whitelist = []
            annotations.extend(read_ipr(ipr_file, whitelist))
    # Add SwissProt results if requested
    if sprot:
        try:
            blast_file = open(args.blast_output, 'r')
            gff_file = open(args.gff, 'r')
            fasta_file = open(args.blast_database, 'r')
        except IOError:
            print("Sorry, Annie says either one of the files doesn't exist or it could not be read.")
            sys.exit(1)
        with blast_file, gff_file, fasta_file:  # close even on parser errors
            annotations.extend(read_sprot(blast_file, gff_file, fasta_file))
    # Now go back and remove stuff if requested
    if args.blacklist:
        with open(args.blacklist, "r") as bad_products_file:
            # Sets give O(1) membership tests (were O(n) list scans).
            bad_products = {line.strip() for line in bad_products_file}
        # Features carrying a blacklisted product; every annotation on such a
        # feature is dropped, matching the original behavior.
        bad_features = {anno.feature_id for anno in annotations
                        if anno.key == "product" and anno.value in bad_products}
        annotations = [anno for anno in annotations
                       if anno.feature_id not in bad_features]
    # Optional step to fix annotations
    if args.fix_bad_products:
        with open("fix_bad_products.log", 'w') as fixlog:
            fixlog.write("Original\tUpdated\n")
            for anno in annotations:
                # only fix if it's a 'product'
                if anno.key == "product":
                    new_value = fix_anno(anno.value)
                    if new_value != anno.value:
                        fixlog.write(anno.value + "\t" + new_value + "\n")
                    # Unconditional assignment preserved from the original.
                    anno.value = new_value
    # write the annotations to file and close
    with open(out, 'w') as outfile:
        write_annotations(annotations, outfile)
####################################################################################################
# Script entry point: delegate to main() with the raw argv list (argv[0] included)
if __name__ == "__main__":
    main(sys.argv)
| genomeannotation/annie | annie.py | Python | mit | 4,465 | [
"BLAST"
] | 5521ade3720a38720f6ab8aa7cd42cc6cc724009cceec58a37d4be0f33dd8add |
# -*- coding: utf-8 -*-
""" GIS Module
@requires: U{B{I{gluon}} <http://web2py.com>}
@requires: U{B{I{shapely}} <http://trac.gispython.org/lab/wiki/Shapely>}
@copyright: (c) 2010-2012 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["GIS", "S3Map", "GoogleGeocoder", "YahooGeocoder"]
import os
import re
import sys
#import logging
import urllib # Needed for urlencoding
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from datetime import timedelta # Needed for Feed Refresh checks
try:
from lxml import etree # Needed to follow NetworkLinks
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
# Here are dependencies listed for reference:
#from gluon import current
#from gluon.html import *
#from gluon.http import HTTP, redirect
from gluon.dal import Rows
from gluon.storage import Storage, Messages
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from s3fields import s3_all_meta_field_names
from s3search import S3Search
from s3track import S3Trackable
from s3utils import s3_debug, s3_fullname, s3_has_foreign_key
# Compile-time debug switch: when True, _debug() traces to stderr
DEBUG = False
if DEBUG:
    import datetime
    print >> sys.stderr, "S3GIS: DEBUG MODE"  # Python-2 print-to-stream syntax
    def _debug(m):
        # Emit a single trace message to stderr
        print >> sys.stderr, m
else:
    # No-op stub so callers can invoke _debug() unconditionally
    _debug = lambda m: None
# Map WKT types to db types
# (lower-cased WKT geometry name -> integer code stored in the db)
GEOM_TYPES = {
    "point": 1,
    "linestring": 2,
    "polygon": 3,
    "multipoint": 4,
    "multilinestring": 5,
    "multipolygon": 6,
    "geometrycollection": 7,
}

# Mean radius of the Earth, in km
RADIUS_EARTH = 6371.01
# Garmin GPS Symbols
# Fix: the original list was missing the comma after "Amusement Park", so
# Python's implicit string concatenation silently merged it with "Ball Park"
# into a single bogus entry "Amusement ParkBall Park", losing both symbols.
GPS_SYMBOLS = [
    "Airport",
    "Amusement Park",
    "Ball Park",
    "Bank",
    "Bar",
    "Beach",
    "Bell",
    "Boat Ramp",
    "Bowling",
    "Bridge",
    "Building",
    "Campground",
    "Car",
    "Car Rental",
    "Car Repair",
    "Cemetery",
    "Church",
    "Circle with X",
    "City (Capitol)",
    "City (Large)",
    "City (Medium)",
    "City (Small)",
    "Civil",
    "Contact, Dreadlocks",
    "Controlled Area",
    "Convenience Store",
    "Crossing",
    "Dam",
    "Danger Area",
    "Department Store",
    "Diver Down Flag 1",
    "Diver Down Flag 2",
    "Drinking Water",
    "Exit",
    "Fast Food",
    "Fishing Area",
    "Fitness Center",
    "Flag",
    "Forest",
    "Gas Station",
    "Geocache",
    "Geocache Found",
    "Ghost Town",
    "Glider Area",
    "Golf Course",
    "Green Diamond",
    "Green Square",
    "Heliport",
    "Horn",
    "Hunting Area",
    "Information",
    "Levee",
    "Light",
    "Live Theater",
    "Lodging",
    "Man Overboard",
    "Marina",
    "Medical Facility",
    "Mile Marker",
    "Military",
    "Mine",
    "Movie Theater",
    "Museum",
    "Navaid, Amber",
    "Navaid, Black",
    "Navaid, Blue",
    "Navaid, Green",
    "Navaid, Green/Red",
    "Navaid, Green/White",
    "Navaid, Orange",
    "Navaid, Red",
    "Navaid, Red/Green",
    "Navaid, Red/White",
    "Navaid, Violet",
    "Navaid, White",
    "Navaid, White/Green",
    "Navaid, White/Red",
    "Oil Field",
    "Parachute Area",
    "Park",
    "Parking Area",
    "Pharmacy",
    "Picnic Area",
    "Pizza",
    "Post Office",
    "Private Field",
    "Radio Beacon",
    "Red Diamond",
    "Red Square",
    "Residence",
    "Restaurant",
    "Restricted Area",
    "Restroom",
    "RV Park",
    "Scales",
    "Scenic Area",
    "School",
    "Seaplane Base",
    "Shipwreck",
    "Shopping Center",
    "Short Tower",
    "Shower",
    "Skiing Area",
    "Skull and Crossbones",
    "Soft Field",
    "Stadium",
    "Summit",
    "Swimming Area",
    "Tall Tower",
    "Telephone",
    "Toll Booth",
    "TracBack Point",
    "Trail Head",
    "Truck Stop",
    "Tunnel",
    "Ultralight Area",
    "Water Hydrant",
    "Waypoint",
    "White Buoy",
    "White Dot",
    "Zoo"
]
# -----------------------------------------------------------------------------
class GIS(object):
"""
GeoSpatial functions
"""
def __init__(self):
messages = current.messages
#messages.centroid_error = str(A("Shapely", _href="http://pypi.python.org/pypi/Shapely/", _target="_blank")) + " library not found, so can't find centroid!"
messages.centroid_error = "Shapely library not functional, so can't find centroid! Install Geos & Shapely for Line/Polygon support"
messages.unknown_type = "Unknown Type!"
messages.invalid_wkt_point = "Invalid WKT: must be like POINT(3 4)"
messages.invalid_wkt = "Invalid WKT: see http://en.wikipedia.org/wiki/Well-known_text"
messages.lon_empty = "Invalid: Longitude can't be empty if Latitude specified!"
messages.lat_empty = "Invalid: Latitude can't be empty if Longitude specified!"
messages.unknown_parent = "Invalid: %(parent_id)s is not a known Location"
self.DEFAULT_SYMBOL = "White Dot"
self.hierarchy_level_keys = ["L0", "L1", "L2", "L3", "L4"]
self.hierarchy_levels = {}
self.max_allowed_level_num = 4
# -------------------------------------------------------------------------
@staticmethod
def abbreviate_wkt(wkt, max_length=30):
if not wkt:
# Blank WKT field
return None
elif len(wkt) > max_length:
return "%s(...)" % wkt[0:wkt.index("(")]
else:
return wkt
# -------------------------------------------------------------------------
    @staticmethod
    def gps_symbols():
        """ Accessor for the module-level list of Garmin GPS symbol names """
        return GPS_SYMBOLS
# -------------------------------------------------------------------------
    def download_kml(self, record_id, filename):
        """
        Download a KML file:
        - unzip it if-required
        - follow NetworkLinks recursively if-required
        Save the file to the /uploads folder

        Designed to be called asynchronously using:
        current.s3task.async("download_kml", [record_id, filename])

        @param record_id: id of the KML layer record to download
        @param filename: name to save the fetched file under within the
                         layer's cache folder
        @ToDo: Pass error messages to Result & have JavaScript listen for these
        """
        # KMLLayer is defined elsewhere in this module (not visible here)
        layer = KMLLayer()
        query = (layer.table.id == record_id)
        # NOTE(review): record is None if record_id is unknown; the attribute
        # access below would then raise - presumably callers only pass valid
        # ids. TODO confirm
        record = current.db(query).select(limitby=(0, 1)).first()
        url = record.url
        cachepath = layer.cachepath
        filepath = os.path.join(cachepath, filename)
        # fetch_kml returns a string of warning markers (may be empty)
        warning = self.fetch_kml(url, filepath)
        # @ToDo: Handle errors
        #query = (cachetable.name == name)
        if "URLError" in warning or "HTTPError" in warning:
            # URL inaccessible
            if os.access(filepath, os.R_OK):
                statinfo = os.stat(filepath)
                if statinfo.st_size:
                    # Use cached version
                    #date = db(query).select(cachetable.modified_on,
                    #                        limitby=(0, 1)).first().modified_on
                    #response.warning += "%s %s %s\n" % (url,
                    #                                    T("not accessible - using cached version from"),
                    #                                    str(date))
                    #url = URL(c="default", f="download",
                    #          args=[filename])
                    pass
                else:
                    # 0k file is all that is available
                    #response.warning += "%s %s\n" % (url,
                    #                                 T("not accessible - no cached version available!"))
                    # skip layer
                    return
            else:
                # No cached version available
                #response.warning += "%s %s\n" % (url,
                #                                 T("not accessible - no cached version available!"))
                # skip layer
                return
        else:
            # Download was succesful
            #db(query).update(modified_on=request.utcnow)
            if "ParseError" in warning:
                # @ToDo Parse detail
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("couldn't be parsed so NetworkLinks not followed."))
                pass
            if "GroundOverlay" in warning or "ScreenOverlay" in warning:
                #response.warning += "%s: %s %s\n" % (T("Layer"),
                #                                     name,
                #                                     T("includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly."))
                pass
# -------------------------------------------------------------------------
    def fetch_kml(self, url, filepath):
        """
        Fetch a KML file:
        - unzip it if-required
        - follow NetworkLinks recursively if-required

        Writes the (possibly rewritten) main KML document to filepath and
        returns a string of warning markers accumulated along the way
        (empty string when everything went cleanly).

        Designed as a helper function for download_kml()

        @param url: source URL (relative URLs are resolved against the
                    deployment's public URL and fetched with the session
                    cookie attached)
        @param filepath: destination path for the main KML document
        """
        from gluon.tools import fetch
        response = current.response
        public_url = current.deployment_settings.get_base_public_url()
        warning = ""
        local = False
        if not url.startswith("http"):
            # Relative URL: resolve against our own server
            local = True
            url = "%s%s" % (public_url, url)
        elif len(url) > len(public_url) and url[:len(public_url)] == public_url:
            # Absolute URL pointing back at ourselves
            local = True
        if local:
            # Keep Session for local URLs
            import Cookie
            cookie = Cookie.SimpleCookie()
            cookie[response.session_id_name] = response.session_id
            current.session._unlock(response)
            try:
                file = fetch(url, cookie=cookie)
            except urllib2.URLError:
                warning = "URLError"
                return warning
            except urllib2.HTTPError:
                warning = "HTTPError"
                return warning
        else:
            try:
                file = fetch(url)
            except urllib2.URLError:
                warning = "URLError"
                return warning
            except urllib2.HTTPError:
                warning = "HTTPError"
                return warning
        filenames = []
        if file[:2] == "PK":
            # "PK" magic bytes => this is a zip archive (KMZ): Unzip
            fp = StringIO(file)
            import zipfile
            myfile = zipfile.ZipFile(fp)
            files = myfile.infolist()
            main = None
            candidates = []
            for _file in files:
                filename = _file.filename
                if filename == "doc.kml":
                    # Conventional name for the primary document
                    main = filename
                elif filename[-4:] == ".kml":
                    candidates.append(filename)
            if not main:
                if candidates:
                    # Any better way than this to guess which KML file is the main one?
                    main = candidates[0]
                else:
                    response.error = "KMZ contains no KML Files!"
                    return ""
            # Write files to cache (other than the main one)
            request = current.request
            path = os.path.join(request.folder, "static", "cache", "kml")
            if not os.path.exists(path):
                os.makedirs(path)
            for _file in files:
                filename = _file.filename
                if filename != main:
                    if "/" in filename:
                        # Recreate the archive's single-level folder structure
                        _filename = filename.split("/")
                        dir = os.path.join(path, _filename[0])
                        if not os.path.exists(dir):
                            os.mkdir(dir)
                        _filepath = os.path.join(path, *_filename)
                    else:
                        _filepath = os.path.join(path, filename)
                    try:
                        f = open(_filepath, "wb")
                    except:
                        # Trying to write the Folder
                        # NOTE(review): bare except - also swallows unrelated
                        # IOErrors (e.g. permissions)
                        pass
                    else:
                        filenames.append(filename)
                        __file = myfile.read(filename)
                        f.write(__file)
                        f.close()
            # Now read the main one (to parse)
            file = myfile.read(main)
            myfile.close()
        # Check for NetworkLink
        if "<NetworkLink>" in file:
            try:
                # Remove extraneous whitespace
                parser = etree.XMLParser(recover=True, remove_blank_text=True)
                tree = etree.XML(file, parser)
                # Find contents of href tag (must be a better way?)
                url = ""
                for element in tree.iter():
                    if element.tag == "{%s}href" % KML_NAMESPACE:
                        url = element.text
                if url:
                    # Follow NetworkLink (synchronously)
                    warning2 = self.fetch_kml(url, filepath)
                    warning += warning2
            except (etree.XMLSyntaxError,):
                e = sys.exc_info()[1]
                warning += "<ParseError>%s %s</ParseError>" % (e.line, e.errormsg)
        # Check for Overlays
        if "<GroundOverlay>" in file:
            warning += "GroundOverlay"
        if "<ScreenOverlay>" in file:
            warning += "ScreenOverlay"
        for filename in filenames:
            replace = "%s/%s" % (URL(c="static", f="cache", args=["kml"]),
                                 filename)
            # Rewrite all references to point to the correct place
            # need to catch <Icon><href> (which could be done via lxml)
            # & also <description><![CDATA[<img src=" (which can't)
            file = file.replace(filename, replace)
        # Write main file to cache
        f = open(filepath, "w")
        f.write(file)
        f.close()
        return warning
# -------------------------------------------------------------------------
@staticmethod
def get_bearing(lat_start, lon_start, lat_end, lon_end):
"""
Given a Start & End set of Coordinates, return a Bearing
Formula from: http://www.movable-type.co.uk/scripts/latlong.html
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
delta_lon = lon_start - lon_end
bearing = math.atan2(sin(delta_lon) * cos(lat_end),
(cos(lat_start) * sin(lat_end)) - \
(sin(lat_start) * cos(lat_end) * cos(delta_lon))
)
# Convert to a compass bearing
bearing = (bearing + 360) % 360
return bearing
# -------------------------------------------------------------------------
    def get_bounds(self, features=[], parent=None):
        """
        Calculate the Bounds of a list of Point Features
        e.g. When a map is displayed that focuses on a collection of points,
        the map is zoomed to show just the region bounding the points.
        e.g. To use in GPX export for correct zooming

        Ensure a minimum size of bounding box, and that the points
        are inset from the border.

        @param features: A list of point features
        @param parent: A location_id to provide a polygonal bounds suitable
                       for validating child locations

        Returns a 5-tuple (lat_min, lon_min, lat_max, lon_max, name) when
        parent is given, otherwise a dict with min/max lat/lon keys.
        NOTE: features=[] is a mutable default, but it is only read here,
        never mutated.
        """
        if parent:
            table = current.s3db.gis_location
            db = current.db
            parent = db(table.id == parent).select(table.level,
                                                   table.name,
                                                   table.parent,
                                                   table.path,
                                                   table.lon,
                                                   table.lat,
                                                   table.lon_min,
                                                   table.lat_min,
                                                   table.lon_max,
                                                   table.lat_max).first()
            # Bounds are unusable when missing or degenerate (the point
            # coincides with one of its own min/max edges)
            if parent.lon_min is None or \
               parent.lon_max is None or \
               parent.lat_min is None or \
               parent.lat_max is None or \
               parent.lon == parent.lon_min or \
               parent.lon == parent.lon_max or \
               parent.lat == parent.lat_min or \
               parent.lat == parent.lat_max:
                # This is unsuitable - try higher parent
                if parent.level == "L1":
                    if parent.parent:
                        # We can trust that L0 should have the data from prepop
                        L0 = db(table.id == parent.parent).select(table.name,
                                                                  table.lon_min,
                                                                  table.lat_min,
                                                                  table.lon_max,
                                                                  table.lat_max).first()
                        return L0.lat_min, L0.lon_min, L0.lat_max, L0.lon_max, L0.name
                if parent.path:
                    path = parent.path
                else:
                    # NOTE(review): parent is a Row at this point, so
                    # dict(id=parent) passes the whole Row as "id" - verify
                    # this is what update_location_tree() expects
                    path = self.update_location_tree(dict(id=parent))
                # Walk the ancestor path from nearest to most distant,
                # looking for the first one with usable bounds
                path_list = map(int, path.split("/"))
                rows = db(table.id.belongs(path_list)).select(table.level,
                                                              table.name,
                                                              table.lat,
                                                              table.lon,
                                                              table.lon_min,
                                                              table.lat_min,
                                                              table.lon_max,
                                                              table.lat_max,
                                                              orderby=table.level)
                row_list = rows.as_list()
                row_list.reverse()
                ok = False
                for row in row_list:
                    if row["lon_min"] is not None and row["lon_max"] is not None and \
                       row["lat_min"] is not None and row["lat_max"] is not None and \
                       row["lon"] != row["lon_min"] != row["lon_max"] and \
                       row["lat"] != row["lat_min"] != row["lat_max"]:
                        ok = True
                        break
                if ok:
                    # This level is suitable
                    return row["lat_min"], row["lon_min"], row["lat_max"], row["lon_max"], row["name"]
            else:
                # This level is suitable
                return parent.lat_min, parent.lon_min, parent.lat_max, parent.lon_max, parent.name
            # No ancestor with usable bounds: fall back to the whole world
            return -90, -180, 90, 180, None
        # Minimum Bounding Box
        # - gives a minimum width and height in degrees for the region shown.
        # Without this, a map showing a single point would not show any extent around that point.
        bbox_min_size = 0.05
        # Bounding Box Insets
        # - adds a small amount of distance outside the points.
        # Without this, the outermost points would be on the bounding box, and might not be visible.
        bbox_inset = 0.007
        if len(features) > 0:
            min_lon = 180
            min_lat = 90
            max_lon = -180
            max_lat = -90
            # Is this a simple feature set or the result of a join?
            try:
                lon = features[0].lon
                simple = True
            except (AttributeError, KeyError):
                simple = False
            # @ToDo: Optimised Geospatial routines rather than this crude hack
            for feature in features:
                try:
                    if simple:
                        lon = feature.lon
                        lat = feature.lat
                    else:
                        # A Join
                        lon = feature.gis_location.lon
                        lat = feature.gis_location.lat
                except AttributeError:
                    # Skip any rows without the necessary lat/lon fields
                    continue
                # Also skip those set to None. Note must use explicit test,
                # as zero is a legal value.
                if lon is None or lat is None:
                    continue
                min_lon = min(lon, min_lon)
                min_lat = min(lat, min_lat)
                max_lon = max(lon, max_lon)
                max_lat = max(lat, max_lat)
            # Assure a reasonable-sized box.
            delta_lon = (bbox_min_size - (max_lon - min_lon)) / 2.0
            if delta_lon > 0:
                min_lon -= delta_lon
                max_lon += delta_lon
            delta_lat = (bbox_min_size - (max_lat - min_lat)) / 2.0
            if delta_lat > 0:
                min_lat -= delta_lat
                max_lat += delta_lat
            # Move bounds outward by specified inset.
            min_lon -= bbox_inset
            max_lon += bbox_inset
            min_lat -= bbox_inset
            max_lat += bbox_inset
        else:
            # No features: fall back to the extent in the active config,
            # defaulting each edge to the whole world
            config = GIS.get_config()
            if config.min_lat is not None:
                min_lat = config.min_lat
            else:
                min_lat = -90
            if config.min_lon is not None:
                min_lon = config.min_lon
            else:
                min_lon = -180
            if config.max_lat is not None:
                max_lat = config.max_lat
            else:
                max_lat = 90
            if config.max_lon is not None:
                max_lon = config.max_lon
            else:
                max_lon = 180
        return dict(min_lon=min_lon, min_lat=min_lat,
                    max_lon=max_lon, max_lat=max_lat)
# -------------------------------------------------------------------------
@staticmethod
def _lookup_parent_path(feature_id):
"""
Helper that gets parent and path for a location.
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.name,
table.level,
table.path,
table.parent,
limitby=(0, 1)).first()
return feature
# -------------------------------------------------------------------------
@staticmethod
def get_children(id, level=None):
"""
Return a list of IDs of all GIS Features which are children of
the requested feature, using Materialized path for retrieving
the children
@author: Aravind Venkatesan and Ajay Kumar Sreenivasan from NCSU
This has been chosen over Modified Preorder Tree Traversal for
greater efficiency:
http://eden.sahanafoundation.org/wiki/HaitiGISToDo#HierarchicalTrees
@param: level - optionally filter by level
"""
db = current.db
table = db.gis_location
query = (table.deleted == False)
if level:
query = query & (table.level == level)
term = str(id)
query = query & ((table.path.like(term + "/%")) | \
(table.path.like("%/" + term + "/%")))
children = db(query).select(table.id,
table.name)
return children
# -------------------------------------------------------------------------
    def get_parents(self, feature_id, feature=None, ids_only=False):
        """
        Returns a list containing ancestors of the requested feature.

        If the caller already has the location row, including path and
        parent fields, they can supply it via feature to avoid a db lookup.

        If ids_only is false, each element in the list is a gluon.sql.Row
        containing the gis_location record of an ancestor of the specified
        location.

        If ids_only is true, just returns a list of ids of the parents.
        This avoids a db lookup for the parents if the specified feature
        has a path.

        List elements are in the opposite order as the location path and
        exclude the specified location itself, i.e. element 0 is the parent
        and the last element is the most distant ancestor.

        Assists lazy update of a database without location paths by calling
        update_location_tree to get the path.

        Returns None when the feature has no parents (or does not exist).
        """
        if not feature or "path" not in feature or "parent" not in feature:
            feature = self._lookup_parent_path(feature_id)
        if feature and (feature.path or feature.parent):
            if feature.path:
                path = feature.path
            else:
                # Lazily build the materialized path
                path = self.update_location_tree(feature)
            path_list = map(int, path.split("/"))
            if len(path_list) == 1:
                # No parents -- path contains only this feature.
                return None
            # Get path in the desired order, without current feature.
            reverse_path = path_list[:-1]
            reverse_path.reverse()
            # If only ids are wanted, stop here.
            if ids_only:
                return reverse_path
            # Retrieve parents - order in which they're returned is arbitrary.
            s3db = current.s3db
            table = s3db.gis_location
            query = (table.id.belongs(reverse_path))
            fields = [table.id, table.name, table.level, table.lat, table.lon]
            unordered_parents = current.db(query).select(cache=s3db.cache,
                                                         *fields)
            # Reorder parents in order of reversed path.
            unordered_ids = [row.id for row in unordered_parents]
            parents = [unordered_parents[unordered_ids.index(path_id)]
                       for path_id in reverse_path if path_id in unordered_ids]
            return parents
        else:
            return None
# -------------------------------------------------------------------------
def get_parent_per_level(self, results, feature_id,
feature=None,
ids=True,
names=True):
"""
Adds ancestor of requested feature for each level to supplied dict.
If the caller already has the location row, including path and
parent fields, they can supply it via feature to avoid a db lookup.
If a dict is not supplied in results, one is created. The results
dict is returned in either case.
If ids=True and names=False (used by old S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.id
If ids=False and names=True (used by address_onvalidation):
For each ancestor, an entry is added to results, like
ancestor.level : ancestor.name
If ids=True and names=True (used by new S3LocationSelectorWidget):
For each ancestor, an entry is added to results, like
ancestor.level : {name : ancestor.name, id: ancestor.id}
"""
if not results:
results = {}
id = feature_id
# if we don't have a feature or a feature id return the dict as-is
if not feature_id and not feature:
return results
if not feature_id and "path" not in feature and "parent" in feature:
# gis_location_onvalidation on a Create => no ID yet
# Read the Parent's path instead
feature = self._lookup_parent_path(feature.parent)
id = feature.id
elif not feature or "path" not in feature or "parent" not in feature:
feature = self._lookup_parent_path(feature_id)
if feature and (feature.path or feature.parent):
if feature.path:
path = feature.path
else:
path = self.update_location_tree(feature)
# Get ids of ancestors at each level.
if feature.parent:
strict = self.get_strict_hierarchy(feature.parent)
else:
strict = self.get_strict_hierarchy(id)
if path and strict and not names:
# No need to do a db lookup for parents in this case -- we
# know the levels of the parents from their position in path.
# Note ids returned from db are ints, not strings, so be
# consistent with that.
path_ids = map(int, path.split("/"))
# This skips the last path element, which is the supplied
# location.
for (i, id) in enumerate(path_ids[:-1]):
results["L%i" % i] = id
elif path:
ancestors = self.get_parents(id, feature=feature)
if ancestors:
for ancestor in ancestors:
if ancestor.level and ancestor.level in self.hierarchy_level_keys:
if names and ids:
results[ancestor.level] = Storage()
results[ancestor.level].name = ancestor.name
results[ancestor.level].id = ancestor.id
elif names:
results[ancestor.level] = ancestor.name
else:
results[ancestor.level] = ancestor.id
if not feature_id:
# Add the Parent in (we only need the version required for gis_location onvalidation here)
results[feature.level] = feature.name
if names:
# We need to have entries for all levels
# (both for address onvalidation & new LocationSelector)
hierarchy_level_keys = self.hierarchy_level_keys
for key in hierarchy_level_keys:
if not results.has_key(key):
results[key] = None
return results
# -------------------------------------------------------------------------
    def update_table_hierarchy_labels(self, tablename=None):
        """
        Re-set table options that depend on location_hierarchy

        Only update tables which are already defined

        @param tablename: optionally restrict the update to a single table
                          (used right after that table has been defined)
        """
        levels = ["L1", "L2", "L3", "L4"]
        labels = self.get_location_hierarchy()
        db = current.db
        if tablename and tablename in db:
            # Update the specific table which has just been defined
            table = db[tablename]
            if tablename == "gis_location":
                # gis_location stores the level code itself, so gets a
                # validator rather than per-level labels
                labels["L0"] = current.T("Country")
                table.level.requires = \
                    IS_NULL_OR(IS_IN_SET(labels))
            else:
                for level in levels:
                    table[level].label = labels[level]
        else:
            # Do all Tables which are already defined
            # gis_location
            if "gis_location" in db:
                table = db.gis_location
                table.level.requires = \
                    IS_NULL_OR(IS_IN_SET(labels))
            # These tables store location hierarchy info for XSLT export.
            # Labels are used for PDF & XLS Reports
            tables = ["org_office",
                      #"pr_person",
                      "pr_address",
                      "cr_shelter",
                      "asset_asset",
                      #"hms_hospital",
                      ]
            for tablename in tables:
                if tablename in db:
                    table = db[tablename]
                    for level in levels:
                        table[level].label = labels[level]
# -------------------------------------------------------------------------
@staticmethod
def set_config(config_id=None, force_update_cache=False):
"""
Reads the specified GIS config from the DB, caches it in response.
Passing in a false or non-existent id will cause the personal config,
if any, to be used, else the site config (uuid SITE_DEFAULT), else
their fallback values defined in this class.
If force_update_cache is true, the config will be read and cached in
response even if the specified config is the same as what's already
cached. Used when the config was just written.
The config itself will be available in response.s3.gis.config.
Scalar fields from the gis_config record and its linked
gis_projection record have the same names as the fields in their
tables and can be accessed as response.s3.gis.<fieldname>.
Returns the id of the config it actually used, if any.
@param: config_id. use '0' to set the SITE_DEFAULT
@ToDo: Merge configs for Event
"""
session = current.session
s3 = current.response.s3
all_meta_field_names = s3_all_meta_field_names()
# If an id has been supplied, try it first. If it matches what's in
# response, there's no work to do.
if config_id and not force_update_cache and \
s3.gis.config and \
s3.gis.config.id == config_id:
return
db = current.db
s3db = current.s3db
ctable = s3db.gis_config
mtable = s3db.gis_marker
ptable = s3db.gis_projection
stable = s3db.gis_symbology
ltable = s3db.gis_layer_config
cache = Storage()
row = None
if config_id:
query = (ctable.id == config_id) & \
(mtable.id == stable.marker_id) & \
(stable.id == ctable.symbology_id) & \
(ptable.id == ctable.projection_id)
row = db(query).select(limitby=(0, 1)).first()
elif config_id is 0:
# Use site default.
config = db(ctable.uuid == "SITE_DEFAULT").select(limitby=(0, 1)).first()
if not config:
# No configs found at all
s3.gis.config = cache
return cache
query = (ctable.id == config.id) & \
(mtable.id == stable.marker_id) & \
(stable.id == ctable.symbology_id) & \
(ptable.id == ctable.projection_id)
row = db(query).select(limitby=(0, 1)).first()
# If no id supplied, or the requested config does not exist,
# fall back to personal or site config.
if not row:
# Read personalised config, if available.
auth = current.auth
if auth.is_logged_in():
pe_id = auth.user.pe_id
# OU configs
# List of roles to check (in order)
roles = ["Staff", "Volunteer"]
role_paths = s3db.pr_get_role_paths(pe_id, roles=roles)
# Unordered list of PEs
pes = []
append = pes.append
for role in roles:
if role in role_paths:
# @ToDo: Read the person's gis_config to disambiguate which Path to use, if there are issues
pes = role_paths[role].nodes()
# Staff don't check Volunteer's OUs
break
# Add Personal
pes.insert(0, pe_id)
query = (ctable.pe_id.belongs(pes)) | \
(ctable.uuid == "SITE_DEFAULT")
# Personal may well not be complete, so Left Join
left = [
ptable.on(ptable.id == ctable.projection_id),
stable.on(stable.id == ctable.symbology_id),
mtable.on(mtable.id == stable.marker_id),
]
# Order by pe_type (defined in gis_config)
# @ToDo: Do this purely from the hierarchy
rows = db(query).select(ctable.ALL,
mtable.ALL,
ptable.ALL,
left=left,
orderby=ctable.pe_type)
cache["ids"] = []
exclude = list(all_meta_field_names)
append = exclude.append
for fieldname in ["delete_record", "update_record",
"pe_path",
"gis_layer_config", "gis_menu"]:
append(fieldname)
for row in rows:
config = row["gis_config"]
if not config_id:
config_id = config.id
cache["ids"].append(config.id)
fields = filter(lambda key: key not in exclude,
config)
for key in fields:
if key not in cache or cache[key] is None:
cache[key] = config[key]
if "epsg" not in cache or cache["epsg"] is None:
projection = row["gis_projection"]
for key in ["epsg", "units", "maxResolution", "maxExtent"]:
cache[key] = projection[key] if key in projection else None
if "image" not in cache or cache["image"] is None:
marker = row["gis_marker"]
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker else None
#if "base" not in cache:
# # Default Base Layer?
# query = (ltable.config_id == config.id) & \
# (ltable.base == True) & \
# (ltable.enabled == True)
# base = db(query).select(ltable.layer_id,
# limitby=(0, 1)).first()
# if base:
# cache["base"] = base.layer_id
# Add NULL values for any that aren't defined, to avoid KeyErrors
for key in ["epsg", "units", "maxResolution", "maxExtent",
"marker_image", "marker_height", "marker_width",
"base"]:
if key not in cache:
cache[key] = None
if not row:
# No personal config or not logged in. Use site default.
config = db(ctable.uuid == "SITE_DEFAULT").select(limitby=(0, 1)).first()
if not config:
# No configs found at all
s3.gis.config = cache
return cache
query = (ctable.id == config.id) & \
(mtable.id == stable.marker_id) & \
(stable.id == ctable.symbology_id) & \
(ptable.id == ctable.projection_id)
row = db(query).select(limitby=(0, 1)).first()
if row and not cache:
# We had a single row
config = row["gis_config"]
config_id = config.id
cache["ids"] = [config_id]
projection = row["gis_projection"]
marker = row["gis_marker"]
fields = filter(lambda key: key not in all_meta_field_names,
config)
for key in fields:
cache[key] = config[key]
for key in ["epsg", "units", "maxResolution", "maxExtent"]:
cache[key] = projection[key] if key in projection else None
for key in ["image", "height", "width"]:
cache["marker_%s" % key] = marker[key] if key in marker else None
# Default Base Layer?
#query = (ltable.config_id == config_id) & \
# (ltable.base == True) & \
# (ltable.enabled == True)
#base = db(query).select(ltable.layer_id,
# limitby=(0, 1)).first()
#if base:
# cache["base"] = base.layer_id
#else:
# cache["base"] = None
# Store the values
s3.gis.config = cache
# Let caller know if their id was valid.
return config_id if row else cache
# -------------------------------------------------------------------------
@staticmethod
def get_config():
"""
Returns the current GIS config structure.
@ToDo: Config() class
"""
gis = current.response.s3.gis
if not gis.config:
# Ask set_config to put the appropriate config in response.
if current.session.s3.gis_config_id:
GIS.set_config(current.session.s3.gis_config_id)
else:
GIS.set_config()
return gis.config
# -------------------------------------------------------------------------
    def get_location_hierarchy(self, level=None, location=None):
        """
        Returns the location hierarchy and it's labels

        @param: level - a specific level for which to lookup the label
        @param: location - the location_id to lookup the location for
                           currently only the actual location is supported
                           @ToDo: Do a search of parents to allow this
                                  lookup for any location
        """
        _levels = self.hierarchy_levels
        _location = location
        if not location and _levels:
            # Use cached value
            if level:
                if level in _levels:
                    return _levels[level]
                else:
                    # Unknown level: return the key itself
                    return level
            else:
                return _levels
        T = current.T
        COUNTRY = str(T("Country"))
        if level == "L0":
            # L0 is always labelled "Country"
            return COUNTRY
        db = current.db
        s3db = current.s3db
        table = s3db.gis_hierarchy
        fields = [table.uuid,
                  table.L1,
                  table.L2,
                  table.L3,
                  table.L4,
                  table.L5]
        query = (table.uuid == "SITE_DEFAULT")
        if not location:
            config = GIS.get_config()
            location = config.region_location_id
        if location:
            # Try the Region, but ensure we have the fallback available in a single query
            query = query | (table.location_id == location)
        rows = db(query).select(cache=s3db.cache,
                                *fields)
        if len(rows) > 1:
            # Remove the Site Default
            filter = lambda row: row.uuid == "SITE_DEFAULT"
            rows.exclude(filter)
        elif not rows:
            # prepop hasn't run yet
            if level:
                return level
            # Fall back to the raw level keys as labels
            levels = OrderedDict()
            hierarchy_level_keys = self.hierarchy_level_keys
            for key in hierarchy_level_keys:
                if key == "L0":
                    levels[key] = COUNTRY
                else:
                    levels[key] = key
            return levels
        row = rows.first()
        if level:
            try:
                return T(row[level])
            except:
                # NOTE(review): bare except - presumably guards against a
                # missing column/label; returns the key itself
                return level
        else:
            levels = OrderedDict()
            hierarchy_level_keys = self.hierarchy_level_keys
            for key in hierarchy_level_keys:
                if key == "L0":
                    levels[key] = COUNTRY
                elif key in row and row[key]:
                    # Only include rows with values
                    levels[key] = str(T(row[key]))
            if not _location:
                # Cache the value
                self.hierarchy_levels = levels
            # NOTE(review): level is falsy in this else-arm, so the first
            # branch below is unreachable; always returns levels
            if level:
                return levels[level]
            else:
                return levels
# -------------------------------------------------------------------------
def get_strict_hierarchy(self, location=None):
"""
Returns the strict hierarchy value from the current config.
@param: location - the location_id of the record to check
"""
s3db = current.s3db
table = s3db.gis_hierarchy
# Read the system default
# @ToDo: Check for an active gis_config region?
query = (table.uuid == "SITE_DEFAULT")
if location:
# Try the Location's Country, but ensure we have the fallback available in a single query
query = query | (table.location_id == self.get_parent_country(location))
rows = current.db(query).select(table.uuid,
table.strict_hierarchy,
cache=s3db.cache)
if len(rows) > 1:
# Remove the Site Default
filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(filter)
row = rows.first()
if row:
strict = row.strict_hierarchy
else:
# Pre-pop hasn't run yet
return False
return strict
# -------------------------------------------------------------------------
def get_max_hierarchy_level(self):
"""
Returns the deepest level key (i.e. Ln) in the current hierarchy.
- used by gis_location_onvalidation()
"""
location_hierarchy = self.get_location_hierarchy()
return max(location_hierarchy)
# -------------------------------------------------------------------------
def get_all_current_levels(self, level=None):
"""
Get the current hierarchy levels plus non-hierarchy levels.
"""
all_levels = OrderedDict()
all_levels.update(self.get_location_hierarchy())
#T = current.T
#all_levels["GR"] = T("Location Group")
#all_levels["XX"] = T("Imported")
if level:
try:
return all_levels[level]
except Exception, exception:
return level
else:
return all_levels
# -------------------------------------------------------------------------
# @ToDo: There is nothing stopping someone from making extra configs that
# have country locations as their region location. Need to select here
# only those configs that belong to the hierarchy. If the L0 configs are
# created during initial db creation, then we can tell which they are
# either by recording the max id for an L0 config, or by taking the config
# with lowest id if there are more than one per country. This same issue
# applies to any other use of country configs that relies on getting the
# official set (e.g. looking up hierarchy labels).
def get_edit_level(self, level, id):
"""
Returns the edit_<level> value from the parent country hierarchy.
Used by gis_location_onvalidation()
@param id: the id of the location or an ancestor - used to find
the ancestor country location.
"""
country = self.get_parent_country(id)
s3db = current.s3db
table = s3db.gis_hierarchy
fieldname = "edit_%s" % level
# Read the system default
query = (table.uuid == "SITE_DEFAULT")
if country:
# Try the Location's Country, but ensure we have the fallback available in a single query
query = query | (table.location_id == country)
rows = current.db(query).select(table[fieldname],
cache=s3db.cache)
if len(rows) > 1:
# Remove the Site Default
filter = lambda row: row.uuid == "SITE_DEFAULT"
rows.exclude(filter)
row = rows.first()
edit = row[fieldname]
return edit
# -------------------------------------------------------------------------
@staticmethod
def get_countries(key_type="id"):
"""
Returns country code or L0 location id versus name for all countries.
The lookup is cached in the session
If key_type is "code", these are returned as an OrderedDict with
country code as the key. If key_type is "id", then the location id
is the key. In all cases, the value is the name.
"""
session = current.session
if "gis" not in session:
session.gis = Storage()
gis = session.gis
if gis.countries_by_id:
cached = True
else:
cached = False
if not cached:
s3db = current.s3db
table = s3db.gis_location
ttable = s3db.gis_location_tag
query = (table.level == "L0") & \
(ttable.tag == "ISO2") & \
(ttable.location_id == table.id)
countries = current.db(query).select(table.id,
table.name,
ttable.value,
orderby=table.name)
if not countries:
return []
countries_by_id = OrderedDict()
countries_by_code = OrderedDict()
for row in countries:
location = row["gis_location"]
countries_by_id[location.id] = location.name
countries_by_code[row["gis_location_tag"].value] = location.name
# Cache in the session
gis.countries_by_id = countries_by_id
gis.countries_by_code = countries_by_code
if key_type == "id":
return gis.countries_by_id
else:
return gis.countries_by_code
# -------------------------------------------------------------------------
@staticmethod
def get_country(key, key_type="id"):
"""
Returns country name for given code or id from L0 locations.
The key can be either location id or country code, as specified
by key_type.
"""
if key:
if current.gis.get_countries(key_type):
if key_type == "id":
return current.session.gis.countries_by_id[key]
else:
return current.session.gis.countries_by_code[key]
return None
# -------------------------------------------------------------------------
def get_parent_country(self, location, key_type="id"):
"""
Returns the parent country for a given record
@param: location: the location or id to search for
@param: key_type: whether to return an id or code
@ToDo: Optimise to not use try/except
"""
db = current.db
s3db = current.s3db
# @ToDo: Avoid try/except here!
# - separate parameters best as even isinstance is expensive
try:
# location is passed as integer (location_id)
table = s3db.gis_location
location = db(table.id == location).select(table.id,
table.path,
table.level,
limitby=(0, 1),
cache=s3db.cache).first()
except:
# location is passed as record
pass
if location.level == "L0":
if key_type == "id":
return location.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == location.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
else:
parents = self.get_parents(location.id,
feature=location)
if parents:
for row in parents:
if row.level == "L0":
if key_type == "id":
return row.id
elif key_type == "code":
ttable = s3db.gis_location_tag
query = (ttable.tag == "ISO2") & \
(ttable.location_id == row.id)
tag = db(query).select(ttable.value,
limitby=(0, 1)).first()
try:
return tag.value
except:
return None
return None
# -------------------------------------------------------------------------
def get_default_country(self, key_type="id"):
"""
Returns the default country for the active gis_config
@param: key_type: whether to return an id or code
"""
config = GIS.get_config()
if config.default_location_id:
return self.get_parent_country(config.default_location_id)
return None
# -------------------------------------------------------------------------
    def get_features_in_polygon(self, location, tablename=None, category=None):
        """
        Returns a gluon.sql.Rows of Features within a Polygon.
        The Polygon can be either a WKT string or the ID of a record in the
        gis_location table

        Currently unused.
        @ToDo: Optimise to not use try/except

        @param location: a gis_location id (int) or a WKT polygon string
        @param tablename: the resource table whose features to search
        @param category: unused here - presumably a planned filter; verify
                         against callers before removing
        """
        from shapely.geos import ReadingError
        from shapely.wkt import loads as wkt_loads
        db = current.db
        s3db = current.s3db
        locations = s3db.gis_location
        try:
            location_id = int(location)
            # Check that the location is a polygon
            query = (locations.id == location_id)
            location = db(query).select(locations.wkt,
                                        locations.lon_min,
                                        locations.lon_max,
                                        locations.lat_min,
                                        locations.lat_max,
                                        limitby=(0, 1)).first()
            if location:
                wkt = location.wkt
                if wkt and (wkt.startswith("POLYGON") or \
                            wkt.startswith("MULTIPOLYGON")):
                    # ok
                    lon_min = location.lon_min
                    lon_max = location.lon_max
                    lat_min = location.lat_min
                    lat_max = location.lat_max
                else:
                    s3_debug("Location searched within isn't a Polygon!")
                    return None
        except: # @ToDo: need specific exception
            # int() failed: treat location as a raw WKT string
            wkt = location
            if (wkt.startswith("POLYGON") or wkt.startswith("MULTIPOLYGON")):
                # ok
                # No record, so no bbox available: flag for the slow path below
                lon_min = None
            else:
                s3_debug("This isn't a Polygon!")
                return None
        # NOTE(review): if the id lookup succeeds but returns no record,
        # wkt is unbound here; the resulting NameError is swallowed by the
        # bare except below and mis-reported as "Invalid Polygon!"
        try:
            polygon = wkt_loads(wkt)
        except: # @ToDo: need specific exception
            s3_debug("Invalid Polygon!")
            return None
        table = s3db[tablename]
        if "location_id" not in table.fields():
            # @ToDo: Add any special cases to be able to find the linked location
            s3_debug("This table doesn't have a location_id!")
            return None
        query = (table.location_id == locations.id)
        if "deleted" in table.fields:
            query = query & (table.deleted == False)
        # @ToDo: Check AAA (do this as a resource filter?)
        features = db(query).select(locations.wkt,
                                    locations.lat,
                                    locations.lon,
                                    table.ALL)
        output = Rows()
        # @ToDo: provide option to use PostGIS/Spatialite
        # settings = current.deployment_settings
        # if settings.gis.spatialdb and settings.database.db_type == "postgres":
        if lon_min is None:
            # We have no BBOX so go straight to the full geometry check
            for row in features:
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    # Fall back to a point geometry built from lat/lon
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    s3_debug(
                        "Error reading wkt of location with id",
                        value=row.id
                    )
        else:
            # 1st check for Features included within the bbox (faster)
            def in_bbox(row):
                _location = row.gis_location
                return (_location.lon > lon_min) & \
                       (_location.lon < lon_max) & \
                       (_location.lat > lat_min) & \
                       (_location.lat < lat_max)
            for row in features.find(lambda row: in_bbox(row)):
                # Search within this subset with a full geometry check
                # Uses Shapely.
                _location = row.gis_location
                wkt = _location.wkt
                if wkt is None:
                    # Fall back to a point geometry built from lat/lon
                    lat = _location.lat
                    lon = _location.lon
                    if lat is not None and lon is not None:
                        wkt = self.latlon_to_wkt(lat, lon)
                    else:
                        continue
                try:
                    shape = wkt_loads(wkt)
                    if shape.intersects(polygon):
                        # Save Record
                        output.records.append(row)
                except ReadingError:
                    s3_debug(
                        "Error reading wkt of location with id",
                        value = row.id,
                    )
        return output
# -------------------------------------------------------------------------
    def get_features_in_radius(self, lat, lon, radius, tablename=None, category=None):
        """
        Returns Features within a Radius (in km) of a LatLon Location

        Unused

        @param lat, lon: centre of the search circle (degrees)
        @param radius: search radius in km
        @param tablename: optional resource table to join to the locations
        @param category: unused here (see @ToDo notes below)
        """
        import math
        db = current.db
        settings = current.deployment_settings
        if settings.gis.spatialdb and settings.database.db_type == "postgres":
            # Use PostGIS routine
            # The ST_DWithin function call will automatically include a bounding box comparison that will make use of any indexes that are available on the geometries.
            # @ToDo: Support optional Category (make this a generic filter?)
            import psycopg2
            import psycopg2.extras
            dbname = settings.database.database
            username = settings.database.username
            password = settings.database.password
            host = settings.database.host
            port = settings.database.port or "5432"
            # Convert km to degrees (since we're using the_geom not the_geog)
            radius = math.degrees(float(radius) / RADIUS_EARTH)
            connection = psycopg2.connect("dbname=%s user=%s password=%s host=%s port=%s" % (dbname, username, password, host, port))
            cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
            # NOTE(review): SQL built by string interpolation - safe only if
            # tablename comes from trusted code, never from user input
            info_string = "SELECT column_name, udt_name FROM information_schema.columns WHERE table_name = 'gis_location' or table_name = '%s';" % tablename
            cursor.execute(info_string)
            # @ToDo: Look at more optimal queries for just those fields we need
            # NOTE(review): WKT POINT takes (x y) = (lon lat); these queries
            # pass lat first - looks like the axes are swapped, confirm
            # against PostGIS docs / callers before relying on results
            if tablename:
                # Lookup the resource
                query_string = cursor.mogrify("SELECT * FROM gis_location, %s WHERE %s.location_id = gis_location.id and ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (tablename, tablename, lat, lon, radius))
            else:
                # Lookup the raw Locations
                query_string = cursor.mogrify("SELECT * FROM gis_location WHERE ST_DWithin (ST_GeomFromText ('POINT (%s %s)', 4326), the_geom, %s);" % (lat, lon, radius))
            cursor.execute(query_string)
            # @ToDo: Export Rows?
            features = []
            for record in cursor:
                d = dict(record.items())
                row = Storage()
                # @ToDo: Optional support for Polygons
                if tablename:
                    # Mimic the web2py Rows shape used elsewhere in this class
                    row.gis_location = Storage()
                    row.gis_location.id = d["id"]
                    row.gis_location.lat = d["lat"]
                    row.gis_location.lon = d["lon"]
                    row.gis_location.lat_min = d["lat_min"]
                    row.gis_location.lon_min = d["lon_min"]
                    row.gis_location.lat_max = d["lat_max"]
                    row.gis_location.lon_max = d["lon_max"]
                    row[tablename] = Storage()
                    row[tablename].id = d["id"]
                    row[tablename].name = d["name"]
                else:
                    row.name = d["name"]
                    row.id = d["id"]
                    row.lat = d["lat"]
                    row.lon = d["lon"]
                    row.lat_min = d["lat_min"]
                    row.lon_min = d["lon_min"]
                    row.lat_max = d["lat_max"]
                    row.lon_max = d["lon_max"]
                features.append(row)
            return features
        #elif settings.database.db_type == "mysql":
            # Do the calculation in MySQL to pull back only the relevant rows
            # Raw MySQL Formula from: http://blog.peoplesdns.com/archives/24
            # PI = 3.141592653589793, mysql's pi() function returns 3.141593
            #pi = math.pi
            #query = """SELECT name, lat, lon, acos(SIN( PI()* 40.7383040 /180 )*SIN( PI()*lat/180 ))+(cos(PI()* 40.7383040 /180)*COS( PI()*lat/180) *COS(PI()*lon/180-PI()* -73.99319 /180))* 3963.191
            #AS distance
            #FROM gis_location
            #WHERE 1=1
            #AND 3963.191 * ACOS( (SIN(PI()* 40.7383040 /180)*SIN(PI() * lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180))) < = 1.5
            #ORDER BY 3963.191 * ACOS((SIN(PI()* 40.7383040 /180)*SIN(PI()*lat/180)) + (COS(PI()* 40.7383040 /180)*cos(PI()*lat/180)*COS(PI() * lon/180-PI()* -73.99319 /180)))"""
            # db.executesql(query)
        else:
            # Calculate in Python
            # Pull back all the rows within a square bounding box (faster than checking all features manually)
            # Then check each feature within this subset
            # http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
            # @ToDo: Support optional Category (make this a generic filter?)
            # shortcuts
            radians = math.radians
            degrees = math.degrees
            MIN_LAT = radians(-90)      # -PI/2
            MAX_LAT = radians(90)       # PI/2
            MIN_LON = radians(-180)     # -PI
            MAX_LON = radians(180)      # PI
            # Convert to radians for the calculation
            r = float(radius) / RADIUS_EARTH
            radLat = radians(lat)
            radLon = radians(lon)
            # Calculate the bounding box
            minLat = radLat - r
            maxLat = radLat + r
            if (minLat > MIN_LAT) and (maxLat < MAX_LAT):
                deltaLon = math.asin(math.sin(r) / math.cos(radLat))
                minLon = radLon - deltaLon
                if (minLon < MIN_LON):
                    minLon += 2 * math.pi
                maxLon = radLon + deltaLon
                if (maxLon > MAX_LON):
                    maxLon -= 2 * math.pi
            else:
                # Special care for Poles & 180 Meridian:
                # http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates#PolesAnd180thMeridian
                minLat = max(minLat, MIN_LAT)
                maxLat = min(maxLat, MAX_LAT)
                minLon = MIN_LON
                maxLon = MAX_LON
            # Convert back to degrees
            minLat = degrees(minLat)
            minLon = degrees(minLon)
            maxLat = degrees(maxLat)
            maxLon = degrees(maxLon)
            # shortcut
            locations = db.gis_location
            query = (locations.lat > minLat) & (locations.lat < maxLat) & (locations.lon > minLon) & (locations.lon < maxLon)
            deleted = (locations.deleted == False)
            empty = (locations.lat != None) & (locations.lon != None)
            query = deleted & empty & query
            if tablename:
                # Lookup the resource
                table = current.s3db[tablename]
                query = query & (table.location_id == locations.id)
                records = db(query).select(table.ALL,
                                           locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            else:
                # Lookup the raw Locations
                records = db(query).select(locations.id,
                                           locations.name,
                                           locations.level,
                                           locations.lat,
                                           locations.lon,
                                           locations.lat_min,
                                           locations.lon_min,
                                           locations.lat_max,
                                           locations.lon_max)
            features = Rows()
            for record in records:
                # Calculate the Great Circle distance
                if tablename:
                    distance = self.greatCircleDistance(lat,
                                                        lon,
                                                        record.gis_location.lat,
                                                        record.gis_location.lon)
                else:
                    distance = self.greatCircleDistance(lat,
                                                        lon,
                                                        record.lat,
                                                        record.lon)
                if distance < radius:
                    features.records.append(record)
                else:
                    # skip
                    continue
            return features
# -------------------------------------------------------------------------
def get_latlon(self, feature_id, filter=False):
"""
Returns the Lat/Lon for a Feature
used by display_feature() in gis controller
@param feature_id: the feature ID
@param filter: Filter out results based on deployment_settings
"""
db = current.db
table = db.gis_location
feature = db(table.id == feature_id).select(table.id,
table.lat,
table.lon,
table.parent,
table.path,
limitby=(0, 1)).first()
# Zero is an allowed value, hence explicit test for None.
if "lon" in feature and "lat" in feature and \
(feature.lat is not None) and (feature.lon is not None):
return dict(lon=feature.lon, lat=feature.lat)
else:
# Step through ancestors to first with lon, lat.
parents = self.get_parents(feature.id, feature=feature)
if parents:
lon = lat = None
for row in parents:
if "lon" in row and "lat" in row and \
(row.lon is not None) and (row.lat is not None):
return dict(lon=row.lon, lat=row.lat)
# Invalid feature_id
return None
# -------------------------------------------------------------------------
@staticmethod
def get_marker(controller=None,
function=None,
):
"""
Returns a Marker dict
- called by S3REST: S3Resource.export_tree() for non-geojson resources
- called by S3Search
"""
marker = None
if controller and function:
# Lookup marker in the gis_feature table
db = current.db
s3db = current.s3db
ftable = s3db.gis_layer_feature
ltable = s3db.gis_layer_symbology
mtable = s3db.gis_marker
try:
symbology_id = current.response.s3.gis.config.symbology_id
except:
# Config not initialised yet
config = current.gis.get_config()
symbology_id = config.symbology_id
query = (ftable.controller == controller) & \
(ftable.function == function) & \
(ftable.id == ltable.layer_id) & \
(ltable.symbology_id == symbology_id) & \
(ltable.marker_id == mtable.id)
marker = db(query).select(mtable.image,
mtable.height,
mtable.width,
ltable.gps_marker).first()
if marker:
_marker = marker["gis_marker"]
marker = dict(image=_marker.image,
height=_marker.height,
width=_marker.width,
gps_marker=marker["gis_layer_symbology"].gps_marker
)
if not marker:
# Default
marker = Marker().as_dict()
return marker
# -------------------------------------------------------------------------
    @staticmethod
    def get_locations_and_popups(resource,
                                 layer_id=None
                                 ):
        """
        Returns the locations and popup tooltips for a Map Layer
        e.g. Feature Layers or Search results (Feature Resources)

        Called by S3REST: S3Resource.export_tree()
        @param: resource - S3Resource instance (required)
        @param: layer_id - db.gis_layer_feature.id (Feature Layers only)

        @return: dict with keys latlons, wkts, geojsons (each keyed by
                 tablename) and tooltips; or None if the resource can't
                 be mapped
        """
        if DEBUG:
            start = datetime.datetime.now()
        db = current.db
        s3db = current.s3db
        request = current.request
        gis = current.gis
        format = current.auth.permission.format
        ftable = s3db.gis_layer_feature
        layer = None
        if layer_id:
            # Feature Layer called by S3REST: S3Resource.export_tree()
            query = (ftable.id == layer_id)
            layer = db(query).select(ftable.trackable,
                                     ftable.polygons,
                                     ftable.popup_label,
                                     ftable.popup_fields,
                                     limitby=(0, 1)).first()
        else:
            # e.g. Search results loaded as a Feature Resource layer
            query = (ftable.controller == request.controller) & \
                    (ftable.function == request.function)
            layers = db(query).select(ftable.trackable,
                                      ftable.polygons,
                                      ftable.popup_label,
                                      ftable.popup_fields,
                                      )
            if len(layers) > 1:
                # We can't provide details for the whole layer, but need to do a per-record check
                # Suggest creating separate controllers to avoid this problem
                return None
            elif layers:
                layer = layers.first()
        if layer:
            popup_label = layer.popup_label
            popup_fields = layer.popup_fields
            trackable = layer.trackable
            polygons = layer.polygons
        else:
            # No layer config found: use defaults
            popup_label = ""
            popup_fields = "name"
            trackable = False
            polygons = False
        table = resource.table
        tablename = resource.tablename
        tooltips = {}
        if format == "geojson":
            # Build the Popup Tooltips now so that representations can be
            # looked-up in bulk rather than as a separate lookup per record
            label_off = request.vars.get("label_off", None)
            if popup_label and not label_off:
                _tooltip = "(%s)" % current.T(popup_label)
            else:
                _tooltip = ""
            if popup_fields:
                popup_fields = popup_fields.split("/")
            if popup_fields:
                represents = {}
                for fieldname in popup_fields:
                    if fieldname in table:
                        field = table[fieldname]
                        _represents = GIS.get_representation(field, resource)
                        represents[fieldname] = _represents
                    else:
                        # Assume a virtual field
                        represents[fieldname] = None
            for record in resource:
                tooltip = _tooltip
                if popup_fields:
                    first = True
                    for fieldname in popup_fields:
                        try:
                            value = record[fieldname]
                        except KeyError:
                            continue
                        if not value:
                            continue
                        field_reps = represents[fieldname]
                        if field_reps:
                            try:
                                represent = field_reps[value]
                            except:
                                # list:string
                                represent = field_reps[str(value)]
                        else:
                            # Virtual Field
                            represent = value
                        if first:
                            # First field goes in front of the label
                            tooltip = "%s %s" % (represent, tooltip)
                            first = False
                        elif value:
                            tooltip = "%s<br />%s" % (tooltip, represent)
                tooltips[record.id] = tooltip
            # NOTE(review): this stores the dict inside itself (recursive
            # reference) - presumably meant to mirror the _latlons pattern
            # below with a separate outer dict; confirm against consumers
            tooltips[tablename] = tooltips
            if DEBUG:
                end = datetime.datetime.now()
                duration = end - start
                duration = '{:.2f}'.format(duration.total_seconds())
                query = (ftable.id == layer_id)
                layer_name = db(query).select(ftable.name,
                                              limitby=(0, 1)).first().name
                _debug("tooltip lookup of layer %s completed in %s seconds" % \
                        (layer_name, duration))
        # Lookup the LatLons now so that it can be done as a single
        # query rather than per record
        if DEBUG:
            start = datetime.datetime.now()
        latlons = {}
        wkts = {}
        geojsons = {}
        gtable = s3db.gis_location
        if trackable:
            # Use S3Track
            ids = resource._ids
            try:
                tracker = S3Trackable(table, record_id=ids)
            except SyntaxError:
                # This table isn't trackable
                pass
            else:
                _latlons = tracker.get_location(_fields=[gtable.lat,
                                                         gtable.lon])
                index = 0
                for id in ids:
                    _location = _latlons[index]
                    latlons[id] = (_location.lat, _location.lon)
                    index += 1
        if not latlons:
            # Join to the locations table, directly or via org_site
            if "location_id" in table.fields:
                query = (table.id.belongs(resource._ids)) & \
                        (table.location_id == gtable.id)
            elif "site_id" in table.fields:
                stable = s3db.org_site
                query = (table.id.belongs(resource._ids)) & \
                        (table.site_id == stable.site_id) & \
                        (stable.location_id == gtable.id)
            else:
                # Can't display this resource on the Map
                return None
            if polygons:
                if current.deployment_settings.get_gis_spatialdb():
                    if format == "geojson":
                        # Do the Simplify & GeoJSON direct from the DB
                        rows = db(query).select(table.id,
                                                gtable.the_geom.st_simplify(0.001).st_asgeojson(precision=4).with_alias("geojson"))
                        for row in rows:
                            geojsons[row[tablename].id] = row["gis_location"].geojson
                    else:
                        # Do the Simplify direct from the DB
                        rows = db(query).select(table.id,
                                                gtable.the_geom.st_simplify(0.001).st_astext().with_alias("wkt"))
                        for row in rows:
                            wkts[row[tablename].id] = row["gis_location"].wkt
                else:
                    rows = db(query).select(table.id,
                                            gtable.wkt)
                    if format == "geojson":
                        for row in rows:
                            # Simplify the polygon to reduce download size
                            geojson = gis.simplify(row["gis_location"].wkt, output="geojson")
                            if geojson:
                                geojsons[row[tablename].id] = geojson
                    else:
                        for row in rows:
                            # Simplify the polygon to reduce download size
                            # & also to work around the recursion limit in libxslt
                            # http://blog.gmane.org/gmane.comp.python.lxml.devel/day=20120309
                            wkt = gis.simplify(row["gis_location"].wkt)
                            if wkt:
                                wkts[row[tablename].id] = wkt
            else:
                # Points
                rows = db(query).select(table.id,
                                        gtable.path,
                                        gtable.lat,
                                        gtable.lon)
                for row in rows:
                    _location = row["gis_location"]
                    latlons[row[tablename].id] = (_location.lat, _location.lon)
        _latlons = {}
        _latlons[tablename] = latlons
        _wkts = {}
        _wkts[tablename] = wkts
        _geojsons = {}
        _geojsons[tablename] = geojsons
        if DEBUG:
            end = datetime.datetime.now()
            duration = end - start
            duration = '{:.2f}'.format(duration.total_seconds())
            # NOTE(review): layer_name is only bound in the geojson branch
            # above - with DEBUG on and a non-geojson format this raises
            # NameError; confirm and hoist the lookup if so
            _debug("latlons lookup of layer %s completed in %s seconds" % \
                    (layer_name, duration))
        # Used by S3XML's gis_encode()
        return dict(latlons = _latlons,
                    wkts = _wkts,
                    geojsons = _geojsons,
                    tooltips = tooltips,
                    )
# -------------------------------------------------------------------------
    @staticmethod
    def get_representation(field,
                           resource=None,
                           value=None):
        """
        Return a quick representation for a Field based on it's value
        - faster than field.represent(value)
        Used by get_locations_and_popup()

        @param field: the Field to represent
        @param resource: if given, build a bulk {value: representation}
                         dict for all records in the resource
        @param value: single value to represent (when no resource given)

        @ToDo: Move out of S3GIS
        """
        db = current.db
        s3db = current.s3db
        cache = current.cache
        fieldname = field.name
        tablename = field.tablename
        if resource:
            # We can lookup the representations in bulk rather than 1/record
            if DEBUG:
                start = datetime.datetime.now()
            represents = {}
            values = [record[fieldname] for record in resource]
            # Deduplicate including non-hashable types (lists)
            #values = list(set(values))
            seen = set()
            values = [ x for x in values if str(x) not in seen and not seen.add(str(x)) ]
            if fieldname == "type":
                # Special-cased option sets
                if tablename == "hrm_human_resource":
                    for value in values:
                        represents[value] = s3db.hrm_type_opts.get(value, "")
                elif tablename == "org_office":
                    for value in values:
                        represents[value] = s3db.org_office_type_opts.get(value, "")
            elif s3_has_foreign_key(field, m2m=False):
                # Strip the "reference " prefix to get the looked-up table
                tablename = field.type[10:]
                if tablename == "pr_person":
                    represents = s3_fullname(values)
                    # Need to modify this function to be able to handle bulk lookups
                    #for value in values:
                    #    represents[value] = s3_fullname(value)
                else:
                    table = s3db[tablename]
                    if "name" in table.fields:
                        # Simple Name lookup faster than full represent
                        rows = db(table.id.belongs(values)).select(table.id,
                                                                   table.name)
                        for row in rows:
                            represents[row.id] = row.name
                    else:
                        # Do the normal represent
                        for value in values:
                            represents[value] = field.represent(value)
            elif field.type.startswith("list"):
                # Do the normal represent
                # (keyed by str() as list values aren't hashable)
                for value in values:
                    represents[str(value)] = field.represent(value)
            else:
                # Fallback representation is the value itself
                for value in values:
                    represents[value] = value
            if DEBUG:
                end = datetime.datetime.now()
                duration = end - start
                duration = '{:.2f}'.format(duration.total_seconds())
                _debug("representation of %s completed in %s seconds" % \
                        (fieldname, duration))
            return represents
        else:
            # We look up the represention for just this one value at a time
            # If the field is an integer lookup then returning that isn't much help
            # NOTE(review): if fieldname == "type" but tablename matches
            # neither special case, represent is never bound and the final
            # return raises UnboundLocalError - confirm callers can't hit this
            if fieldname == "type":
                if tablename == "hrm_human_resource":
                    represent = cache.ram("hrm_type_%s" % value,
                                          lambda: s3db.hrm_type_opts.get(value, ""),
                                          time_expire=60)
                elif tablename == "org_office":
                    represent = cache.ram("office_type_%s" % value,
                                          lambda: s3db.org_office_type_opts.get(value, ""),
                                          time_expire=60)
            elif s3_has_foreign_key(field, m2m=False):
                tablename = field.type[10:]
                if tablename == "pr_person":
                    # Unlikely to be the same person in multiple popups so no value to caching
                    represent = s3_fullname(value)
                else:
                    table = s3db[tablename]
                    if "name" in table.fields:
                        # Simple Name lookup faster than full represent
                        represent = cache.ram("%s_%s_%s" % (tablename, fieldname, value),
                                              lambda: db(table.id == value).select(table.name,
                                                                                   limitby=(0, 1)).first().name,
                                              time_expire=60)
                    else:
                        # Do the normal represent
                        represent = cache.ram("%s_%s_%s" % (tablename, fieldname, value),
                                              lambda: field.represent(value),
                                              time_expire=60)
            elif field.type.startswith("list"):
                # Do the normal represent
                represent = cache.ram("%s_%s_%s" % (tablename, fieldname, value),
                                      lambda: field.represent(value),
                                      time_expire=60)
            else:
                # Fallback representation is the value itself
                represent = value
            return represent
# -------------------------------------------------------------------------
@staticmethod
def get_theme_geojson(resource):
"""
Lookup Theme Layer polygons once per layer and not per-record
Called by S3REST: S3Resource.export_tree()
"""
db = current.db
s3db = current.s3db
tablename = "gis_theme_data"
table = s3db.gis_theme_data
gtable = s3db.gis_location
query = (table.id.belongs(resource._ids)) & \
(table.location_id == gtable.id)
geojsons = {}
if current.deployment_settings.get_gis_spatialdb():
# Do the Simplify & GeoJSON direct from the DB
rows = db(query).select(table.id,
gtable.the_geom.st_simplify(0.001).st_asgeojson(precision=4).with_alias("geojson"))
for row in rows:
geojsons[row[tablename].id] = row["gis_location"].geojson
else:
rows = db(query).select(table.id,
gtable.wkt)
gis = current.gis
for row in rows:
# Simplify the polygon to reduce download size
geojson = gis.simplify(row["gis_location"].wkt, output="geojson")
if geojson:
geojsons[row[tablename].id] = geojson
_geojsons = {}
_geojsons[tablename] = geojsons
# return 'locations'
return dict(geojsons = _geojsons)
# -------------------------------------------------------------------------
@staticmethod
def greatCircleDistance(lat1, lon1, lat2, lon2, quick=True):
"""
Calculate the shortest distance (in km) over the earth's sphere between 2 points
Formulae from: http://www.movable-type.co.uk/scripts/latlong.html
(NB We should normally use PostGIS functions, where possible, instead of this query)
"""
import math
# shortcuts
cos = math.cos
sin = math.sin
radians = math.radians
if quick:
# Spherical Law of Cosines (accurate down to around 1m & computationally quick)
acos = math.acos
lat1 = radians(lat1)
lat2 = radians(lat2)
lon1 = radians(lon1)
lon2 = radians(lon2)
distance = acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(lon2-lon1)) * RADIUS_EARTH
return distance
else:
# Haversine
#asin = math.asin
atan2 = math.atan2
sqrt = math.sqrt
pow = math.pow
dLat = radians(lat2-lat1)
dLon = radians(lon2-lon1)
a = pow(sin(dLat / 2), 2) + cos(radians(lat1)) * cos(radians(lat2)) * pow(sin(dLon / 2), 2)
c = 2 * atan2(sqrt(a), sqrt(1-a))
#c = 2 * asin(sqrt(a)) # Alternate version
# Convert radians to kilometers
distance = RADIUS_EARTH * c
return distance
# -------------------------------------------------------------------------
def import_admin_areas(self,
source="gadmv1",
countries=[],
levels=["L0", "L1", "L2"]
):
"""
Import Admin Boundaries into the Locations table
@param source - Source to get the data from.
Currently only GADM is supported: http://gadm.org
@param countries - List of ISO2 countrycodes to download data for
defaults to all countries
@param levels - Which levels of the hierarchy to import.
defaults to all 3 supported levels
"""
if source == "gadmv1":
try:
from osgeo import ogr
except:
s3_debug("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm1_L0(ogr, countries=countries)
if "L1" in levels:
self.import_gadm1(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm1(ogr, "L2", countries=countries)
s3_debug("All done!")
elif source == "gadmv1":
try:
from osgeo import ogr
except:
s3_debug("Unable to import ogr. Please install python-gdal bindings: GDAL-1.8.1+")
return
if "L0" in levels:
self.import_gadm2(ogr, "L0", countries=countries)
if "L1" in levels:
self.import_gadm2(ogr, "L1", countries=countries)
if "L2" in levels:
self.import_gadm2(ogr, "L2", countries=countries)
s3_debug("All done!")
else:
s3_debug("Only GADM is currently supported")
return
return
# -------------------------------------------------------------------------
    @staticmethod
    def import_gadm1_L0(ogr, countries=[]):
        """
            Import L0 Admin Boundaries into the Locations table from GADMv1
            - designed to be called from import_admin_areas()
            - assumes that basic prepop has been done, so that no new records
              need to be created: existing L0 rows are updated in-place with
              their Polygon WKT, and an ISO3 tag is added

            @param ogr: the OGR Python module (passed in by the caller)
            @param countries: list of ISO2 country codes to import data for;
                              defaults to all countries
                              (NOTE(review): mutable default, but only read,
                              never mutated here)

            Side effects: downloads/caches the GADM zipfile, temporarily
            changes the process working directory (restored at the end),
            updates gis_location rows and commits the DB transaction.
        """

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location
        ttable = s3db.gis_location_tag

        layer = {
            "url" : "http://gadm.org/data/gadm_v1_lev0_shp.zip",
            "zipfile" : "gadm_v1_lev0_shp.zip",
            "shapefile" : "gadm1_lev0",
            "codefield" : "ISO2", # This field is used to uniquely identify the L0 for updates
            "code2field" : "ISO"  # This field is used to uniquely identify the L0 for parenting the L1s
        }

        # Copy the current working directory to revert back to later
        old_working_directory = os.getcwd()

        # Create the working directory
        if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv1 as a cache
            TEMP = os.path.join(os.getcwd(), "temp")
        else:
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv1")
        try:
            os.mkdir(tempPath)
        except OSError:
            # Folder already exists - reuse
            pass

        # Set the current working directory
        os.chdir(tempPath)

        layerName = layer["shapefile"]

        # Check if file has already been downloaded
        fileName = layer["zipfile"]
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            url = layer["url"]
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                # NOTE(review): returns with the working directory still
                # changed to tempPath - confirm this is acceptable to callers
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        s3_debug("Unzipping %s" % layerName)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            # NOTE(review): shapefile members are binary but are written in
            # text mode ("w") - this only round-trips cleanly on POSIX
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Use OGR to read Shapefile
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open( "%s.shp" % layerName )
        if ds is None:
            s3_debug("Open failed.\n")
            return

        lyr = ds.GetLayerByName( layerName )

        lyr.ResetReading()

        codeField = layer["codefield"]
        code2Field = layer["code2field"]
        for feat in lyr:
            code = feat.GetField(codeField)
            if not code:
                # Skip the entries which aren't countries
                continue
            if countries and code not in countries:
                # Skip the countries which we're not interested in
                continue

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    pass
                else:
                    # Match the existing L0 record via its ISO2 tag
                    query = (table.id == ttable.location_id) & \
                            (ttable.tag == "ISO2") & \
                            (ttable.value == code)
                    wkt = geom.ExportToWkt()
                    # Map the WKT prefix to the numeric
                    # gis_location.gis_feature_type code
                    # NOTE(review): no branch for a plain "POINT" WKT here, so
                    # gis_feature_type would be unbound in that case;
                    # presumably unreachable because wkbPoint is skipped
                    # above - confirm for 2.5D point types
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    code2 = feat.GetField(code2Field)
                    #area = feat.GetField("Shape_Area")
                    try:
                        id = db(query).select(table.id,
                                              limitby=(0, 1)).first().id
                        query = (table.id == id)
                        db(query).update(gis_feature_type=gis_feature_type,
                                         wkt=wkt)
                        # Record the ISO3 code as a tag for L1 parenting
                        ttable.insert(location_id = id,
                                      tag = "ISO3",
                                      value = code2)
                        #ttable.insert(location_id = location_id,
                        #              tag = "area",
                        #              value = area)
                    except db._adapter.driver.OperationalError, exception:
                        s3_debug(exception)

            else:
                s3_debug("No geometry\n")

        # Close the shapefile
        ds.Destroy()

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(old_working_directory)

        return
# -------------------------------------------------------------------------
    def import_gadm1(self, ogr, level="L1", countries=[]):
        """
            Import L1/L2 Admin Boundaries into the Locations table from GADMv1
            - designed to be called from import_admin_areas()
            - assumes a fresh database with just Countries imported

            @param ogr: the OGR Python module (passed in by the caller)
            @param level: "L1" or "L2"
            @param countries: list of ISO2 country codes to download data for;
                              defaults to all countries

            Strategy: the shapefile is converted to CSV (via a simplified
            ogr2ogr) so that names can be read with an explicit encoding,
            while the geometry is read from the shapefile itself; rows are
            matched positionally (same index in CSV and shapefile layer).
        """
        if level == "L1":
            layer = {
                "url" : "http://gadm.org/data/gadm_v1_lev1_shp.zip",
                "zipfile" : "gadm_v1_lev1_shp.zip",
                "shapefile" : "gadm1_lev1",
                "namefield" : "NAME_1",
                # Uniquely identify the L1 for updates
                "sourceCodeField" : "ID_1",
                "edenCodeField" : "GADM1",
                # Uniquely identify the L0 for parenting the L1s
                "parent" : "L0",
                "parentSourceCodeField" : "ISO",
                "parentEdenCodeField" : "ISO3",
            }
        elif level == "L2":
            layer = {
                "url" : "http://biogeo.ucdavis.edu/data/gadm/gadm_v1_lev2_shp.zip",
                "zipfile" : "gadm_v1_lev2_shp.zip",
                "shapefile" : "gadm_v1_lev2",
                "namefield" : "NAME_2",
                # Uniquely identify the L2 for updates
                "sourceCodeField" : "ID_2",
                "edenCodeField" : "GADM2",
                # Uniquely identify the L0 for parenting the L1s
                "parent" : "L1",
                "parentSourceCodeField" : "ID_1",
                "parentEdenCodeField" : "GADM1",
            }
        else:
            s3_debug("Level %s not supported!" % level)
            return

        import csv
        import shutil
        import zipfile

        db = current.db
        s3db = current.s3db
        cache = s3db.cache
        table = s3db.gis_location
        ttable = s3db.gis_location_tag

        csv.field_size_limit(2**20 * 100) # 100 megs

        # Not all the data is encoded like this
        # (unable to determine encoding - appears to be damaged in source):
        # Azerbaijan L1
        # Vietnam L1 & L2
        ENCODING = "cp1251"

        # Generator: decode each CSV cell from ENCODING to unicode
        # from http://docs.python.org/library/csv.html#csv-examples
        def latin_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
            for row in csv.reader(unicode_csv_data):
                yield [unicode(cell, ENCODING) for cell in row]

        # Generator: yield each CSV row as a header->value dict
        def latin_dict_reader(data, dialect=csv.excel, **kwargs):
            reader = latin_csv_reader(data, dialect=dialect, **kwargs)
            headers = reader.next()
            for r in reader:
                yield dict(zip(headers, r))

        # Copy the current working directory to revert back to later
        old_working_directory = os.getcwd()

        # Create the working directory
        if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv1 as a cache
            TEMP = os.path.join(os.getcwd(), "temp")
        else:
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv1")
        try:
            os.mkdir(tempPath)
        except OSError:
            # Folder already exists - reuse
            pass

        # Set the current working directory
        os.chdir(tempPath)

        # Remove any existing CSV folder to allow the new one to be created
        try:
            shutil.rmtree("CSV")
        except OSError:
            # Folder doesn't exist, so should be creatable
            pass

        layerName = layer["shapefile"]

        # Check if file has already been downloaded
        fileName = layer["zipfile"]
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            url = layer["url"]
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                # Revert back to the working directory as before.
                os.chdir(old_working_directory)
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        s3_debug("Unzipping %s" % layerName)
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            # NOTE(review): binary shapefile members written in text mode
            # ("w") - this only round-trips cleanly on POSIX
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Convert to CSV
        s3_debug("Converting %s.shp to CSV" % layerName)
        # Simplified version of generic Shapefile Importer:
        # http://svn.osgeo.org/gdal/trunk/gdal/swig/python/samples/ogr2ogr.py
        bSkipFailures = False
        nGroupTransactions = 200
        nFIDToFetch = ogr.NullFID
        inputFileName = "%s.shp" % layerName
        inputDS = ogr.Open(inputFileName, False)
        outputFileName = "CSV"
        outputDriver = ogr.GetDriverByName("CSV")
        outputDS = outputDriver.CreateDataSource(outputFileName, options=[])
        # GADM only has 1 layer/source
        inputLayer = inputDS.GetLayer(0)
        inputFDefn = inputLayer.GetLayerDefn()
        # Create the output Layer
        outputLayer = outputDS.CreateLayer(layerName)
        # Copy all Fields
        papszFieldTypesToString = []
        inputFieldCount = inputFDefn.GetFieldCount()
        # panMap[i] = index of input field i in the output layer (-1 = unmapped)
        panMap = [-1 for i in range(inputFieldCount)]
        outputFDefn = outputLayer.GetLayerDefn()
        nDstFieldCount = 0
        if outputFDefn is not None:
            nDstFieldCount = outputFDefn.GetFieldCount()
        for iField in range(inputFieldCount):
            inputFieldDefn = inputFDefn.GetFieldDefn(iField)
            oFieldDefn = ogr.FieldDefn(inputFieldDefn.GetNameRef(),
                                       inputFieldDefn.GetType())
            oFieldDefn.SetWidth(inputFieldDefn.GetWidth())
            oFieldDefn.SetPrecision(inputFieldDefn.GetPrecision())
            # The field may have been already created at layer creation
            iDstField = -1;
            if outputFDefn is not None:
                iDstField = outputFDefn.GetFieldIndex(oFieldDefn.GetNameRef())
            if iDstField >= 0:
                panMap[iField] = iDstField
            elif outputLayer.CreateField( oFieldDefn ) == 0:
                # now that we've created a field, GetLayerDefn() won't return NULL
                if outputFDefn is None:
                    outputFDefn = outputLayer.GetLayerDefn()
                panMap[iField] = nDstFieldCount
                nDstFieldCount = nDstFieldCount + 1
        # Transfer features
        nFeaturesInTransaction = 0
        iSrcZField = -1
        inputLayer.ResetReading()
        if nGroupTransactions > 0:
            outputLayer.StartTransaction()
        while True:
            poDstFeature = None
            if nFIDToFetch != ogr.NullFID:
                # Only fetch feature on first pass.
                if nFeaturesInTransaction == 0:
                    poFeature = inputLayer.GetFeature(nFIDToFetch)
                else:
                    poFeature = None
            else:
                poFeature = inputLayer.GetNextFeature()
            if poFeature is None:
                break
            nParts = 0
            nIters = 1
            for iPart in range(nIters):
                nFeaturesInTransaction = nFeaturesInTransaction + 1
                # Commit in batches of nGroupTransactions for speed
                if nFeaturesInTransaction == nGroupTransactions:
                    outputLayer.CommitTransaction()
                    outputLayer.StartTransaction()
                    nFeaturesInTransaction = 0
                poDstFeature = ogr.Feature(outputLayer.GetLayerDefn())
                if poDstFeature.SetFromWithMap(poFeature, 1, panMap) != 0:
                    if nGroupTransactions > 0:
                        outputLayer.CommitTransaction()
                    s3_debug("Unable to translate feature %d from layer %s" % (poFeature.GetFID() , inputFDefn.GetName() ))
                    # Revert back to the working directory as before.
                    os.chdir(old_working_directory)
                    return
                poDstGeometry = poDstFeature.GetGeometryRef()
                if poDstGeometry is not None:
                    if nParts > 0:
                        # For -explodecollections, extract the iPart(th) of the geometry
                        poPart = poDstGeometry.GetGeometryRef(iPart).Clone()
                        poDstFeature.SetGeometryDirectly(poPart)
                        poDstGeometry = poPart
                if outputLayer.CreateFeature( poDstFeature ) != 0 and not bSkipFailures:
                    if nGroupTransactions > 0:
                        outputLayer.RollbackTransaction()
                    # Revert back to the working directory as before.
                    os.chdir(old_working_directory)
                    return
        if nGroupTransactions > 0:
            outputLayer.CommitTransaction()
        # Cleanup
        outputDS.Destroy()
        inputDS.Destroy()

        # Move the generated CSV up out of the driver's folder
        fileName = "%s.csv" % layerName
        filePath = os.path.join("CSV", fileName)
        os.rename(filePath, fileName)
        os.removedirs("CSV")

        # Use OGR to read SHP for geometry
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open( "%s.shp" % layerName )
        if ds is None:
            s3_debug("Open failed.\n")
            # Revert back to the working directory as before.
            os.chdir(old_working_directory)
            return

        lyr = ds.GetLayerByName(layerName)

        lyr.ResetReading()

        # Use CSV for Name
        s3_debug("Opening %s.csv" % layerName)
        rows = latin_dict_reader(open("%s.csv" % layerName))

        nameField = layer["namefield"]
        sourceCodeField = layer["sourceCodeField"]
        edenCodeField = layer["edenCodeField"]
        parentSourceCodeField = layer["parentSourceCodeField"]
        parentLevel = layer["parent"]
        parentEdenCodeField = layer["parentEdenCodeField"]
        parentCodeQuery = (ttable.tag == parentEdenCodeField)
        count = 0
        for row in rows:
            # Read Attributes
            # CSV row `count` corresponds to shapefile feature `count`
            feat = lyr[count]

            parentCode = feat.GetField(parentSourceCodeField)
            query = (table.level == parentLevel) & \
                    parentCodeQuery & \
                    (ttable.value == parentCode)
            parent = db(query).select(table.id,
                                      ttable.value,
                                      limitby=(0, 1),
                                      cache=cache).first()
            if not parent:
                # Skip locations for which we don't have a valid parent
                s3_debug("Skipping - cannot find parent with key: %s, value: %s" % (parentEdenCodeField, parentCode))
                count += 1
                continue

            if countries:
                # Skip the countries which we're not interested in
                if level == "L1":
                    if parent["gis_location_tag"].value not in countries:
                        #s3_debug("Skipping %s as not in countries list" % parent["gis_location_tag"].value)
                        count += 1
                        continue
                else:
                    # Check grandparent
                    country = self.get_parent_country(parent.id, key_type="code")
                    if country not in countries:
                        count += 1
                        continue

            # This is got from CSV in order to be able to handle the encoding
            name = row.pop(nameField)
            # NOTE(review): encode() result is discarded - presumably just a
            # validity check that the name is UTF-8-encodable; confirm intent
            name.encode("utf8")

            code = feat.GetField(sourceCodeField)
            area = feat.GetField("Shape_Area")

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    # NOTE(review): OGR convention is GetX()=lon, GetY()=lat
                    # for WGS84 - this assignment looks swapped; confirm
                    lat = geom.GetX()
                    lon = geom.GetY()
                    id = table.insert(name=name,
                                      level=level,
                                      gis_feature_type=1,
                                      lat=lat,
                                      lon=lon,
                                      parent=parent.id)
                    ttable.insert(location_id = id,
                                  tag = edenCodeField,
                                  value = code)
                    # ttable.insert(location_id = id,
                    #               tag = "area",
                    #               value = area)
                else:
                    wkt = geom.ExportToWkt()
                    # Map the WKT prefix to the numeric
                    # gis_location.gis_feature_type code
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    id = table.insert(name=name,
                                      level=level,
                                      gis_feature_type=gis_feature_type,
                                      wkt=wkt,
                                      parent=parent.id)
                    ttable.insert(location_id = id,
                                  tag = edenCodeField,
                                  value = code)
                    # ttable.insert(location_id = id,
                    #               tag = "area",
                    #               value = area)
            else:
                s3_debug("No geometry\n")

            count += 1

        # Close the shapefile
        ds.Destroy()

        db.commit()

        s3_debug("Updating Location Tree...")
        try:
            self.update_location_tree()
        except MemoryError:
            # If doing all L2s, it can break memory limits
            # @ToDo: Check now that we're doing by level
            s3_debug("Memory error when trying to update_location_tree()!")

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(old_working_directory)

        return
# -------------------------------------------------------------------------
    @staticmethod
    def import_gadm2(ogr, level="L0", countries=[]):
        """
            Import Admin Boundaries into the Locations table from GADMv2
            - designed to be called from import_admin_areas()
            - assumes that basic prepop has been done, so that no new L0
              records need to be created

            @param ogr: the OGR Python module (passed in by the caller)
            @param level: "L0", "L1" or "L2"
            @param countries: list of ISO2 country codes to download data for;
                              defaults to all countries

            @ToDo: Complete this
                - not currently possible to get all data from the 1 file easily
                - no ISO2
                - needs updating for gis_location_tag model
                - only the lowest available levels accessible
                - use GADMv1 for L0, L1, L2 & GADMv2 for specific lower?

            NOTE(review): this function is known-incomplete - see the FIXMEs
            below: `query` is never assigned before use, so the update loop
            raises NameError for any non-Point geometry.
        """
        if level == "L0":
            codeField = "ISO2"   # This field is used to uniquely identify the L0 for updates
            code2Field = "ISO"   # This field is used to uniquely identify the L0 for parenting the L1s
        elif level == "L1":
            nameField = "NAME_1"
            codeField = "ID_1"   # This field is used to uniquely identify the L1 for updates
            code2Field = "ISO"   # This field is used to uniquely identify the L0 for parenting the L1s
            parent = "L0"
            parentCode = "code2"
        elif level == "L2":
            nameField = "NAME_2"
            codeField = "ID_2"   # This field is used to uniquely identify the L2 for updates
            code2Field = "ID_1"  # This field is used to uniquely identify the L1 for parenting the L2s
            parent = "L1"
            parentCode = "code"
        else:
            s3_debug("Level %s not supported!" % level)
            return

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location

        url = "http://gadm.org/data2/gadm_v2_shp.zip"
        # NOTE(review): this local string is shadowed below by
        # `import zipfile` (the module) - confusing but harmless
        zipfile = "gadm_v2_shp.zip"
        shapefile = "gadm2"

        # Copy the current working directory to revert back to later
        old_working_directory = os.getcwd()

        # Create the working directory
        if os.path.exists(os.path.join(os.getcwd(), "temp")): # use web2py/temp/GADMv2 as a cache
            TEMP = os.path.join(os.getcwd(), "temp")
        else:
            import tempfile
            TEMP = tempfile.gettempdir()
        tempPath = os.path.join(TEMP, "GADMv2")
        try:
            os.mkdir(tempPath)
        except OSError:
            # Folder already exists - reuse
            pass

        # Set the current working directory
        os.chdir(tempPath)

        layerName = shapefile

        # Check if file has already been downloaded
        fileName = zipfile
        if not os.path.isfile(fileName):
            # Download the file
            from gluon.tools import fetch
            s3_debug("Downloading %s" % url)
            try:
                file = fetch(url)
            except urllib2.URLError, exception:
                s3_debug(exception)
                return
            fp = StringIO(file)
        else:
            s3_debug("Using existing file %s" % fileName)
            fp = open(fileName)

        # Unzip it
        s3_debug("Unzipping %s" % layerName)
        import zipfile
        myfile = zipfile.ZipFile(fp)
        for ext in ["dbf", "prj", "sbn", "sbx", "shp", "shx"]:
            fileName = "%s.%s" % (layerName, ext)
            file = myfile.read(fileName)
            f = open(fileName, "w")
            f.write(file)
            f.close()
        myfile.close()

        # Use OGR to read Shapefile
        s3_debug("Opening %s.shp" % layerName)
        ds = ogr.Open("%s.shp" % layerName)
        if ds is None:
            s3_debug("Open failed.\n")
            return

        lyr = ds.GetLayerByName(layerName)

        lyr.ResetReading()

        for feat in lyr:
            code = feat.GetField(codeField)
            if not code:
                # Skip the entries which aren't countries
                continue
            if countries and code not in countries:
                # Skip the countries which we're not interested in
                continue

            geom = feat.GetGeometryRef()
            if geom is not None:
                if geom.GetGeometryType() == ogr.wkbPoint:
                    pass
                else:
                    ## FIXME
                    ##query = (table.code == code)
                    wkt = geom.ExportToWkt()
                    # Map the WKT prefix to the numeric
                    # gis_location.gis_feature_type code
                    if wkt.startswith("LINESTRING"):
                        gis_feature_type = 2
                    elif wkt.startswith("POLYGON"):
                        gis_feature_type = 3
                    elif wkt.startswith("MULTIPOINT"):
                        gis_feature_type = 4
                    elif wkt.startswith("MULTILINESTRING"):
                        gis_feature_type = 5
                    elif wkt.startswith("MULTIPOLYGON"):
                        gis_feature_type = 6
                    elif wkt.startswith("GEOMETRYCOLLECTION"):
                        gis_feature_type = 7
                    code2 = feat.GetField(code2Field)
                    area = feat.GetField("Shape_Area")
                    try:
                        ## FIXME
                        # NOTE(review): `query` is undefined here (see the
                        # commented-out assignment above) -> NameError
                        db(query).update(gis_feature_type=gis_feature_type,
                                         wkt=wkt)
                        #code2=code2,
                        #area=area
                    except db._adapter.driver.OperationalError, exception:
                        s3_debug(exception)

            else:
                s3_debug("No geometry\n")

        # Close the shapefile
        ds.Destroy()

        db.commit()

        # Revert back to the working directory as before.
        os.chdir(old_working_directory)

        return
# -------------------------------------------------------------------------
def import_geonames(self, country, level=None):
"""
Import Locations from the Geonames database
@param country: the 2-letter country code
@param level: the ADM level to import
Designed to be run from the CLI
Levels should be imported sequentially.
It is assumed that L0 exists in the DB already
L1-L3 may have been imported from Shapefiles with Polygon info
Geonames can then be used to populate the lower levels of hierarchy
"""
import codecs
from shapely.geometry import point
from shapely.geos import ReadingError
from shapely.wkt import loads as wkt_loads
db = current.db
s3db = current.s3db
cache = s3db.cache
request = current.request
settings = current.deployment_settings
table = s3db.gis_location
ttable = s3db.gis_location_tag
url = "http://download.geonames.org/export/dump/" + country + ".zip"
cachepath = os.path.join(request.folder, "cache")
filename = country + ".txt"
filepath = os.path.join(cachepath, filename)
if os.access(filepath, os.R_OK):
cached = True
else:
cached = False
if not os.access(cachepath, os.W_OK):
s3_debug("Folder not writable", cachepath)
return
if not cached:
# Download File
from gluon.tools import fetch
try:
f = fetch(url)
except (urllib2.URLError,):
e = sys.exc_info()[1]
s3_debug("URL Error", e)
return
except (urllib2.HTTPError,):
e = sys.exc_info()[1]
s3_debug("HTTP Error", e)
return
# Unzip File
if f[:2] == "PK":
# Unzip
fp = StringIO(f)
import zipfile
myfile = zipfile.ZipFile(fp)
try:
# Python 2.6+ only :/
# For now, 2.5 users need to download/unzip manually to cache folder
myfile.extract(filename, cachepath)
myfile.close()
except IOError:
s3_debug("Zipfile contents don't seem correct!")
myfile.close()
return
f = codecs.open(filepath, encoding="utf-8")
# Downloaded file is worth keeping
#os.remove(filepath)
if level == "L1":
fc = "ADM1"
parent_level = "L0"
elif level == "L2":
fc = "ADM2"
parent_level = "L1"
elif level == "L3":
fc = "ADM3"
parent_level = "L2"
elif level == "L4":
fc = "ADM4"
parent_level = "L3"
else:
# 5 levels of hierarchy or 4?
# @ToDo make more extensible still
gis_location_hierarchy = self.get_location_hierarchy()
try:
label = gis_location_hierarchy["L5"]
level = "L5"
parent_level = "L4"
except:
# ADM4 data in Geonames isn't always good (e.g. PK bad)
level = "L4"
parent_level = "L3"
finally:
fc = "PPL"
deleted = (table.deleted == False)
query = deleted & (table.level == parent_level)
# Do the DB query once (outside loop)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
if not all_parents:
# No locations in the parent level found
# - use the one higher instead
parent_level = "L" + str(int(parent_level[1:]) + 1)
query = deleted & (table.level == parent_level)
all_parents = db(query).select(table.wkt,
table.lon_min,
table.lon_max,
table.lat_min,
table.lat_max,
table.id)
# Parse File
current_row = 0
for line in f:
current_row += 1
# Format of file: http://download.geonames.org/export/dump/readme.txt
geonameid,
name,
asciiname,
alternatenames,
lat,
lon,
feature_class,
feature_code,
country_code,
cc2,
admin1_code,
admin2_code,
admin3_code,
admin4_code,
population,
elevation,
gtopo30,
timezone,
modification_date = line.split("\t")
if feature_code == fc:
# Add WKT
lat = float(lat)
lon = float(lon)
wkt = self.latlon_to_wkt(lat, lon)
shape = point.Point(lon, lat)
# Add Bounds
lon_min = lon_max = lon
lat_min = lat_max = lat
# Locate Parent
parent = ""
# 1st check for Parents whose bounds include this location (faster)
def in_bbox(row):
return (row.lon_min < lon_min) & \
(row.lon_max > lon_max) & \
(row.lat_min < lat_min) & \
(row.lat_max > lat_max)
for row in all_parents.find(lambda row: in_bbox(row)):
# Search within this subset with a full geometry check
# Uses Shapely.
# @ToDo provide option to use PostGIS/Spatialite
try:
parent_shape = wkt_loads(row.wkt)
if parent_shape.intersects(shape):
parent = row.id
# Should be just a single parent
break
except ReadingError:
s3_debug("Error reading wkt of location with id", row.id)
# Add entry to database
new_id = table.insert(name=name,
level=level,
parent=parent,
lat=lat,
lon=lon,
wkt=wkt,
lon_min=lon_min,
lon_max=lon_max,
lat_min=lat_min,
lat_max=lat_max)
ttable.insert(location_id=new_id,
tag="geonames",
value=geonames_id)
else:
continue
s3_debug("All done!")
return
# -------------------------------------------------------------------------
@staticmethod
def latlon_to_wkt(lat, lon):
"""
Convert a LatLon to a WKT string
>>> s3gis.latlon_to_wkt(6, 80)
'POINT(80 6)'
"""
WKT = "POINT(%f %f)" % (lon, lat)
return WKT
# -------------------------------------------------------------------------
@staticmethod
def parse_location(wkt, lon=None, lat=None):
"""
Parses a location from wkt, returning wkt, lat, lon, bounding box and type.
For points, wkt may be None if lat and lon are provided; wkt will be generated.
For lines and polygons, the lat, lon returned represent the shape's centroid.
Centroid and bounding box will be None if Shapely is not available.
"""
if not wkt:
if not lon is not None and lat is not None:
raise RuntimeError, "Need wkt or lon+lat to parse a location"
wkt = "POINT(%f %f)" % (lon, lat)
geom_type = GEOM_TYPES["point"]
bbox = (lon, lat, lon, lat)
else:
try:
from shapely.wkt import loads as wkt_loads
SHAPELY = True
except:
SHAPELY = False
if SHAPELY:
shape = wkt_loads(wkt)
centroid = shape.centroid
lat = centroid.y
lon = centroid.x
geom_type = GEOM_TYPES[shape.type.lower()]
bbox = shape.bounds
else:
lat = None
lon = None
geom_type = GEOM_TYPES[wkt.split("(")[0].lower()]
bbox = None
res = {"wkt": wkt, "lat": lat, "lon": lon, "gis_feature_type": geom_type}
if bbox:
res["lon_min"], res["lat_min"], res["lon_max"], res["lat_max"] = bbox
return res
# -------------------------------------------------------------------------
def update_location_tree(self, feature=None):
"""
Update GIS Locations' Materialized path, Lx locations & Lat/Lon
@param feature: a feature dict to update the tree for
- if not provided then update the whole tree
returns the path of the feature
Called onaccept for locations (async, where-possible)
"""
if not feature:
# Do the whole database
# Do in chunks to save memory and also do in correct order
db = current.db
table = db.gis_location
fields = [table.id, table.name, table.gis_feature_type,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.wkt, table.inherited,
table.path, table.parent]
update_location_tree = self.update_location_tree
wkt_centroid = self.wkt_centroid
for level in ["L0", "L1", "L2", "L3", "L4", "L5", None]:
features = db(table.level == level).select(*fields)
for feature in features:
feature["level"] = level
update_location_tree(feature)
# Also do the Bounds/Centroid/WKT
form = Storage()
form.vars = feature
form.errors = Storage()
wkt_centroid(form)
_vars = form.vars
if "lat_max" in _vars:
db(table.id == feature.id).update(gis_feature_type = _vars.gis_feature_type,
lat = _vars.lat,
lon = _vars.lon,
wkt = _vars.wkt,
lat_max = _vars.lat_max,
lat_min = _vars.lat_min,
lon_min = _vars.lon_min,
lon_max = _vars.lon_max)
return
id = "id" in feature and str(feature["id"])
if not id:
# Nothing we can do
raise ValueError
# L0
db = current.db
table = db.gis_location
name = feature.get("name", False)
level = feature.get("level", False)
path = feature.get("path", False)
L0 = feature.get("L0", False)
if level == "L0":
if name:
if path == id and L0 == name:
# No action required
return path
else:
db(table.id == id).update(L0=name,
path=id)
else:
# Look this up
feature = db(table.id == id).select(table.name,
table.path,
table.L0,
limitby=(0, 1)).first()
if feature:
name = feature["name"]
path = feature["path"]
L0 = feature["L0"]
if path == id and L0 == name:
# No action required
return path
else:
db(table.id == id).update(L0=name,
path=id)
return id
# L1
parent = feature.get("parent", False)
L1 = feature.get("L1", False)
lat = feature.get("lat", False)
lon = feature.get("lon", False)
inherited = feature.get("inherited", None)
if level == "L1":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False:
# Get the whole feature
feature = db(table.id == id).select(table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
if parent:
_path = "%s/%s" % (parent, id)
_L0 = db(table.id == parent).select(table.name,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = _L0.name
L0_lat = _L0.lat
L0_lon = _L0.lon
else:
_path = id
L0_name = None
L0_lat = None
L0_lon = None
if path == _path and L1 == name and L0 == L0_name:
if inherited and lat == L0_lat and lon == L0_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
db(table.id == id).update(inherited=True,
lat=L0_lat,
lon=L0_lon)
elif inherited and lat == L0_lat and lon == L0_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=name)
return _path
elif inherited or lat is None or lon is None:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=name,
inherited=True,
lat=L0_lat,
lon=L0_lon)
else:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) and \
(table.inherited == True)
fields = [table.id, table.name, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
for row in rows:
self.update_location_tree(row)
return _path
# L2
L2 = feature.get("L2", False)
if level == "L2":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False:
# Get the whole feature
feature = db(table.id == id).select(table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
if parent:
Lx = db(table.id == parent).select(table.name,
table.level,
table.parent,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
if Lx.level == "L1":
L1_name = Lx.name
_parent = Lx.parent
if _parent:
_path = "%s/%s/%s" % (_parent, parent, id)
L0_name = db(table.id == _parent).select(table.name,
limitby=(0, 1),
cache=current.s3db.cache).first().name
else:
_path = "%s/%s" % (parent, id)
L0_name = None
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
else:
raise ValueError
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L2 == name and L0 == L0_name and \
L1 == L1_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
db(table.id == id).update(inherited=True,
lat=Lx_lat,
lon=Lx_lon)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=name,
)
return _path
elif inherited or lat is None or lon is None:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
else:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) and \
(table.inherited == True)
fields = [table.id, table.name, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
for row in rows:
self.update_location_tree(row)
return _path
# L3
L3 = feature.get("L3", False)
if level == "L3":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False:
# Get the whole feature
feature = db(table.id == id).select(table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.path,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
if Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
else:
raise ValueError
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L3 == name and L0 == L0_name and \
L1 == L1_name and L2 == L2_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
db(table.id == id).update(inherited=True,
lat=Lx_lat,
lon=Lx_lon)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name,
)
return _path
elif inherited or lat is None or lon is None:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
else:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) and \
(table.inherited == True)
fields = [table.id, table.name, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
for row in rows:
self.update_location_tree(row)
return _path
# L4
L4 = feature.get("L4", False)
if level == "L4":
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False or \
L4 is False:
# Get the whole feature
feature = db(table.id == id).select(table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.L2,
table.path,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
if Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
else:
raise ValueError
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L4 == name and L0 == L0_name and \
L1 == L1_name and L2 == L2_name and \
L3 == L3_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
db(table.id == id).update(inherited=True,
lat=Lx_lat,
lon=Lx_lon)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name,
)
return _path
elif inherited or lat is None or lon is None:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
else:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=name)
# Ensure that any locations which inherit their latlon from this one get updated
query = (table.parent == id) and \
(table.inherited == True)
fields = [table.id, table.name, table.path, table.parent,
table.L0, table.L1, table.L2, table.L3, table.L4,
table.lat, table.lon, table.inherited]
rows = db(query).select(*fields)
for row in rows:
self.update_location_tree(row)
return _path
# @ToDo: L5
# Specific Location
# - or unspecified (which we should avoid happening)
if name is False or lat is False or lon is False or inherited is None or \
parent is False or path is False or L0 is False or L1 is False or \
L2 is False or L3 is False or \
L4 is False:
# Get the whole feature
feature = db(table.id == id).select(table.name,
table.parent,
table.path,
table.lat,
table.lon,
table.inherited,
table.L0,
table.L1,
table.L2,
table.L3,
table.L4,
limitby=(0, 1)).first()
name = feature.name
parent = feature.parent
path = feature.path
lat = feature.lat
lon = feature.lon
inherited = feature.inherited
L0 = feature.L0
L1 = feature.L1
L2 = feature.L2
L3 = feature.L3
L4 = feature.L4
if parent:
Lx = db(table.id == parent).select(table.id,
table.name,
table.level,
table.L0,
table.L1,
table.L2,
table.L3,
table.path,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
if Lx.level == "L4":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
L4_name = Lx.name
_path = Lx.path
if _path and L0_name and L1_name and L2_name and L3_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.L3,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.L3
elif Lx.level == "L3":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
L3_name = Lx.name
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name and L2_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.L2,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.L2
elif Lx.level == "L2":
L0_name = Lx.L0
L1_name = Lx.L1
L2_name = Lx.name
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name and L1_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.L1,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
L1_name = Lx.L1
elif Lx.level == "L1":
L0_name = Lx.L0
L1_name = Lx.name
L2_name = None
L3_name = None
L4_name = None
_path = Lx.path
if _path and L0_name:
_path = "%s/%s" % (_path, id)
else:
# This feature needs to be updated
_path = self.update_location_tree(Lx)
_path = "%s/%s" % (_path, id)
# Query again
Lx = db(table.id == parent).select(table.L0,
table.lat,
table.lon,
limitby=(0, 1),
cache=current.s3db.cache).first()
L0_name = Lx.L0
elif Lx.level == "L0":
_path = "%s/%s" % (parent, id)
L0_name = Lx.name
L1_name = None
L2_name = None
L3_name = None
L4_name = None
else:
raise ValueError
Lx_lat = Lx.lat
Lx_lon = Lx.lon
else:
_path = id
L0_name = None
L1_name = None
L2_name = None
L3_name = None
L4_name = None
Lx_lat = None
Lx_lon = None
if path == _path and L0 == L0_name and \
L1 == L1_name and L2 == L2_name and \
L3 == L3_name and L4 == L4_name:
if inherited and lat == Lx_lat and lon == Lx_lon:
# No action required
return path
elif inherited or lat is None or lon is None:
db(table.id == id).update(inherited=True,
lat=Lx_lat,
lon=Lx_lon)
elif inherited and lat == Lx_lat and lon == Lx_lon:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
)
elif inherited or lat is None or lon is None:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name,
inherited=True,
lat=Lx_lat,
lon=Lx_lon)
else:
db(table.id == id).update(path=_path,
L0=L0_name,
L1=L1_name,
L2=L2_name,
L3=L3_name,
L4=L4_name)
return _path
# -------------------------------------------------------------------------
@staticmethod
def wkt_centroid(form):
    """
        OnValidation callback:
        If a WKT is defined: validate the format,
            calculate the LonLat of the Centroid, and set bounds
        Else if a LonLat is defined: calculate the WKT for the Point.

        Uses Shapely.
        @ToDo: provide an option to use PostGIS/Spatialite

        @param form: the web2py form being validated; reads/writes
                     form.vars and records problems in form.errors

        Side effects on form.vars (depending on branch): wkt,
        gis_feature_type, lon, lat, lon_min/lon_max/lat_min/lat_max
        and (on a spatial DB) the_geom. Always returns None.
    """
    messages = current.messages
    vars = form.vars  # NOTE: shadows the builtin 'vars'; kept for file consistency
    if vars.gis_feature_type == "1":
        # Point
        if (vars.lon is None and vars.lat is None) or \
           (vars.lon == "" and vars.lat == ""):
            # No Geometry available
            # Don't clobber existing records (e.g. in Prepop)
            #vars.gis_feature_type = "0"
            # Cannot create WKT, so Skip
            return
        elif vars.lat is None or vars.lat == "":
            # Partial coordinate: flag the missing half (validation continues)
            form.errors["lat"] = messages.lat_empty
        elif vars.lon is None or vars.lon == "":
            form.errors["lon"] = messages.lon_empty
        else:
            # Complete LonLat -> synthesise the Point WKT
            vars.wkt = "POINT(%(lon)s %(lat)s)" % vars
            # A Point's bounds collapse to the point itself;
            # only fill in bounds which are not already set
            if "lon_min" not in vars or vars.lon_min is None:
                vars.lon_min = vars.lon
            if "lon_max" not in vars or vars.lon_max is None:
                vars.lon_max = vars.lon
            if "lat_min" not in vars or vars.lat_min is None:
                vars.lat_min = vars.lat
            if "lat_max" not in vars or vars.lat_max is None:
                vars.lat_max = vars.lat
    elif vars.wkt:
        # Parse WKT for LineString, Polygon, etc
        from shapely.wkt import loads as wkt_loads
        try:
            shape = wkt_loads(vars.wkt)
        except:
            try:
                # Perhaps this is really a LINESTRING (e.g. OSM import of an unclosed Way)
                # vars.wkt[8:-1] strips the "POLYGON(" prefix & the trailing ")"
                linestring = "LINESTRING%s" % vars.wkt[8:-1]
                shape = wkt_loads(linestring)
                vars.wkt = linestring
            except:
                form.errors["wkt"] = messages.invalid_wkt
                return
        # Map the Shapely geometry type onto the numeric gis_feature_type code
        gis_feature_type = shape.type
        if gis_feature_type == "Point":
            vars.gis_feature_type = 1
        elif gis_feature_type == "LineString":
            vars.gis_feature_type = 2
        elif gis_feature_type == "Polygon":
            vars.gis_feature_type = 3
        elif gis_feature_type == "MultiPoint":
            vars.gis_feature_type = 4
        elif gis_feature_type == "MultiLineString":
            vars.gis_feature_type = 5
        elif gis_feature_type == "MultiPolygon":
            vars.gis_feature_type = 6
        elif gis_feature_type == "GeometryCollection":
            vars.gis_feature_type = 7
        try:
            # Derive the representative point & bounding box from the geometry
            centroid_point = shape.centroid
            vars.lon = centroid_point.x
            vars.lat = centroid_point.y
            bounds = shape.bounds
            vars.lon_min = bounds[0]
            vars.lat_min = bounds[1]
            vars.lon_max = bounds[2]
            vars.lat_max = bounds[3]
        except:
            form.errors.gis_feature_type = messages.centroid_error
        if current.deployment_settings.get_gis_spatialdb():
            # Also populate the spatial field
            vars.the_geom = vars.wkt
    elif (vars.lon is None and vars.lat is None) or \
         (vars.lon == "" and vars.lat == ""):
        # No Geometry available
        # Don't clobber existing records (e.g. in Prepop)
        #vars.gis_feature_type = "0"
        # Cannot create WKT, so Skip
        return
    else:
        # Point (no explicit feature type, but a LonLat was supplied)
        vars.gis_feature_type = "1"
        if vars.lat is None or vars.lat == "":
            form.errors["lat"] = messages.lat_empty
        elif vars.lon is None or vars.lon == "":
            form.errors["lon"] = messages.lon_empty
        else:
            vars.wkt = "POINT(%(lon)s %(lat)s)" % vars
            # Point bounds default to the point itself when not already set
            if "lon_min" not in vars or vars.lon_min is None:
                vars.lon_min = vars.lon
            if "lon_max" not in vars or vars.lon_max is None:
                vars.lon_max = vars.lon
            if "lat_min" not in vars or vars.lat_min is None:
                vars.lat_min = vars.lat
            if "lat_max" not in vars or vars.lat_max is None:
                vars.lat_max = vars.lat
    return
# -------------------------------------------------------------------------
@staticmethod
def query_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
    """
        Build a query matching every gis_location whose own bounding
        box overlaps the given bounding box.
    """
    table = current.s3db.gis_location
    # Two boxes overlap iff each one's min edge lies below the other's max
    conditions = [table.lat_min <= lat_max,
                  table.lat_max >= lat_min,
                  table.lon_min <= lon_max,
                  table.lon_max >= lon_min]
    query = conditions[0]
    for condition in conditions[1:]:
        query &= condition
    return query
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_bbox(lon_min, lat_min, lon_max, lat_max):
    """
        Fetch the Rows of Locations whose shape intersects the
        given bounding box.
    """
    bbox_query = current.gis.query_features_by_bbox(lon_min, lat_min,
                                                    lon_max, lat_max)
    return current.db(bbox_query).select()
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_shape(shape):
    """
        Generator yielding the Rows of Locations whose geometry
        intersects the given shape.

        Relies on Shapely for wkt parsing and intersection.
        @ToDo: provide an option to use PostGIS/Spatialite
    """
    from shapely.geos import ReadingError
    from shapely.wkt import loads as wkt_loads

    table = current.s3db.gis_location
    # Cheap pre-filter: only candidates whose bbox overlaps the shape's bbox
    bbox_query = current.gis.query_features_by_bbox(*shape.bounds)
    wkt_query = (table.wkt != None) & (table.wkt != "")
    for row in current.db(bbox_query & wkt_query).select():
        try:
            geometry = wkt_loads(row.wkt)
            if geometry.intersects(shape):
                yield row
        except ReadingError:
            s3_debug("Error reading wkt of location with id", row.id)
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_latlon(lat, lon):
    """
        Returns a generator of locations whose shape intersects the
        given LatLon. Relies on Shapely.
        @todo: provide an option to use PostGIS/Spatialite
    """
    from shapely.geometry import Point
    # NB WKT/Shapely ordering is (x, y) == (lon, lat)
    return current.gis.get_features_by_shape(Point(lon, lat))
# -------------------------------------------------------------------------
@staticmethod
def get_features_by_feature(feature):
    """
        Returns all Locations whose geometry intersects the geometry
        of the given feature (read from feature.wkt). Relies on Shapely.
        @ToDo: provide an option to use PostGIS/Spatialite
    """
    from shapely.wkt import loads as wkt_loads
    geometry = wkt_loads(feature.wkt)
    return current.gis.get_features_by_shape(geometry)
# -------------------------------------------------------------------------
@staticmethod
def set_all_bounds():
    """
        Sets bounds for all locations without them.

        If shapely is present, and a location has wkt, bounds of the
        geometry are used. Otherwise, the (lat, lon) are used as bounds.
    """
    try:
        from shapely.wkt import loads as wkt_loads
        SHAPELY = True
    except:
        SHAPELY = False

    db = current.db
    table = current.s3db.gis_location
    # Query to find all locations without bounds set (but with a LatLon)
    no_bounds = (table.lon_min == None) & \
                (table.lat_min == None) & \
                (table.lon_max == None) & \
                (table.lat_max == None) & \
                (table.lat != None) & \
                (table.lon != None)
    if SHAPELY:
        # Refine to those locations with a WKT field
        wkt_no_bounds = no_bounds & (table.wkt != None) & (table.wkt != "")
        # Bug fix: table.id must be selected too - it is needed below to
        # address the record when writing the computed bounds back
        for location in db(wkt_no_bounds).select(table.id, table.wkt):
            try:
                shape = wkt_loads(location.wkt)
            except:
                s3_debug("Error reading WKT", location.wkt)
                continue
            bounds = shape.bounds
            # DAL shortcut: table[id] = dict(...) updates the record by id
            table[location.id] = dict(
                lon_min = bounds[0],
                lat_min = bounds[1],
                lon_max = bounds[2],
                lat_max = bounds[3],
            )

    # Anything left, we assume is a Point, so set the bounds to be the same
    db(no_bounds).update(lon_min=table.lon,
                         lat_min=table.lat,
                         lon_max=table.lon,
                         lat_max=table.lat)
# -------------------------------------------------------------------------
@staticmethod
def simplify(wkt, tolerance=0.001, preserve_topology=True, output="wkt"):
    """
        Simplify a complex Polygon

        - NB This uses Python; better performance will be gained by doing
          this direct from the database if you are using PostGIS:
          ST_Simplify() is available as
          db(query).select(table.the_geom.st_simplify(tolerance).st_astext().with_alias('wkt')).first().wkt
          db(query).select(table.the_geom.st_simplify(tolerance).st_asgeojson().with_alias('geojson')).first().geojson

        @ToDo: Reduce the number of decimal points to 4
               - requires patching modules/geojson?
    """
    from shapely.wkt import loads as wkt_loads

    try:
        # Enable C-based speedups available from 1.2.10+
        from shapely import speedups
        speedups.enable()
    except:
        s3_debug("S3GIS", "Upgrade Shapely for Performance enhancements")

    try:
        geometry = wkt_loads(wkt)
    except:
        # Unparseable input -> nothing to simplify
        return None

    simplified = geometry.simplify(tolerance, preserve_topology)

    if output == "wkt":
        return simplified.to_wkt()
    if output == "geojson":
        from ..geojson import dumps
        # Compact Encoding
        return dumps(simplified, separators=(",", ":"))
    # Unknown format requested: fall through with the arg unchanged
    return output
# -------------------------------------------------------------------------
def show_map( self,
height = None,
width = None,
bbox = {},
lat = None,
lon = None,
zoom = None,
projection = None,
add_feature = False,
add_feature_active = False,
add_polygon = False,
add_polygon_active = False,
features = [],
feature_queries = [],
feature_resources = [],
wms_browser = {},
catalogue_layers = False,
legend = False,
toolbar = False,
search = False,
googleEarth = False,
googleStreetview = False,
mouse_position = "normal",
print_tool = {},
mgrs = {},
window = False,
window_hide = False,
closable = True,
maximizable = True,
collapsed = False,
location_selector = False,
plugins = None,
):
"""
Returns the HTML to display a map
Normally called in the controller as: map = gis.show_map()
In the view, put: {{=XML(map)}}
@param height: Height of viewport (if not provided then the default deployment setting is used)
@param width: Width of viewport (if not provided then the default deployment setting is used)
@param bbox: default Bounding Box of viewport (if not provided then the Lat/Lon/Zoom are used) (Dict):
{
"max_lat" : float,
"max_lon" : float,
"min_lat" : float,
"min_lon" : float
}
@param lat: default Latitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param lon: default Longitude of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param zoom: default Zoom level of viewport (if not provided then the default setting from the Map Service Catalogue is used)
@param projection: EPSG code for the Projection to use (if not provided then the default setting from the Map Service Catalogue is used)
@param add_feature: Whether to include a DrawFeature control to allow adding a marker to the map
@param add_feature_active: Whether the DrawFeature control should be active by default
@param add_polygon: Whether to include a DrawFeature control to allow drawing a polygon over the map
@param add_polygon_active: Whether the DrawFeature control should be active by default
@param features: Simple Features to overlay on Map (no control over appearance & not interactive)
[{
"lat": lat,
"lon": lon
}]
@param feature_queries: Feature Queries to overlay onto the map & their options (List of Dicts):
[{
"name" : T("MyLabel"), # A string: the label for the layer
"query" : query, # A gluon.sql.Rows of gis_locations, which can be from a simple query or a Join.
# Extra fields can be added for 'popup_url', 'popup_label' & either
# 'marker' (url/height/width) or 'shape' (with optional 'colour' & 'size')
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
"marker" : None, # Optional: A per-Layer marker query or marker_id for the icon used to display the feature
"opacity" : 1, # Optional
"cluster_distance", # Optional
"cluster_threshold" # Optional
}]
@param feature_resources: REST URLs for (filtered) resources to overlay onto the map & their options (List of Dicts):
[{
"name" : T("MyLabel"), # A string: the label for the layer
"id" : "search", # A string: the id for the layer (for manipulation by JavaScript)
"url" : "/eden/module/resource.geojson?filter", # A URL to load the resource
"active" : True, # Is the feed displayed upon load or needs ticking to load afterwards?
"marker" : None, # Optional: A per-Layer marker dict for the icon used to display the feature
"opacity" : 1, # Optional
"cluster_distance", # Optional
"cluster_threshold" # Optional
}]
@param wms_browser: WMS Server's GetCapabilities & options (dict)
{
"name": T("MyLabel"), # Name for the Folder in LayerTree
"url": string # URL of GetCapabilities
}
@param catalogue_layers: Show all the enabled Layers from the GIS Catalogue
Defaults to False: Just show the default Base layer
@param legend: Show the Legend panel
@param toolbar: Show the Icon Toolbar of Controls
@param search: Show the Geonames search box
@param googleEarth: Include a Google Earth Panel
@param googleStreetview: Include the ability to click to open up StreetView in a popup at that location
@param mouse_position: Show the current coordinates in the bottom-right of the map. 3 Options: 'normal' (default), 'mgrs' (MGRS), False (off)
@param print_tool: Show a print utility (NB This requires server-side support: http://eden.sahanafoundation.org/wiki/BluePrintGISPrinting)
{
"url": string, # URL of print service (e.g. http://localhost:8080/geoserver/pdf/)
"mapTitle": string, # Title for the Printed Map (optional)
"subTitle": string # subTitle for the Printed Map (optional)
}
@param mgrs: Use the MGRS Control to select PDFs
{
"name": string, # Name for the Control
"url": string # URL of PDF server
}
@ToDo: Also add MGRS Search support: http://gxp.opengeo.org/master/examples/mgrs.html
@param window: Have viewport pop out of page into a resizable window
@param window_hide: Have the window hidden by default, ready to appear (e.g. on clicking a button)
@param closable: In Window mode, whether the window is closable or not
@param collapsed: Start the Tools panel (West region) collapsed
@param location_selector: This Map is being instantiated within the LocationSelectorWidget
@param plugins: an iterable of objects which support the following methods:
.addToMapWindow(items)
.setup(map)
"""
request = current.request
response = current.response
if not response.warning:
response.warning = ""
s3 = response.s3
session = current.session
T = current.T
db = current.db
s3db = current.s3db
auth = current.auth
cache = s3db.cache
settings = current.deployment_settings
public_url = settings.get_base_public_url()
cachetable = s3db.gis_cache
MAP_ADMIN = auth.s3_has_role(session.s3.system_roles.MAP_ADMIN)
# Defaults
# Also in static/S3/s3.gis.js
# http://dev.openlayers.org/docs/files/OpenLayers/Strategy/Cluster-js.html
self.cluster_distance = 20 # pixels
self.cluster_threshold = 2 # minimum # of features to form a cluster
# Support bookmarks (such as from the control)
# - these over-ride the arguments
vars = request.vars
# Read configuration
config = GIS.get_config()
if height:
map_height = height
else:
map_height = settings.get_gis_map_height()
if width:
map_width = width
else:
map_width = settings.get_gis_map_width()
if (bbox
and (-90 < bbox["max_lat"] < 90)
and (-90 < bbox["min_lat"] < 90)
and (-180 < bbox["max_lon"] < 180)
and (-180 < bbox["min_lon"] < 180)
):
# We have sane Bounds provided, so we should use them
pass
else:
# No bounds or we've been passed bounds which aren't sane
bbox = None
# Use Lat/Lon to center instead
if "lat" in vars and vars.lat:
lat = float(vars.lat)
if lat is None or lat == "":
lat = config.lat
if "lon" in vars and vars.lon:
lon = float(vars.lon)
if lon is None or lon == "":
lon = config.lon
if "zoom" in request.vars:
zoom = int(vars.zoom)
if not zoom:
zoom = config.zoom
if not projection:
projection = config.epsg
if projection not in (900913, 4326):
# Test for Valid Projection file in Proj4JS library
projpath = os.path.join(
request.folder, "static", "scripts", "gis", "proj4js", \
"lib", "defs", "EPSG%s.js" % projection
)
try:
f = open(projpath, "r")
f.close()
except:
if projection:
response.warning = \
T("Map not available: Projection %(projection)s not supported - please add definition to %(path)s") % \
dict(projection = "'%s'" % projection,
path= "/static/scripts/gis/proj4js/lib/defs")
else:
response.warning = \
T("Map not available: No Projection configured")
return None
units = config.units
maxResolution = config.maxResolution
maxExtent = config.maxExtent
numZoomLevels = config.zoom_levels
marker_default = Storage(image = config.marker_image,
height = config.marker_height,
width = config.marker_width,
url = URL(c="static", f="img",
args=["markers", config.marker_image]))
markers = {}
#####
# CSS
#####
# All Loaded as-standard to avoid delays in page loading
######
# HTML
######
html = DIV(_id="map_wrapper")
html_append = html.append
# Map (Embedded not Window)
html_append(DIV(_id="map_panel"))
# Status Reports
html_append(TABLE(TR(
#TD(
# # Somewhere to report details of OSM File Features via on_feature_hover()
# DIV(_id="status_osm"),
# _style="border: 0px none ;", _valign="top",
#),
TD(
# Somewhere to report whether KML feed is using cached copy or completely inaccessible
DIV(_id="status_kml"),
# Somewhere to report if Files are not found
DIV(_id="status_files"),
_style="border: 0px none ;", _valign="top",
)
)))
#########
# Scripts
#########
# JS Loader
html_append(SCRIPT(_type="text/javascript",
_src=URL(c="static", f="scripts/yepnope.1.5.4-min.js")))
scripts = []
scripts_append = scripts.append
ready = ""
def add_javascript(script, ready=""):
if type(script) == SCRIPT:
if ready:
ready = """%s
%s""" % (ready, script)
else:
ready = script
elif script.startswith("http"):
scripts_append(script)
else:
script = URL(c="static", f=script)
scripts_append(script)
debug = s3.debug
if debug:
if projection not in (900913, 4326):
add_javascript("scripts/gis/proj4js/lib/proj4js-combined.js")
add_javascript("scripts/gis/proj4js/lib/defs/EPSG%s.js" % projection)
add_javascript("scripts/gis/openlayers/lib/OpenLayers.js")
add_javascript("scripts/gis/cdauth.js")
add_javascript("scripts/gis/osm_styles.js")
add_javascript("scripts/gis/GeoExt/lib/GeoExt.js")
add_javascript("scripts/gis/GeoExt/ux/GeoNamesSearchCombo.js")
add_javascript("scripts/gis/gxp/RowExpander.js")
add_javascript("scripts/gis/gxp/widgets/NewSourceWindow.js")
add_javascript("scripts/gis/gxp/plugins/LayerSource.js")
add_javascript("scripts/gis/gxp/plugins/WMSSource.js")
add_javascript("scripts/gis/gxp/plugins/Tool.js")
add_javascript("scripts/gis/gxp/plugins/AddLayers.js")
add_javascript("scripts/gis/gxp/plugins/RemoveLayer.js")
if mouse_position == "mgrs":
add_javascript("scripts/gis/usng2.js")
add_javascript("scripts/gis/MP.js")
pass
else:
if projection not in (900913, 4326):
add_javascript("scripts/gis/proj4js/lib/proj4js-compressed.js")
add_javascript("scripts/gis/proj4js/lib/defs/EPSG%s.js" % projection)
add_javascript("scripts/gis/OpenLayers.js")
add_javascript("scripts/gis/GeoExt.js")
if mouse_position == "mgrs":
add_javascript("scripts/gis/MGRS.min.js")
#######
# Tools
#######
# Toolbar
if toolbar:
toolbar = '''S3.gis.toolbar=true\n'''
else:
toolbar = ""
# @ToDo: Could we get this automatically?
if location_selector:
loc_select = '''S3.gis.loc_select=true\n'''
else:
loc_select = ""
# MGRS PDF Browser
if mgrs:
mgrs_name = '''S3.gis.mgrs_name='%s'\n''' % mgrs["name"]
mgrs_url = '''S3.gis.mgrs_url='%s'\n''' % mgrs["url"]
else:
mgrs_name = ""
mgrs_url = ""
# Legend panel
if legend:
legend = '''S3.i18n.gis_legend='%s'\n''' % T("Legend")
else:
legend = ""
# Draw Feature Controls
if add_feature:
if add_feature_active:
draw_feature = '''S3.gis.draw_feature='active'\n'''
else:
draw_feature = '''S3.gis.draw_feature='inactive'\n'''
else:
draw_feature = ""
if add_polygon:
if add_polygon_active:
draw_polygon = '''S3.gis.draw_polygon='active'\n'''
else:
draw_polygon = '''S3.gis.draw_polygon='inactive'\n'''
else:
draw_polygon = ""
authenticated = ""
config_id = ""
if auth.is_logged_in():
authenticated = '''S3.auth=true\n'''
if MAP_ADMIN or \
(config.pe_id == auth.user.pe_id):
# Personal config or MapAdmin, so enable Save Button for Updates
config_id = '''S3.gis.config_id=%i\n''' % config.id
# Upload Layer
if settings.get_gis_geoserver_password():
upload_layer = '''S3.i18n.gis_uploadlayer='Upload Shapefile'\n'''
add_javascript("scripts/gis/gxp/FileUploadField.js")
add_javascript("scripts/gis/gxp/widgets/LayerUploadPanel.js")
else:
upload_layer = ""
# Layer Properties
layer_properties = '''S3.i18n.gis_properties='Layer Properties'\n'''
# Search
if search:
search = '''S3.i18n.gis_search='%s'\n''' % T("Search location in Geonames")
#'''S3.i18n.gis_search_no_internet="%s"''' % T("Geonames.org search requires Internet connectivity!")
else:
search = ""
# WMS Browser
if wms_browser:
wms_browser_name = '''S3.gis.wms_browser_name='%s'\n''' % wms_browser["name"]
# urlencode the URL
wms_browser_url = '''S3.gis.wms_browser_url='%s'\n''' % urllib.quote(wms_browser["url"])
else:
wms_browser_name = ""
wms_browser_url = ""
# Mouse Position
if not mouse_position:
mouse_position = ""
elif mouse_position == "mgrs":
mouse_position = '''S3.gis.mouse_position='mgrs'\n'''
else:
mouse_position = '''S3.gis.mouse_position=true\n'''
# OSM Authoring
if config.osm_oauth_consumer_key and \
config.osm_oauth_consumer_secret:
osm_auth = '''S3.gis.osm_oauth='%s'\n''' % T("Zoom in closer to Edit OpenStreetMap layer")
else:
osm_auth = ""
# Print
# NB This isn't too-flexible a method. We're now focussing on print.css
# If we do come back to it, then it should be moved to static
if print_tool:
url = print_tool["url"]
if "title" in print_tool:
mapTitle = unicode(print_tool["mapTitle"])
else:
mapTitle = unicode(T("Map from Sahana Eden"))
if "subtitle" in print_tool:
subTitle = unicode(print_tool["subTitle"])
else:
subTitle = unicode(T("Printed from Sahana Eden"))
if auth.is_logged_in():
creator = unicode(auth.user.email)
else:
creator = ""
script = u"".join(("""
if (typeof(printCapabilities) != 'undefined') {
// info.json from script headers OK
printProvider = new GeoExt.data.PrintProvider({
//method: 'POST',
//url: '""", url, """',
method: 'GET', // 'POST' recommended for production use
capabilities: printCapabilities, // from the info.json returned from the script headers
customParams: {
mapTitle: '""", mapTitle, """',
subTitle: '""", subTitle, """',
creator: '""", creator, """'
}
});
// Our print page. Stores scale, center and rotation and gives us a page
// extent feature that we can add to a layer.
printPage = new GeoExt.data.PrintPage({
printProvider: printProvider
});
//var printExtent = new GeoExt.plugins.PrintExtent({
// printProvider: printProvider
//});
// A layer to display the print page extent
//var pageLayer = new OpenLayers.Layer.Vector('""", unicode(T("Print Extent")), """');
//pageLayer.addFeatures(printPage.feature);
//pageLayer.setVisibility(false);
//map.addLayer(pageLayer);
//var pageControl = new OpenLayers.Control.TransformFeature();
//map.addControl(pageControl);
//map.setOptions({
// eventListeners: {
// recenter/resize page extent after pan/zoom
// 'moveend': function() {
// printPage.fit(mapPanel, true);
// }
// }
//});
// The form with fields controlling the print output
S3.gis.printFormPanel = new Ext.form.FormPanel({
title: '""", unicode(T("Print Map")), """',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%%'},
listeners: {
'expand': function() {
//if (null == mapPanel.map.getLayersByName('""", unicode(T("Print Extent")), """')[0]) {
// mapPanel.map.addLayer(pageLayer);
//}
if (null == mapPanel.plugins[0]) {
//map.addLayer(pageLayer);
//pageControl.activate();
//mapPanel.plugins = [ new GeoExt.plugins.PrintExtent({
// printProvider: printProvider,
// map: map,
// layer: pageLayer,
// control: pageControl
//}) ];
//mapPanel.plugins[0].addPage();
}
},
'collapse': function() {
//mapPanel.map.removeLayer(pageLayer);
//if (null != mapPanel.plugins[0]) {
// map.removeLayer(pageLayer);
// mapPanel.plugins[0].removePage(mapPanel.plugins[0].pages[0]);
// mapPanel.plugins = [];
//}
}
},
items: [{
xtype: 'textarea',
name: 'comment',
value: '',
fieldLabel: '""", unicode(T("Comment")), """',
plugins: new GeoExt.plugins.PrintPageField({
printPage: printPage
})
}, {
xtype: 'combo',
store: printProvider.layouts,
displayField: 'name',
fieldLabel: '""", T("Layout").decode("utf-8"), """',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
})
}, {
xtype: 'combo',
store: printProvider.dpis,
displayField: 'name',
fieldLabel: '""", unicode(T("Resolution")), """',
tpl: '<tpl for="."><div class="x-combo-list-item">{name} dpi</div></tpl>',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
}),
// the plugin will work even if we modify a combo value
setValue: function(v) {
v = parseInt(v) + ' dpi';
Ext.form.ComboBox.prototype.setValue.apply(this, arguments);
}
//}, {
// xtype: 'combo',
// store: printProvider.scales,
// displayField: 'name',
// fieldLabel: '""", unicode(T("Scale")), """',
// typeAhead: true,
// mode: 'local',
// triggerAction: 'all',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
//}, {
// xtype: 'textfield',
// name: 'rotation',
// fieldLabel: '""", unicode(T("Rotation")), """',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
}],
buttons: [{
text: '""", unicode(T("Create PDF")), """',
handler: function() {
// the PrintExtent plugin is the mapPanel's 1st plugin
//mapPanel.plugins[0].print();
// convenient way to fit the print page to the visible map area
printPage.fit(mapPanel, true);
// print the page, including the legend, where available
if (null == legendPanel) {
printProvider.print(mapPanel, printPage);
} else {
printProvider.print(mapPanel, printPage, {legend: legendPanel});
}
}
}]
});
} else {
// Display error diagnostic
S3.gis.printFormPanel = new Ext.Panel ({
title: '""", unicode(T("Print Map")), """',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%'},
html: '""", unicode(T("Printing disabled since server not accessible")), """: <BR />""", unicode(url), """'
});
}
"""))
ready = """%s
%s""" % (ready, script)
script = "%sinfo.json?var=printCapabilities" % url
scripts_append(script)
##########
# Settings
##########
# Layout
s3_gis_window = ""
s3_gis_windowHide = ""
if not closable:
s3_gis_windowNotClosable = '''S3.gis.windowNotClosable=true\n'''
else:
s3_gis_windowNotClosable = ""
if window:
s3_gis_window = '''S3.gis.window=true\n'''
if window_hide:
s3_gis_windowHide = '''S3.gis.windowHide=true\n'''
if maximizable:
maximizable = '''S3.gis.maximizable=true\n'''
else:
maximizable = '''S3.gis.maximizable=false\n'''
# Collapsed
if collapsed:
collapsed = '''S3.gis.west_collapsed=true\n'''
else:
collapsed = ""
# Bounding Box
if bbox:
# Calculate from Bounds
center = '''S3.gis.lat,S3.gis.lon
S3.gis.bottom_left=[%f,%f]
S3.gis.top_right=[%f,%f]
''' % (bbox["min_lon"], bbox["min_lat"], bbox["max_lon"], bbox["max_lat"])
else:
center = '''S3.gis.lat=%s
S3.gis.lon=%s
''' % (lat, lon)
########
# Layers
########
# =====================================================================
# Overlays
#
# Duplicate Features to go across the dateline?
# @ToDo: Action this again (e.g. for DRRPP)
if settings.get_gis_duplicate_features():
duplicate_features = '''S3.gis.duplicate_features=true'''
else:
duplicate_features = ""
# ---------------------------------------------------------------------
# Features
#
# Simple Features added to the Draft layer
# - used by the Location Selector
#
_features = ""
if features:
_features = '''S3.gis.features=new Array()\n'''
counter = -1
for feature in features:
counter = counter + 1
if feature["lat"] and feature["lon"]:
# Generate JS snippet to pass to static
_features += '''S3.gis.features[%i]={
lat:%f,
lon:%f
}\n''' % (counter,
feature["lat"],
feature["lon"])
# ---------------------------------------------------------------------
# Feature Queries
#
# These can be Rows or Storage()
# NB These considerations need to be taken care of before arriving here:
# Security of data
# Localisation of name/popup_label
#
if feature_queries:
layers_feature_queries = '''
S3.gis.layers_feature_queries=new Array()'''
counter = -1
mtable = s3db.gis_marker
else:
layers_feature_queries = ""
for layer in feature_queries:
counter = counter + 1
name = str(layer["name"])
name_safe = re.sub("\W", "_", name)
# Lat/Lon via Join or direct?
try:
layer["query"][0].gis_location.lat
join = True
except:
join = False
# Push the Features into a temporary table in order to have them accessible via GeoJSON
# @ToDo: Maintenance Script to clean out old entries (> 24 hours?)
fqtable = s3db.gis_feature_query
cname = "%s_%s_%s" % (name_safe,
request.controller,
request.function)
# Clear old records
query = (fqtable.name == cname)
if auth.user:
created_by = auth.user.id
else:
# Anonymous
# @ToDo: A deployment with many Anonymous Feature Queries being
# accessed will need to change this design - e.g. use session ID instead
created_by = None
query = query & (fqtable.created_by == created_by)
db(query).delete()
for row in layer["query"]:
rowdict = {"name" : cname}
if join:
rowdict["lat"] = row.gis_location.lat
rowdict["lon"] = row.gis_location.lon
else:
rowdict["lat"] = row["lat"]
rowdict["lon"] = row["lon"]
if "popup_url" in row:
rowdict["popup_url"] = row["popup_url"]
if "popup_label" in row:
rowdict["popup_label"] = row["popup_label"]
if "marker" in row:
rowdict["marker_url"] = URL(c="static", f="img",
args=["markers",
row["marker"].image])
rowdict["marker_height"] = row["marker"].height
rowdict["marker_width"] = row["marker"].width
else:
if "marker_url" in row:
rowdict["marker_url"] = row["marker_url"]
if "marker_height" in row:
rowdict["marker_height"] = row["marker_height"]
if "marker_width" in row:
rowdict["marker_width"] = row["marker_width"]
if "shape" in row:
rowdict["shape"] = row["shape"]
if "size" in row:
rowdict["size"] = row["size"]
if "colour" in row:
rowdict["colour"] = row["colour"]
if "opacity" in row:
rowdict["opacity"] = row["opacity"]
record_id = fqtable.insert(**rowdict)
if not created_by:
auth.s3_make_session_owner(fqtable, record_id)
# URL to retrieve the data
url = "%s.geojson?feature_query.name=%s&feature_query.created_by=%s" % \
(URL(c="gis", f="feature_query"),
cname,
created_by)
if "active" in layer and not layer["active"]:
visibility = ''',
"visibility":false'''
else:
visibility = ""
markerLayer = ""
if "marker" in layer:
# per-Layer Marker
marker = layer["marker"]
if isinstance(marker, int):
# integer (marker_id) not row
query = (mtable.id == marker)
marker = db(query).select(mtable.image,
mtable.height,
mtable.width,
limitby=(0, 1),
cache=cache).first()
if marker:
markerLayer = ''',
"marker_url":"%s",
"marker_height":%i,
"marker_width":%i''' % (marker["image"], marker["height"], marker["width"])
else:
markerLayer = ""
if "opacity" in layer and layer["opacity"] != 1:
opacity = ''',
"opacity":%.1f''' % layer["opacity"]
else:
opacity = ""
if "cluster_distance" in layer and layer["cluster_distance"] != self.cluster_distance:
cluster_distance = ''',
"cluster_distance":%i''' % layer["cluster_distance"]
else:
cluster_distance = ""
if "cluster_threshold" in layer and layer["cluster_threshold"] != self.cluster_threshold:
cluster_threshold = ''',
"cluster_threshold":%i''' % layer["cluster_threshold"]
else:
cluster_threshold = ""
# Generate JS snippet to pass to static
layers_feature_queries += '''
S3.gis.layers_feature_queries[%i]={
"name":"%s",
"url":"%s"%s%s%s%s%s
}
''' % (counter,
name,
url,
visibility,
markerLayer,
opacity,
cluster_distance,
cluster_threshold)
# ---------------------------------------------------------------------
# Feature Resources
#
# REST URLs to back-end resources
#
if feature_resources:
layers_feature_resources = '''
S3.gis.layers_feature_resources=new Array()'''
counter = -1
else:
layers_feature_resources = ""
for layer in feature_resources:
counter = counter + 1
name = str(layer["name"])
id = str(layer["id"])
id = re.sub("\W", "_", id)
# URL to retrieve the data
url = layer["url"]
# Optimise the query & & tell back-end not to add the type to the tooltips
options = "components=None&maxdepth=0&references=location_id&fields=name&label_off=1"
if "?" in url:
url = "%s&%s" % (url, options)
else:
url = "%s?%s" % (url, options)
if "active" in layer and not layer["active"]:
visibility = ''',
"visibility":false'''
else:
visibility = ""
if "opacity" in layer and layer["opacity"] != 1:
opacity = ''',
"opacity":%.1f''' % layer["opacity"]
else:
opacity = ""
if "cluster_distance" in layer and layer["cluster_distance"] != self.cluster_distance:
cluster_distance = ''',
"cluster_distance":%i''' % layer["cluster_distance"]
else:
cluster_distance = ""
if "cluster_threshold" in layer and layer["cluster_threshold"] != self.cluster_threshold:
cluster_threshold = ''',
"cluster_threshold":%i''' % layer["cluster_threshold"]
else:
cluster_threshold = ""
if "marker" in layer:
marker = layer["marker"]
markerLayer = ''',
"marker_image":"%s",
"marker_height":%i,
"marker_width":%i''' % (marker["image"], marker["height"], marker["width"])
else:
markerLayer = ""
# Generate JS snippet to pass to static
layers_feature_resources += '''
S3.gis.layers_feature_resources[%i]={
"name":"%s",
"id":"%s",
"url":"%s"%s%s%s%s%s
}
''' % (counter,
name,
id,
url,
visibility,
markerLayer,
opacity,
cluster_distance,
cluster_threshold)
if catalogue_layers:
# Add all Layers from the Catalogue
layer_types = [
ArcRESTLayer,
BingLayer,
EmptyLayer,
GoogleLayer,
OSMLayer,
TMSLayer,
WMSLayer,
XYZLayer,
JSLayer,
ThemeLayer,
GeoJSONLayer,
GPXLayer,
CoordinateLayer,
GeoRSSLayer,
KMLLayer,
OpenWeatherMapLayer,
WFSLayer,
FeatureLayer,
]
else:
# Add just the default Base Layer
s3.gis.base = True
layer_types = []
ltable = s3db.gis_layer_config
etable = s3db.gis_layer_entity
query = (etable.id == ltable.layer_id) & \
(ltable.config_id == config["id"]) & \
(ltable.base == True) & \
(ltable.enabled == True)
layer = db(query).select(etable.instance_type,
limitby=(0, 1)).first()
if layer:
layer_type = layer.instance_type
if layer_type == "gis_layer_openstreetmap":
layer_types = [OSMLayer]
elif layer_type == "gis_layer_google":
# NB v3 doesn't work when initially hidden
layer_types = [GoogleLayer]
elif layer_type == "gis_layer_arcrest":
layer_types = [ArcRESTLayer]
elif layer_type == "gis_layer_bing":
layer_types = [BingLayer]
elif layer_type == "gis_layer_tms":
layer_types = [TMSLayer]
elif layer_type == "gis_layer_wms":
layer_types = [WMSLayer]
elif layer_type == "gis_layer_xyz":
layer_types = [XYZLayer]
elif layer_type == "gis_layer_empty":
layer_types = [EmptyLayer]
if not layer_types:
layer_types = [EmptyLayer]
layers_config = ""
for LayerType in layer_types:
try:
# Instantiate the Class
layer = LayerType()
layer_type_js = layer.as_javascript()
if layer_type_js:
# Add to the output JS
layers_config = "".join((layers_config,
layer_type_js))
for script in layer.scripts:
if "google.com" in script:
# Uses document.write, so can't load async
script = SCRIPT(_type="text/javascript",
_src=script)
html_append(script)
else:
add_javascript(script, ready=ready)
except Exception, exception:
error = "%s not shown: %s" % (LayerType.__name__, exception)
if debug:
raise HTTP(500, error)
else:
response.warning += error
# WMS getFeatureInfo
# (loads conditionally based on whether queryable WMS Layers have been added)
if s3.gis.get_feature_info:
getfeatureinfo = '''S3.i18n.gis_get_feature_info="%s"
S3.i18n.gis_feature_info="%s"
''' % (T("Get Feature Info"),
T("Feature Info"))
else:
getfeatureinfo = ""
#############
# Main script
#############
# Configure settings to pass through to Static script
# @ToDo: Consider passing this as JSON Objects to allow it to be done dynamically
config_script = "".join((
authenticated,
'''S3.public_url='%s'\n''' % public_url, # Needed just for GoogleEarthPanel
config_id,
s3_gis_window,
s3_gis_windowHide,
s3_gis_windowNotClosable,
maximizable,
collapsed,
toolbar,
loc_select,
'''S3.gis.map_height=%i\n''' % map_height,
'''S3.gis.map_width=%i\n''' % map_width,
'''S3.gis.zoom=%i\n''' % (zoom or 1),
center,
'''S3.gis.projection='%i'\n''' % projection,
'''S3.gis.units='%s'\n''' % units,
'''S3.gis.maxResolution=%f\n'''% maxResolution,
'''S3.gis.maxExtent=[%s]\n''' % maxExtent,
'''S3.gis.numZoomLevels=%i\n''' % numZoomLevels,
'''S3.gis.max_w=%i\n''' % settings.get_gis_marker_max_width(),
'''S3.gis.max_h=%i\n''' % settings.get_gis_marker_max_height(),
mouse_position,
duplicate_features,
wms_browser_name,
wms_browser_url,
mgrs_name,
mgrs_url,
draw_feature,
draw_polygon,
'''S3.gis.marker_default='%s'\n''' % marker_default.image,
'''S3.gis.marker_default_height=%i\n''' % marker_default.height,
'''S3.gis.marker_default_width=%i\n''' % marker_default.width,
osm_auth,
layers_feature_queries,
layers_feature_resources,
_features,
layers_config,
# i18n Labels
legend, # Presence of label turns feature on
search, # Presence of label turns feature on
getfeatureinfo, # Presence of labels turns feature on
upload_layer, # Presence of label turns feature on
layer_properties, # Presence of label turns feature on
'''S3.i18n.gis_requires_login='%s'\n''' % T("Requires Login"),
'''S3.i18n.gis_base_layers='%s'\n''' % T("Base Layers"),
'''S3.i18n.gis_overlays='%s'\n''' % T("Overlays"),
'''S3.i18n.gis_layers='%s'\n''' % T("Layers"),
'''S3.i18n.gis_draft_layer='%s'\n''' % T("Draft Features"),
'''S3.i18n.gis_cluster_multiple='%s'\n''' % T("There are multiple records at this location"),
'''S3.i18n.gis_loading='%s'\n''' % T("Loading"),
'''S3.i18n.gis_length_message='%s'\n''' % T("The length is"),
'''S3.i18n.gis_area_message='%s'\n''' % T("The area is"),
'''S3.i18n.gis_length_tooltip='%s'\n''' % T("Measure Length: Click the points along the path & end with a double-click"),
'''S3.i18n.gis_area_tooltip='%s'\n''' % T("Measure Area: Click the points around the polygon & end with a double-click"),
'''S3.i18n.gis_zoomfull='%s'\n''' % T("Zoom to maximum map extent"),
'''S3.i18n.gis_zoomout='%s'\n''' % T("Zoom Out: click in the map or use the left mouse button and drag to create a rectangle"),
'''S3.i18n.gis_zoomin='%s'\n''' % T("Zoom In: click in the map or use the left mouse button and drag to create a rectangle"),
'''S3.i18n.gis_pan='%s'\n''' % T("Pan Map: keep the left mouse button pressed and drag the map"),
'''S3.i18n.gis_navPrevious='%s'\n''' % T("Previous View"),
'''S3.i18n.gis_navNext='%s'\n''' % T("Next View"),
'''S3.i18n.gis_geoLocate='%s'\n''' % T("Zoom to Current Location"),
'''S3.i18n.gis_draw_feature='%s'\n''' % T("Add Point"),
'''S3.i18n.gis_draw_polygon='%s'\n''' % T("Add Polygon"),
'''S3.i18n.gis_save='%s'\n''' % T("Save: Default Lat, Lon & Zoom for the Viewport"),
'''S3.i18n.gis_potlatch='%s'\n''' % T("Edit the OpenStreetMap data for this area"),
# For S3LocationSelectorWidget
'''S3.i18n.gis_current_location='%s'\n''' % T("Current Location"),
))
html_append(SCRIPT(config_script))
# Static Script
if debug:
add_javascript("scripts/S3/s3.gis.layers.js")
add_javascript("scripts/S3/s3.gis.controls.js")
add_javascript("scripts/S3/s3.gis.js")
else:
add_javascript("scripts/S3/s3.gis.min.js")
# Set up map plugins
# This, and any code it generates is done last
# However, map plugin should not assume this.
if plugins is not None:
for plugin in plugins:
plugin.extend_gis_map(
add_javascript,
html_append # for adding in dynamic configuration, etc.
)
script = "','".join(scripts)
if ready:
ready = '''%s
S3.gis.show_map()''' % ready
else:
ready = "S3.gis.show_map();"
# Tell YepNope to load all our scripts asynchronously & then run the callback
script = '''yepnope({
load:['%s'],
complete:function(){
%s
}
})''' % (script, ready)
html_append(SCRIPT(script))
return html
# =============================================================================
class Marker(object):
    """
        Represents a Map Marker

        Resolves the marker image/height/width either from an explicit
        gis_marker ID, from the symbology linked to a layer, or (fallback)
        from the defaults of the active gis_config.
    """
    def __init__(self, id=None, layer_id=None):
        s3db = current.s3db
        mtable = s3db.gis_marker
        row = None
        config = None
        if id:
            # Lookup the Marker details from it's ID
            row = current.db(mtable.id == id).select(mtable.image,
                                                     mtable.height,
                                                     mtable.width,
                                                     limitby=(0, 1),
                                                     cache=s3db.cache).first()
        elif layer_id:
            # Check if we have a Marker for this Layer
            config = current.gis.get_config()
            ltable = s3db.gis_layer_symbology
            query = (ltable.layer_id == layer_id) & \
                    (ltable.symbology_id == config.symbology_id) & \
                    (ltable.marker_id == mtable.id)
            row = current.db(query).select(mtable.image,
                                           mtable.height,
                                           mtable.width,
                                           limitby=(0, 1)).first()
        if row:
            self.image = row.image
            self.height = row.height
            self.width = row.width
        else:
            # Fall back to the Default Marker of the active config
            if not config:
                config = current.gis.get_config()
            self.image = config.marker_image
            self.height = config.marker_height
            self.width = config.marker_width
        # Always lookup URL client-side
        #self.url = URL(c="static", f="img",
        #               args=["markers", marker.image])

    def add_attributes_to_output(self, output):
        """
            Called by Layer.as_dict()
            - writes the marker attributes into the given output dict
        """
        output["marker_image"] = self.image
        output["marker_height"] = self.height
        output["marker_width"] = self.width

    def as_dict(self):
        """
            Called by gis.get_marker()
        """
        return Storage(image = self.image,
                       height = self.height,
                       width = self.width,
                       )
# =============================================================================
class Projection(object):
    """
        Represents a Map Projection
        - stores just the EPSG code of the projection
    """
    def __init__(self, id=None):
        if not id:
            # Default projection: EPSG of the active config
            self.epsg = current.gis.get_config().epsg
            return
        # Lookup the Projection details from its ID
        s3db = current.s3db
        table = s3db.gis_projection
        record = current.db(table.id == id).select(table.epsg,
                                                   limitby=(0, 1),
                                                   cache=s3db.cache).first()
        self.epsg = record.epsg
# =============================================================================
class Layer(object):
    """
        Abstract base class for Layers from Catalogue

        On instantiation, reads all records of self.tablename which are
        enabled in the active gis_config(s), wraps each accessible record
        in self.SubLayer and stores the result, alpha-sorted by name,
        in self.sublayers.
    """
    def __init__(self):
        sublayers = []
        append = sublayers.append
        # Extra JS files the layer type needs loading client-side
        self.scripts = []
        gis = current.response.s3.gis
        s3db = current.s3db
        s3_has_role = current.auth.s3_has_role
        # Read the Layers enabled in the Active Configs
        tablename = self.tablename
        table = s3db[tablename]
        ctable = s3db.gis_config
        ltable = s3db.gis_layer_config
        # Select all non-meta fields of the layer table ...
        fields = table.fields
        metafields = s3_all_meta_field_names()
        fields = [table[f] for f in fields if f not in metafields]
        fappend = fields.append
        # ... plus the per-config link-table fields needed client-side
        fappend(ltable.enabled)
        fappend(ltable.visible)
        fappend(ltable.base)
        fappend(ltable.style)
        # pe_type is used for the orderby (hierarchy of configs)
        fappend(ctable.pe_type)
        query = (table.layer_id == ltable.layer_id) & \
                (ltable.config_id == ctable.id) & \
                (ltable.config_id.belongs(gis.config.ids))
        if gis.base == True:
            # Only show the default base layer
            if self.tablename == "gis_layer_empty":
                # Show even if disabled (as fallback)
                query = (table.id > 0)
            else:
                query = query & (ltable.base == True)
        rows = current.db(query).select(orderby=ctable.pe_type,
                                        *fields)
        layer_ids = []
        lappend = layer_ids.append
        SubLayer = self.SubLayer
        # Flag to show whether we've set the default baselayer
        # (otherwise a config higher in the hierarchy can overrule one lower down)
        base = True
        for _record in rows:
            record = _record[tablename]
            # Check if we've already seen this layer
            # (rows are ordered by pe_type, so the 1st occurrence wins)
            layer_id = record.layer_id
            if layer_id in layer_ids:
                continue
            # Add layer to list of checked
            lappend(layer_id)
            # Check if layer is enabled
            _config = _record["gis_layer_config"]
            if not _config.enabled:
                continue
            # Check user is allowed to access the layer
            role_required = record.role_required
            if role_required and not s3_has_role(role_required):
                continue
            # All OK - add SubLayer
            record["visible"] = _config.visible
            if base and _config.base:
                # name can't conflict with OSM/WMS/ArcREST layers
                record["_base"] = True
                base = False
            else:
                record["_base"] = False
            record["style"] = _config.style
            if tablename in ["gis_layer_bing", "gis_layer_google"]:
                # SubLayers handled differently
                append(record)
            else:
                append(SubLayer(record))
        # Alphasort layers
        # - client will only sort within their type: s3.gis.layers.js
        self.sublayers = sorted(sublayers, key=lambda row: row.name)
    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layers as Javascript
            - suitable for inclusion in the HTML page

            Returns '<js_array>=<JSON list of sublayer dicts>\n',
            or None if there is nothing to output.
        """
        sublayer_dicts = []
        append = sublayer_dicts.append
        sublayers = self.sublayers
        for sublayer in sublayers:
            # Read the output dict for this sublayer
            sublayer_dict = sublayer.as_dict()
            if sublayer_dict:
                # Add this layer to the list of layers for this layer type
                append(sublayer_dict)
        if sublayer_dicts:
            # Output the Layer Type as JSON
            layer_type_json = json.dumps(sublayer_dicts,
                                         sort_keys=True,
                                         indent=4)
            return '''%s=%s\n''' % (self.js_array, layer_type_json)
        else:
            return None
    # -------------------------------------------------------------------------
    def as_json(self):
        """
            Output the Layers as JSON

            @ToDo: Support layers with SubLayer.as_dict() to pass config
                   dynamically between server & client
        """
        # NOTE(review): self.record is not set by __init__ here -
        # presumably assigned by a subclass or caller; confirm before use
        if self.record:
            return json.dumps(self.as_dict(), indent=4, sort_keys=True)
        else:
            return
    # -------------------------------------------------------------------------
    class SubLayer(object):
        """ Wraps a single layer record & provides serialisation helpers """
        def __init__(self, record):
            # Ensure all attributes available (even if Null)
            self.__dict__.update(record)
            del record
            # Name with quote characters stripped (safe to embed in JS strings)
            self.safe_name = re.sub('[\\"]', "", self.name)
            self.marker = Marker(layer_id=self.layer_id)
            if hasattr(self, "projection_id"):
                self.projection = Projection(self.projection_id)
        def setup_clustering(self, output):
            # Only pass cluster settings that differ from the global defaults
            gis = current.gis
            cluster_distance = gis.cluster_distance
            cluster_threshold = gis.cluster_threshold
            if self.cluster_distance != cluster_distance:
                output["cluster_distance"] = self.cluster_distance
            if self.cluster_threshold != cluster_threshold:
                output["cluster_threshold"] = self.cluster_threshold
        def setup_folder(self, output):
            # Folder (layer tree directory) if set
            if self.dir:
                output["dir"] = self.dir
        def setup_folder_and_visibility(self, output):
            # visibility defaults to True client-side, so only pass False
            if not self.visible:
                output["visibility"] = False
            if self.dir:
                output["dir"] = self.dir
        def setup_folder_visibility_and_opacity(self, output):
            # visibility defaults to True client-side, so only pass False
            if not self.visible:
                output["visibility"] = False
            # opacity defaults to 1 client-side, so only pass other values
            if self.opacity != 1:
                output["opacity"] = "%.1f" % self.opacity
            if self.dir:
                output["dir"] = self.dir
        @staticmethod
        def add_attributes_if_not_default(output, **values_and_defaults):
            """
                Write each value into output unless it matches one of its
                defaults (each kwarg maps key -> (value, tuple_of_defaults))
            """
            # could also write values in debug mode, to check if defaults ignored.
            # could also check values are not being overwritten.
            # NOTE: iteritems() is Python 2 only
            for key, (value, defaults) in values_and_defaults.iteritems():
                if value not in defaults:
                    output[key] = value
# -----------------------------------------------------------------------------
class ArcRESTLayer(Layer):
    """
        ArcGIS REST Layers from Catalogue
    """
    tablename = "gis_layer_arcrest"
    js_array = "S3.gis.layers_arcrest"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """ Serialise this sublayer as a dict for the client-side JS """
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          type = "arcrest",
                          name = self.safe_name,
                          url = self.url,
                          )
            # Attributes which are defaulted client-side if not set
            self.setup_folder_and_visibility(output)
            self.add_attributes_if_not_default(
                output,
                layers = (self.layers, (0,)),
                transparent = (self.transparent, (True,)),
                base = (self.base, (False,)),
                _base = (self._base, (False,)),
            )
            return output
# -----------------------------------------------------------------------------
class BingLayer(Layer):
    """
        Bing Layers from Catalogue
    """
    tablename = "gis_layer_bing"
    js_array = "S3.gis.Bing"

    # -------------------------------------------------------------------------
    def as_dict(self):
        """
            Collect all Bing sublayers into a single dict for the client,
            or return None if there are no sublayers configured.

            Raises if the projection is not Spherical Mercator or if no
            Bing API key is configured.
        """
        sublayers = self.sublayers
        if not sublayers:
            return None
        if Projection().epsg != 900913:
            raise Exception("Cannot display Bing layers unless we're using the Spherical Mercator Projection\n")
        apikey = current.deployment_settings.get_gis_api_bing()
        if not apikey:
            raise Exception("Cannot display Bing layers unless we have an API key\n")
        # Mandatory attributes
        output = {"ApiKey": apikey}
        # Map each Bing layer type to its output key & fallback display name
        type_map = {"aerial": ("Aerial", "Bing Satellite"),
                    "road": ("Road", "Bing Roads"),
                    "hybrid": ("Hybrid", "Bing Hybrid"),
                    }
        for sublayer in sublayers:
            # Attributes which are defaulted client-side if not set
            if sublayer._base:
                # Set default Base layer
                output["Base"] = sublayer.type
            entry = type_map.get(sublayer.type)
            if entry:
                key, fallback = entry
                output[key] = {"name": sublayer.name or fallback,
                               "id": sublayer.layer_id}
        return output

    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layer as Javascript
            - suitable for inclusion in the HTML page
        """
        output = self.as_dict()
        if not output:
            return None
        result = json.dumps(output, indent=4, sort_keys=True)
        if not result:
            return None
        return '''%s=%s\n''' % (self.js_array, result)
# -----------------------------------------------------------------------------
class CoordinateLayer(Layer):
    """
        Coordinate Layer from Catalogue
        - there should only be one of these
    """
    tablename = "gis_layer_coordinate"

    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layer as Javascript
            - suitable for inclusion in the HTML page
        """
        if not self.sublayers:
            return None
        # Only the first (and normally only) sublayer is used
        layer = self.sublayers[0]
        # Strip single quotes so the name is safe inside the JS string
        name_safe = re.sub("'", "", layer.name)
        visibility = "true" if layer.visible else "false"
        return '''S3.gis.CoordinateGrid={name:'%s',visibility:%s,id:%s}\n''' % \
               (name_safe, visibility, layer.layer_id)
# -----------------------------------------------------------------------------
class EmptyLayer(Layer):
    """
        Empty Layer from Catalogue
        - there should only be one of these
    """
    tablename = "gis_layer_empty"

    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
            Output the Layer as Javascript
            - suitable for inclusion in the HTML page
        """
        if not self.sublayers:
            return None
        # Only the first (and normally only) sublayer is used
        layer = self.sublayers[0]
        # Localise the name & strip single quotes for safe JS embedding
        name_safe = re.sub("'", "", str(current.T(layer.name)))
        base = ",base:true" if layer._base else ""
        return '''S3.gis.EmptyLayer={name:'%s',id:%s%s}\n''' % \
               (name_safe, layer.layer_id, base)
# -----------------------------------------------------------------------------
class FeatureLayer(Layer):
    """
        Feature Layers from Catalogue
    """
    tablename = "gis_layer_feature"
    js_array = "S3.gis.layers_features"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def __init__(self, record):
            c = record.controller or record.module # Backwards-compatibility
            if c is None:
                raise Exception("FeatureLayer Record '%s' has no controller" % record.name)
            self.skip = False
            if c not in current.deployment_settings.modules:
                # Module is disabled
                self.skip = True
            if not current.auth.permission.has_permission("read",
                                                          c=c,
                                                          f=record.function or record.resource):
                # User has no permission to this resource (in ACL)
                self.skip = True
            super(FeatureLayer.SubLayer, self).__init__(record)

        def as_dict(self):
            """ Serialise this sublayer as a dict for the client-side JS """
            if self.skip:
                # Skip layer
                return
            c = self.controller or self.module # Backwards-compatibility
            f = self.function or self.resource # Backwards-compatibility
            url = "%s.geojson?layer=%i&components=None&maxdepth=0&references=location_id&fields=name" % \
                  (URL(c, f), self.id)
            if self.filter:
                url += "&%s" % self.filter
            if self.trackable:
                url += "&track=1"
            # Mandatory attributes
            # ("type" is defaulted to "feature" client-side)
            output = {"id": self.layer_id,
                      "name": self.safe_name,
                      "url": url,
                      }
            # Attributes which are defaulted client-side if not set
            self.marker.add_attributes_to_output(output)
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class GeoJSONLayer(Layer):
    """
        GeoJSON Layers from Catalogue
    """
    tablename = "gis_layer_geojson"
    js_array = "S3.gis.layers_geojson"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """ Serialise this sublayer as a dict for the client-side JS """
            # Mandatory attributes
            output = dict(id = self.layer_id,
                          type = "geojson",
                          name = self.safe_name,
                          url = self.url,
                          )
            self.marker.add_attributes_to_output(output)
            # Attributes which are defaulted client-side if not set
            # - only pass the projection when it differs from WGS84
            epsg = self.projection.epsg
            if epsg != 4326:
                output["projection"] = epsg
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class GeoRSSLayer(Layer):
    """
        GeoRSS Layers from Catalogue

        Feeds are proxied through a server-side cache (gis_cache table),
        which is refreshed on-demand whenever the cached copy is missing
        or older than the layer's refresh interval.
    """
    tablename = "gis_layer_georss"
    js_array = "S3.gis.layers_georss"
    def __init__(self):
        super(GeoRSSLayer, self).__init__()
        # Share the cache table with all SubLayer instances
        GeoRSSLayer.SubLayer.cachetable = current.s3db.gis_cache
    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """
                Serialise this sublayer as a dict for the client-side JS,
                first refreshing the server-side cache of the feed if the
                cached copy is missing or stale.
            """
            db = current.db
            request = current.request
            response = current.response
            cachetable = self.cachetable
            url = self.url
            # Check to see if we should Download layer to the cache
            download = True
            query = (cachetable.source == url)
            existing_cached_copy = db(query).select(cachetable.modified_on,
                                                    limitby=(0, 1)).first()
            refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
            if existing_cached_copy:
                modified_on = existing_cached_copy.modified_on
                cutoff = modified_on + timedelta(seconds=refresh)
                if request.utcnow < cutoff:
                    # Cached copy is still fresh enough
                    download = False
            if download:
                # Download layer to the Cache
                from gluon.tools import fetch
                # @ToDo: Call directly without going via HTTP
                # @ToDo: Make this async by using S3Task (also use this for the refresh time)
                fields = ""
                if self.data:
                    fields = "&data_field=%s" % self.data
                if self.image:
                    fields = "%s&image_field=%s" % (fields, self.image)
                _url = "%s%s/update.georss?fetchurl=%s%s" % (current.deployment_settings.get_base_public_url(),
                                                             URL(c="gis", f="cache_feed"),
                                                             url,
                                                             fields)
                # Keep Session for local URLs
                import Cookie
                cookie = Cookie.SimpleCookie()
                cookie[response.session_id_name] = response.session_id
                current.session._unlock(response)
                try:
                    # @ToDo: Need to commit to not have DB locked with SQLite?
                    fetch(_url, cookie=cookie)
                    if existing_cached_copy:
                        # Clear old selfs which are no longer active
                        query = (cachetable.source == url) & \
                                (cachetable.modified_on < cutoff)
                        db(query).delete()
                # FIX: "except X as e" is valid on Python 2.6+ AND Python 3
                # (the old "except X, e" comma form is a SyntaxError on Py3)
                except Exception as exception:
                    s3_debug("GeoRSS %s download error" % url, exception)
                    # Feed down
                    if existing_cached_copy:
                        # Use cached copy
                        # Should we Update timestamp to prevent every
                        # subsequent request attempting the download?
                        #query = (cachetable.source == url)
                        #db(query).update(modified_on=request.utcnow)
                        pass
                    else:
                        response.warning += "%s down & no cached copy available" % url
            name_safe = self.safe_name
            # Pass the GeoJSON URL to the client
            # Filter to the source of this feed
            url = "%s.geojson?cache.source=%s" % (URL(c="gis", f="cache_feed"),
                                                  url)
            # Mandatory attributes
            output = {
                    "id": self.layer_id,
                    "type": "georss",
                    "name": name_safe,
                    "url": url,
                }
            self.marker.add_attributes_to_output(output)
            # Attributes which are defaulted client-side if not set
            if self.refresh != 900:
                output["refresh"] = self.refresh
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class GoogleLayer(Layer):
"""
Google Layers/Tools from Catalogue
"""
tablename = "gis_layer_google"
js_array = "S3.gis.Google"
# -------------------------------------------------------------------------
def as_dict(self):
sublayers = self.sublayers
if sublayers:
T = current.T
epsg = (Projection().epsg == 900913)
apikey = current.deployment_settings.get_gis_api_google()
debug = current.response.s3.debug
add_script = self.scripts.append
output = {}
for sublayer in sublayers:
# Attributes which are defaulted client-side if not set
if sublayer.type == "earth":
output["Earth"] = str(T("Switch to 3D"))
add_script("http://www.google.com/jsapi?key=%s" % apikey)
add_script(SCRIPT('''try{google && google.load('earth','1')}catch(e){}''', _type="text/javascript"))
if debug:
# Non-debug has this included within GeoExt.js
add_script("scripts/gis/gxp/widgets/GoogleEarthPanel.js")
elif epsg:
# Earth is the only layer which can run in non-Spherical Mercator
# @ToDo: Warning?
if sublayer._base:
# Set default Base layer
output["Base"] = sublayer.type
if sublayer.type == "satellite":
output["Satellite"] = {"name": sublayer.name or "Google Satellite",
"id": sublayer.layer_id}
elif sublayer.type == "maps":
output["Maps"] = {"name": sublayer.name or "Google Maps",
"id": sublayer.layer_id}
elif sublayer.type == "hybrid":
output["Hybrid"] = {"name": sublayer.name or "Google Hybrid",
"id": sublayer.layer_id}
elif sublayer.type == "streetview":
output["StreetviewButton"] = "Click where you want to open Streetview"
elif sublayer.type == "terrain":
output["Terrain"] = {"name": sublayer.name or "Google Terrain",
"id": sublayer.layer_id}
elif sublayer.type == "mapmaker":
output["MapMaker"] = {"name": sublayer.name or "Google MapMaker",
"id": sublayer.layer_id}
elif sublayer.type == "mapmakerhybrid":
output["MapMakerHybrid"] = {"name": sublayer.name or "Google MapMaker Hybrid",
"id": sublayer.layer_id}
if "MapMaker" in output or "MapMakerHybrid" in output:
# Need to use v2 API
# This should be able to be fixed in OpenLayers now since Google have fixed in v3 API:
# http://code.google.com/p/gmaps-api-issues/issues/detail?id=2349#c47
add_script("http://maps.google.com/maps?file=api&v=2&key=%s" % apikey)
else:
# v3 API (3.7 is frozen, 3.8 release & 3.9 is nightly)
add_script("http://maps.google.com/maps/api/js?v=3.7&sensor=false")
if "StreetviewButton" in output:
# Streetview doesn't work with v2 API
output["StreetviewButton"] = str(T("Click where you want to open Streetview"))
output["StreetviewTitle"] = str(T("Street View"))
if debug:
# Non-debug has this included within GeoExt.js
add_script("scripts/gis/gxp/widgets/GoogleStreetViewPanel.js")
return output
else:
return None
# -------------------------------------------------------------------------
def as_javascript(self):
    """
    Output the Layer as Javascript
    - suitable for inclusion in the HTML page

    :returns: a string of the form ``<js_array>=<json>\n`` ready to be
              embedded in the page, or None if as_dict() produced nothing
              (e.g. the layer is not enabled/configured)
    """
    output = self.as_dict()
    if output:
        # Deterministic serialisation (sorted keys) keeps page diffs stable
        result = json.dumps(output, indent=4, sort_keys=True)
        if result:
            return '''%s=%s\n''' % (self.js_array, result)
    return None
# -----------------------------------------------------------------------------
class GPXLayer(Layer):
    """
    GPX Layers from Catalogue

    Serialises gis_layer_gpx records into config dicts for the JS map client.
    """
    tablename = "gis_layer_gpx"      # source table
    js_array = "S3.gis.layers_gpx"   # JS variable the layer configs are written to

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one GPX sublayer to a dict for the JS client."""
            # The GPX file is served via the default download controller
            url = URL(c="default", f="download",
                      args=self.track)

            # Mandatory attributes
            output = {
                "id": self.layer_id,
                "name": self.safe_name,
                "url": url,
            }
            self.marker.add_attributes_to_output(output)
            # Optional flags: only emitted when not the client-side default
            self.add_attributes_if_not_default(
                output,
                waypoints = (self.waypoints, (True,)),
                tracks = (self.tracks, (True,)),
                routes = (self.routes, (True,)),
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class JSLayer(Layer):
    """
    JS Layers from Catalogue
    - these are raw Javascript layers for use by expert OpenLayers people
      to quickly add/configure new data sources without needing support
      from back-end Sahana programmers
    """
    tablename = "gis_layer_js"

    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
        Output the Layer as Javascript
        - suitable for inclusion in the HTML page

        Wraps the raw code of every sublayer in an addJSLayers() function;
        returns None when there are no sublayers.
        """
        sublayers = self.sublayers
        if not sublayers:
            return None
        # Assemble one fragment per line: header, each sublayer's raw
        # Javascript, then the closing brace
        fragments = ["function addJSLayers() {"]
        fragments.extend(sublayer.code for sublayer in sublayers)
        fragments.append("}")
        return "\n".join(fragments)
# -----------------------------------------------------------------------------
class KMLLayer(Layer):
    """
    KML Layers from Catalogue

    Supports optional server-side caching of remote KML feeds so the client
    can fetch them same-origin (also needed for unzipping/filtering KMZ).
    """
    tablename = "gis_layer_kml"
    js_array = "S3.gis.layers_kml"

    # -------------------------------------------------------------------------
    def __init__(self):
        "Set up the KML cache, should be done once per request"
        super(KMLLayer, self).__init__()

        # Needed for gis.download_kml()
        self.table = current.s3db[self.tablename]

        # Can we cache downloaded KML feeds?
        # Needed for unzipping & filtering as well
        # @ToDo: Should we move this folder to static to speed up access to cached content?
        #        Do we need to secure it?
        cachepath = os.path.join(current.request.folder,
                                 "uploads",
                                 "gis_cache")

        if os.path.exists(cachepath):
            # Folder exists: cacheable only if it is writable
            cacheable = os.access(cachepath, os.W_OK)
        else:
            try:
                os.mkdir(cachepath)
            except OSError, os_error:
                # (Python 2 except syntax) - cannot create the cache folder
                s3_debug(
                    "GIS: KML layers cannot be cached: %s %s" % (
                        cachepath,
                        os_error
                    )
                )
                cacheable = False
            else:
                cacheable = True
        # @ToDo: Migrate to gis_cache
        # Stored as *class* attributes so SubLayer.as_dict below can read them
        KMLLayer.cachetable = current.s3db.gis_cache2
        KMLLayer.cacheable = cacheable
        KMLLayer.cachepath = cachepath

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one KML sublayer, refreshing the cached feed if stale."""
            db = current.db
            request = current.request

            cachetable = KMLLayer.cachetable
            cacheable = KMLLayer.cacheable
            cachepath = KMLLayer.cachepath

            name = self.name
            if cacheable:
                # Build a filesystem-safe filename from the layer name
                _name = urllib2.quote(name)
                _name = _name.replace("%", "_")
                filename = "%s.file.%s.kml" % (cachetable._tablename,
                                               _name)

                # Should we download a fresh copy of the source file?
                download = True
                query = (cachetable.name == name)
                cached = db(query).select(cachetable.modified_on,
                                          limitby=(0, 1)).first()
                refresh = self.refresh or 900 # 15 minutes set if we have no data (legacy DB)
                if cached:
                    modified_on = cached.modified_on
                    cutoff = modified_on + timedelta(seconds=refresh)
                    if request.utcnow < cutoff:
                        # Cached copy is still fresh enough
                        download = False

                if download:
                    # Download file (async, if workers alive)
                    current.s3task.async("gis_download_kml",
                                         args=[self.id, filename])
                    if cached:
                        db(query).update(modified_on=request.utcnow)
                    else:
                        cachetable.insert(name=name, file=filename)

                url = URL(c="default", f="download",
                          args=[filename])
            else:
                # No caching possible (e.g. GAE), display file direct from remote (using Proxy)
                # (Requires OpenLayers.Layer.KML to be available)
                url = self.url

            output = dict(
                id = self.layer_id,
                name = self.safe_name,
                url = url,
            )
            # Optional attributes: only emitted when not the client default
            self.add_attributes_if_not_default(
                output,
                title = (self.title, ("name", None, "")),
                body = (self.body, ("description", None)),
                refresh = (self.refresh, (900,)),
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            self.marker.add_attributes_to_output(output)
            return output
# -----------------------------------------------------------------------------
class OSMLayer(Layer):
    """
    OpenStreetMap Layers from Catalogue

    @ToDo: Provide a catalogue of standard layers which are fully-defined
           in static & can just have name over-ridden, as well as
           fully-custom layers.
    """
    tablename = "gis_layer_openstreetmap"
    js_array = "S3.gis.layers_osm"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one OSM sublayer; empty dict when projection unsupported."""
            if Projection().epsg != 900913:
                # Cannot display OpenStreetMap layers unless we're using the Spherical Mercator Projection
                return {}

            output = {
                "id": self.layer_id,
                "name": self.safe_name,
                "url1": self.url1,
            }
            # Optional attributes: only emitted when not the client default
            self.add_attributes_if_not_default(
                output,
                base = (self.base, (True,)),
                _base = (self._base, (False,)),
                url2 = (self.url2, ("",)),
                url3 = (self.url3, ("",)),
                zoomLevels = (self.zoom_levels, (9,)),
                attribution = (self.attribution, (None,)),
            )
            self.setup_folder_and_visibility(output)
            return output
# -----------------------------------------------------------------------------
class OpenWeatherMapLayer(Layer):
    """
    OpenWeatherMap Layers from Catalogue
    """
    tablename = "gis_layer_openweathermap"
    js_array = "S3.gis.OWM"

    # -------------------------------------------------------------------------
    def as_dict(self):
        """
        Collect the enabled sublayers into a dict keyed by type
        ("station"/"city"); returns None when there are no sublayers.
        """
        sublayers = self.sublayers
        if sublayers:
            if current.response.s3.debug:
                # Non-debug has this included within OpenLayers.js
                self.scripts.append("scripts/gis/OWM.OpenLayers.1.3.0.2.js")
            output = {}
            for sublayer in sublayers:
                if sublayer.type == "station":
                    output["station"] = {"name": sublayer.name or "Weather Stations",
                                         "id": sublayer.layer_id,
                                         "dir": sublayer.dir,
                                         "visibility": sublayer.visible
                                         }
                elif sublayer.type == "city":
                    output["city"] = {"name": sublayer.name or "Current Weather",
                                      "id": sublayer.layer_id,
                                      "dir": sublayer.dir,
                                      "visibility": sublayer.visible
                                      }
            return output
        else:
            return None

    # -------------------------------------------------------------------------
    def as_javascript(self):
        """
        Output the Layer as Javascript
        - suitable for inclusion in the HTML page
        """
        output = self.as_dict()
        if output:
            # Deterministic serialisation (sorted keys) keeps page diffs stable
            result = json.dumps(output, indent=4, sort_keys=True)
            if result:
                return '''%s=%s\n''' % (self.js_array, result)
        return None
# -----------------------------------------------------------------------------
class ThemeLayer(Layer):
    """
    Theme Layers from Catalogue
    """
    tablename = "gis_layer_theme"
    js_array = "S3.gis.layers_theme"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one Theme sublayer to a dict for the JS client."""
            # GeoJSON feed of theme data filtered to this theme layer
            url = "%s.geojson?theme_data.layer_theme_id=%i&polygons=1&maxdepth=0&references=location_id&fields=value" % \
                  (URL(c="gis", f="theme_data"),
                   self.id)

            # Mandatory attributes
            output = {
                "id": self.layer_id,
                "type": "theme",
                "name": self.safe_name,
                "url": url,
            }
            self.setup_folder_and_visibility(output)
            self.setup_clustering(output)
            # NOTE(review): assumes self.style always holds valid JSON text;
            # json.loads would raise on None - confirm the DB field default
            style = json.loads(self.style)
            self.add_attributes_if_not_default(
                output,
                style = (style, (None,)),
            )
            return output
# -----------------------------------------------------------------------------
class TMSLayer(Layer):
    """
    TMS Layers from Catalogue

    Serialises gis_layer_tms records into the config dicts consumed by the
    Javascript map client.
    """
    tablename = "gis_layer_tms"
    js_array = "S3.gis.layers_tms"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one TMS sublayer to a dict for the JS client."""
            # Attributes which are always present
            layer = {
                "id": self.layer_id,
                "type": "tms",
                "name": self.safe_name,
                "url": self.url,
                "layername": self.layername
            }
            # Optional attributes: only sent when they differ from the
            # client-side defaults
            self.add_attributes_if_not_default(
                layer,
                _base = (self._base, (False,)),
                url2 = (self.url2, (None,)),
                url3 = (self.url3, (None,)),
                format = (self.img_format, ("png", None)),
                zoomLevels = (self.zoom_levels, (19,)),
                attribution = (self.attribution, (None,)),
            )
            self.setup_folder(layer)
            return layer
# -----------------------------------------------------------------------------
class WFSLayer(Layer):
    """
    WFS Layers from Catalogue
    """
    tablename = "gis_layer_wfs"
    js_array = "S3.gis.layers_wfs"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one WFS sublayer to a dict for the JS client."""
            # Mandatory attributes
            output = dict(
                id = self.layer_id,
                name = self.safe_name,
                url = self.url,
                title = self.title,
                featureType = self.featureType,
                featureNS = self.featureNS,
                schema = self.wfs_schema,
            )
            # Optional attributes: only emitted when not the client default
            self.add_attributes_if_not_default(
                output,
                version = (self.version, ("1.1.0",)),
                geometryName = (self.geometryName, ("the_geom",)),
                username = (self.username, (None,)),
                password = (self.password, (None,)),
                styleField = (self.style_field, (None,)),
                styleValues = (self.style_values, ("{}", None)),
                projection = (self.projection.epsg, (4326,)),
                #editable
            )
            self.setup_folder_visibility_and_opacity(output)
            self.setup_clustering(output)
            return output
# -----------------------------------------------------------------------------
class WMSLayer(Layer):
    """
    WMS Layers from Catalogue
    """
    js_array = "S3.gis.layers_wms"
    tablename = "gis_layer_wms"

    # -------------------------------------------------------------------------
    def __init__(self):
        super(WMSLayer, self).__init__()
        if self.sublayers:
            if current.response.s3.debug:
                # Non-debug has this included within GeoExt.js
                self.scripts.append("scripts/gis/gxp/plugins/WMSGetFeatureInfo.js")

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one WMS sublayer to a dict for the JS client."""
            if self.queryable:
                # Flag the page so the GetFeatureInfo plugin gets wired up
                current.response.s3.gis.get_feature_info = True
            # Mandatory attributes
            output = dict(
                id = self.layer_id,
                name = self.safe_name,
                url = self.url,
                layers = self.layers
            )
            # Resolve relative legend URLs against this server's public URL
            legend_url = self.legend_url
            if legend_url and not legend_url.startswith("http"):
                legend_url = "%s/%s%s" % \
                    (current.deployment_settings.get_base_public_url(),
                     current.request.application,
                     legend_url)
            # Optional attributes: only emitted when not the client default
            self.add_attributes_if_not_default(
                output,
                transparent = (self.transparent, (True,)),
                version = (self.version, ("1.1.1",)),
                format = (self.img_format, ("image/png",)),
                map = (self.map, (None,)),
                username = (self.username, (None,)),
                password = (self.password, (None,)),
                buffer = (self.buffer, (0,)),
                base = (self.base, (False,)),
                _base = (self._base, (False,)),
                style = (self.style, (None,)),
                bgcolor = (self.bgcolor, (None,)),
                tiled = (self.tiled, (False, )),
                legendURL = (legend_url, (None,)),
                queryable = (self.queryable, (False, )),
            )
            self.setup_folder_visibility_and_opacity(output)
            return output
# -----------------------------------------------------------------------------
class XYZLayer(Layer):
    """
    XYZ Layers from Catalogue

    Serialises gis_layer_xyz records into config dicts for the JS client.
    """
    tablename = "gis_layer_xyz"
    js_array = "S3.gis.layers_xyz"

    # -------------------------------------------------------------------------
    class SubLayer(Layer.SubLayer):
        def as_dict(self):
            """Serialise one XYZ sublayer to a dict for the JS client."""
            # Attributes which are always present
            layer = {
                "id": self.layer_id,
                "name": self.safe_name,
                "url": self.url
            }
            # Optional attributes: only sent when they differ from the
            # client-side defaults
            self.add_attributes_if_not_default(
                layer,
                _base = (self._base, (False,)),
                url2 = (self.url2, (None,)),
                url3 = (self.url3, (None,)),
                format = (self.img_format, ("png", None)),
                zoomLevels = (self.zoom_levels, (19,)),
                attribution = (self.attribution, (None,)),
            )
            self.setup_folder(layer)
            return layer
# =============================================================================
class S3Map(S3Search):
    """
    Class to generate a Map with a Search form above it

    @ToDo: Allow .configure() to override normal search_method with one
           for map (like report)
    """

    # -------------------------------------------------------------------------
    def apply_method(self, r, **attr):
        """
        Entry point to apply search method to S3Requests

        @param r: the S3Request
        @param attr: request attributes
        """
        output = dict()
        search = self.resource.search
        if r.component and self != search:
            output = search(r, **attr)
        # Save search
        elif "save" in r.vars :
            r.interactive = False
            output = self.save_search(r, **attr)
        # Interactive or saved search
        elif "load" in r.vars or r.interactive and \
             search._S3Search__interactive:
            # Put shortcuts where other methods expect them
            self.advanced = search.advanced
            # We want advanced open by default
            #self.simple = search.simple
            output = self.search_interactive(r, **attr)

        if not output:
            # Not supported
            r.error(501, current.manager.ERROR.BAD_FORMAT)

        return output

    # -------------------------------------------------------------------------
    def search_interactive(self, r, **attr):
        """
        Interactive search: builds the search form(s) and a map showing the
        matching features as a GeoJSON layer.

        @param r: the S3Request instance
        @param attr: request parameters

        @ToDo: Reload Map Layer by AJAX rather than doing a full-page refresh
        @ToDo: Static JS to resize page to bounds when layer is loaded
        @ToDo: Refactor components common to parent class
        """

        T = current.T
        session = current.session

        table = self.table

        # Only mappable resources (with a location or site reference) work
        if "location_id" in table or \
           "site_id" in table:
            # ok
            pass
        else:
            session.error = T("This resource cannot be displayed on the map!")
            redirect(r.url(method="search"))

        # Get environment
        request = self.request
        response = current.response
        resource = self.resource
        db = current.db
        s3db = current.s3db
        gis = current.gis
        tablename = self.tablename

        # Initialize the form
        form = DIV(_class="search_form form-container")

        # Figure out which set of form values to use
        # POST > GET > session > unfiltered
        if r.http == "POST":
            # POST
            form_values = r.post_vars
        else:
            url_options = Storage([(k, v) for k, v in r.get_vars.iteritems() if v])
            if url_options:
                # GET
                form_values = url_options
            else:
                session_options = session.s3.search_options
                if session_options and tablename in session_options:
                    # session
                    session_options = session_options[tablename]
                else:
                    # unfiltered
                    session_options = Storage()
                form_values = session_options

        # Build the search forms
        simple_form, advanced_form = self.build_forms(r, form_values)

        # Check for Load Search
        if "load" in r.get_vars:
            search_id = r.get_vars.get("load", None)
            if not search_id:
                r.error(400, current.manager.ERROR.BAD_RECORD)
            r.post_vars = r.vars
            search_table = s3db.pr_save_search
            _query = (search_table.id == search_id)
            # Bugfix: select the Field from the table - previously this read
            # record.search_vars before "record" was assigned, which raised
            # an UnboundLocalError whenever a saved search was loaded
            record = current.db(_query).select(search_table.search_vars,
                                               limitby=(0, 1)).first()
            if not record:
                r.error(400, current.manager.ERROR.BAD_RECORD)
            # Replay the saved criteria as if they were POSTed
            s_vars = cPickle.loads(record.search_vars)
            r.post_vars = Storage(s_vars["criteria"])
            r.http = "POST"

        # Process the search forms
        query, errors = self.process_forms(r,
                                           simple_form,
                                           advanced_form,
                                           form_values)
        if not errors:
            resource.add_filter(query)
            search_vars = dict(simple=False,
                               advanced=True,
                               criteria=form_values)
        else:
            search_vars = dict()

        if response.s3.simple_search:
            form.append(DIV(_id="search-mode", _mode="simple"))
        else:
            form.append(DIV(_id="search-mode", _mode="advanced"))

        # Save Search Widget
        if session.auth and \
           current.deployment_settings.get_save_search_widget():
            save_search = self.save_search_widget(r, search_vars, **attr)
        else:
            save_search = DIV()

        # Complete the output form
        if simple_form is not None:
            simple_form.append(save_search)
            form.append(simple_form)
        if advanced_form is not None:
            advanced_form.append(save_search)
            form.append(advanced_form)

        # Add a map for search results
        # (this same map is also used by the Map Search Widget, if-present)

        # Build URL to load the features onto the map
        if query:
            vars = query.serialize_url(resource=resource)
        else:
            vars = None
        url = URL(extension="geojson",
                  args=None,
                  vars=vars)
        feature_resources = [{
                "name"   : T("Search Results"),
                "id"     : "search_results",
                "url"    : url,
                "active" : True,
                "marker" : gis.get_marker(request.controller, request.function)
            }]
        map = gis.show_map(
                           feature_resources=feature_resources,
                           catalogue_layers=True,
                           legend=True,
                           toolbar=True,
                           collapsed=True,
                           search = True,
                           )

        # Title
        title = self.crud_string(tablename, "title_map")

        # View
        response.view = self._view(r, "map.html")

        # RHeader gets added later in S3Method()
        output = dict(
                      title = title,
                      form = form,
                      map = map,
                      )
        return output
# =============================================================================
class Geocoder(object):
    """
    Base class for all Geocoders
    """

    def __init__(self):
        " Initializes the page content object "
        pass

    # -------------------------------------------------------------------------
    @staticmethod
    def get_api_key(type):
        " Acquire API key from the database "
        # NOTE(review): stub - the subclasses below fetch their keys via
        # deployment_settings instead; confirm whether this hook is still used
        pass
# -----------------------------------------------------------------------------
class GoogleGeocoder(Geocoder):
    """
    Google Geocoder module
    http://code.google.com/apis/maps/documentation/javascript/v2/reference.html#GGeoStatusCode
    Should convert this to be a thin wrapper for modules.geopy.geocoders.google
    """

    def __init__(self, location):
        " Initialise parent class & make any necessary modifications "
        Geocoder.__init__(self)
        api_key = current.deployment_settings.get_gis_api_google()
        params = {"q": location, "key": api_key}
        # Python 2: urllib.urlencode builds the query string
        self.url = "http://maps.google.com/maps/geo?%s" % urllib.urlencode(params)

    # -------------------------------------------------------------------------
    def get_json(self):
        " Returns the output in JSON format "
        # Deferred import to avoid loading gluon at module import time
        from gluon.tools import fetch
        url = self.url
        page = fetch(url)
        return page
# -----------------------------------------------------------------------------
class YahooGeocoder(Geocoder):
    """
    Yahoo Geocoder module
    Should convert this to be a thin wrapper for modules.geopy.geocoders.`
    """

    def __init__(self, location):
        " Initialise parent class & make any necessary modifications "
        Geocoder.__init__(self)
        api_key = current.deployment_settings.get_gis_api_yahoo()
        params = {"location": location, "appid": api_key}
        # Python 2: urllib.urlencode builds the query string
        self.url = "http://local.yahooapis.com/MapsService/V1/geocode?%s" % urllib.urlencode(params)

    # -------------------------------------------------------------------------
    def get_xml(self):
        " Return the output in XML format "
        # Deferred import to avoid loading gluon at module import time
        from gluon.tools import fetch
        url = self.url
        page = fetch(url)
        return page
# END =========================================================================
| ashwyn/eden-message_parser | modules/s3/s3gis.py | Python | mit | 274,540 | [
"Amber"
] | dd24024183d256a8da79201d705e6a2e04a9727998f421cbf88d1ce8862b2079 |
# monitor.py
#
# Copyright (C) 2011-2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Brian C. Lane <bcl@redhat.com>
#
import logging
log = logging.getLogger("livemedia-creator")
import re
import socket
import socketserver
import threading
import time
class LogRequestHandler(socketserver.BaseRequestHandler):
    """
    Handle monitoring and saving the logfiles from the virtual install

    Incoming data is written to self.server.log_path and each line is checked
    for patterns that would indicate that the installation failed.
    self.server.log_error is set True when this happens.
    """

    # Substrings whose presence in a line marks the install as failed
    simple_tests = [
        "Traceback (",
        "traceback script(s) have been run",
        "Out of memory:",
        "Call Trace:",
        "insufficient disk space:",
        "Not enough disk space to download the packages",
        "error populating transaction after",
        "crashed on signal",
        "packaging: Missed: NoSuchPackage",
        "packaging: Installation failed",
        "The following error occurred while installing.  This is a fatal error"
    ]

    # Regex patterns whose match in a line marks the install as failed
    re_tests = [
        r"packaging: base repo .* not valid",
        r"packaging: .* requires .*"
    ]

    def setup(self):
        """Start writing to self.server.log_path"""
        if self.server.log_path:
            self.fp = open(self.server.log_path, "w") # pylint: disable=attribute-defined-outside-init
        else:
            # Monitor-only mode: scan lines but do not persist them
            self.fp = None
        # Short recv timeout so the loop can re-check server.kill regularly
        self.request.settimeout(10)

    def handle(self):
        """
        Write incoming data to a logfile and check for errors

        Split incoming data into lines and check for any Tracebacks or other
        errors that indicate that the install failed.

        Loops until self.server.kill is True
        """
        log.info("Processing logs from %s", self.client_address)
        line = ""
        while True:
            if self.server.kill:
                break

            try:
                data = str(self.request.recv(4096), "utf8")
                if self.fp:
                    self.fp.write(data)
                    self.fp.flush()

                # check the data for errors and set error flag
                # need to assemble it into lines so we can test for the error
                # string.
                while data:
                    more = data.split("\n", 1)
                    line += more[0]
                    if len(more) > 1:
                        # A complete line has been assembled; scan it
                        self.iserror(line)
                        line = ""
                        data = more[1]
                    else:
                        data = None

            except socket.timeout:
                # No data this interval - loop so server.kill is re-checked
                pass
            except Exception as e:      # pylint: disable=broad-except
                log.info("log processing killed by exception: %s", e)
                break

    def finish(self):
        """Close the connection and the logfile (if one was opened)."""
        log.info("Shutting down log processing")
        self.request.close()
        if self.fp:
            self.fp.close()

    def iserror(self, line):
        """
        Check a line to see if it contains an error indicating installation failure

        :param str line: log line to check for failure

        If the line contains IGNORED it will be skipped.
        """
        if "IGNORED" in line:
            return

        for t in self.simple_tests:
            if t in line:
                self.server.log_error = True
                self.server.error_line = line
                return
        for t in self.re_tests:
            if re.search(t, line):
                self.server.log_error = True
                self.server.error_line = line
                return
class LogServer(socketserver.TCPServer):
    """A TCP Server that listens for log data"""

    # Number of seconds to wait for a connection after startup
    timeout = 60

    def __init__(self, log_path, *args, **kwargs):
        """
        Setup the log server

        :param str log_path: Path to the log file to write
        :param int timeout: Optional keyword-only install timeout in
                            *minutes* (popped before the rest is passed on
                            to TCPServer); distinct from the class-level
                            ``timeout`` which is the accept timeout in seconds
        """
        self.kill = False          # set True to make the request handler loop exit
        self.log_error = False     # set True by the handler on a fatal log line
        self.error_line = ""       # the offending log line, if any
        self.log_path = log_path
        self._timeout = kwargs.pop("timeout", None)
        if self._timeout:
            # Remember when we started so log_check can detect a hang
            self._start_time = time.time()
        socketserver.TCPServer.__init__(self, *args, **kwargs)

    def log_check(self):
        """
        Check to see if an error has been found in the log

        :returns: True if there has been an error
        :rtype: bool
        """
        if self._timeout:
            # _timeout is in minutes
            taking_too_long = time.time() > self._start_time + (self._timeout * 60)
            if taking_too_long:
                log.error("Canceling installation due to timeout")
        else:
            taking_too_long = False
        return self.log_error or taking_too_long
class LogMonitor(object):
    """
    Setup a server to monitor the logs output by the installation

    This needs to be running before the virt-install runs, it expects
    there to be a listener on the port used for the virtio log port.
    """
    def __init__(self, log_path=None, host="localhost", port=0, timeout=None, log_request_handler_class=LogRequestHandler):
        """
        Start a thread to monitor the logs.

        :param str log_path: Path to the logfile to write
        :param str host: Host to bind to. Default is localhost.
        :param int port: Port to listen to or 0 to pick a port
        :param int timeout: Optional install timeout in minutes (passed on
                            to LogServer)
        :param log_request_handler_class: Handler class, defaults to
                                          LogRequestHandler

        If 0 is passed for the port the dynamically assigned port will be
        available as self.port

        If log_path isn't set then it only monitors the logs, instead of
        also writing them to disk.
        """
        self.server = LogServer(log_path, (host, port), log_request_handler_class, timeout=timeout)
        # server_address reflects the actual (possibly dynamic) port bound
        self.host, self.port = self.server.server_address
        self.log_path = log_path
        # handle_request serves a single connection; daemon thread so it
        # cannot keep the process alive at exit
        self.server_thread = threading.Thread(target=self.server.handle_request)
        self.server_thread.daemon = True
        self.server_thread.start()

    def shutdown(self):
        """Force shutdown of the monitoring thread"""
        self.server.kill = True
        self.server_thread.join()
| bcl/lorax | src/pylorax/monitor.py | Python | gpl-2.0 | 6,715 | [
"Brian"
] | c789d9e0fa371516911d9ed191cbdd61c54533c7f707ce1fa5c991ea027ffdfb |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify commits against a trusted keys list."""
import argparse
import hashlib
import os
import subprocess
import sys
import time
GIT = os.getenv('GIT', 'git')
def tree_sha512sum(commit='HEAD'):
    """Calculate the Tree-sha512 for the commit.

    This is copied from github-merge.py.

    :param commit: git revision whose tree is hashed (default HEAD)
    :returns: hex SHA-512 digest (str) over every blob in the tree
    :raises IOError: if git cat-file output is truncated or exits non-zero
    """

    # request metadata for entire tree, recursively
    files = []
    blob_by_name = {}
    for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
        name_sep = line.index(b'\t')
        metadata = line[:name_sep].split() # perms, 'blob', blobid
        assert metadata[1] == b'blob'
        name = line[name_sep + 1:]
        files.append(name)
        blob_by_name[name] = metadata[2]

    # Hash in sorted order so the digest is reproducible
    files.sort()

    # open connection to git-cat-file in batch mode to request data for all blobs
    # this is much faster than launching it per file
    p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    overall = hashlib.sha512()
    for f in files:
        blob = blob_by_name[f]
        # request blob
        p.stdin.write(blob + b'\n')
        p.stdin.flush()

        # read header: blob, "blob", size
        reply = p.stdout.readline().split()
        assert reply[0] == blob and reply[1] == b'blob'
        size = int(reply[2])

        # hash the blob data
        intern = hashlib.sha512()
        ptr = 0
        while ptr < size:
            bs = min(65536, size - ptr)
            piece = p.stdout.read(bs)
            if len(piece) == bs:
                intern.update(piece)
            else:
                raise IOError('Premature EOF reading git cat-file output')
            ptr += bs
        dig = intern.hexdigest()
        assert p.stdout.read(1) == b'\n' # ignore LF that follows blob data

        # update overall hash with file hash
        overall.update(dig.encode("utf-8"))
        overall.update("  ".encode("utf-8"))
        overall.update(f)
        overall.update("\n".encode("utf-8"))
    p.stdin.close()
    if p.wait():
        raise IOError('Non-zero return value executing git cat-file')
    return overall.hexdigest()
def main():
    """Walk the history from the given commit (default HEAD) back to the
    trusted root, verifying that every commit is signed with a trusted key,
    that Tree-SHA512 footers match, and that recent merges are clean
    re-merges. Exits 0 on success, 1 on any failure."""
    # Parse arguments
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [commit id]')
    parser.add_argument('--disable-tree-check', action='store_false', dest='verify_tree', help='disable SHA-512 tree check')
    parser.add_argument('--clean-merge', type=float, dest='clean_merge', default=float('inf'), help='Only check clean merge after <NUMBER> days ago (default: %(default)s)', metavar='NUMBER')
    parser.add_argument('commit', nargs='?', default='HEAD', help='Check clean merge up to commit <commit>')
    args = parser.parse_args()

    # get directory of this program and read data files
    dirname = os.path.dirname(os.path.abspath(__file__))
    print("Using verify-commits data from " + dirname)
    verified_root = open(dirname + "/trusted-git-root", "r", encoding="utf8").read().splitlines()[0]
    verified_sha512_root = open(dirname + "/trusted-sha512-root-commit", "r", encoding="utf8").read().splitlines()[0]
    revsig_allowed = open(dirname + "/allow-revsig-commits", "r", encoding="utf-8").read().splitlines()
    unclean_merge_allowed = open(dirname + "/allow-unclean-merge-commits", "r", encoding="utf-8").read().splitlines()
    incorrect_sha512_allowed = open(dirname + "/allow-incorrect-sha512-commits", "r", encoding="utf-8").read().splitlines()

    # Set commit and branch and set variables
    current_commit = args.commit
    if ' ' in current_commit:
        print("Commit must not contain spaces", file=sys.stderr)
        sys.exit(1)
    verify_tree = args.verify_tree
    no_sha1 = True
    prev_commit = ""
    initial_commit = current_commit
    branch = subprocess.check_output([GIT, 'show', '-s', '--format=%H', initial_commit], universal_newlines=True).splitlines()[0]

    # Iterate through commits, always following the first parent
    while True:
        if current_commit == verified_root:
            print('There is a valid path from "{}" to {} where all commits are signed!'.format(initial_commit, verified_root))
            sys.exit(0)
        if current_commit == verified_sha512_root:
            # Tree hashes are only checked back to this point
            if verify_tree:
                print("All Tree-SHA512s matched up to {}".format(verified_sha512_root), file=sys.stderr)
            verify_tree = False
            no_sha1 = False

        # Environment consumed by the gpg.sh wrapper below
        os.environ['BITSEND_VERIFY_COMMITS_ALLOW_SHA1'] = "0" if no_sha1 else "1"
        os.environ['BITSEND_VERIFY_COMMITS_ALLOW_REVSIG'] = "1" if current_commit in revsig_allowed else "0"

        # Check that the commit (and parents) was signed with a trusted key
        if subprocess.call([GIT, '-c', 'gpg.program={}/gpg.sh'.format(dirname), 'verify-commit', current_commit], stdout=subprocess.DEVNULL):
            if prev_commit != "":
                print("No parent of {} was signed with a trusted key!".format(prev_commit), file=sys.stderr)
                print("Parents are:", file=sys.stderr)
                parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', prev_commit], universal_newlines=True).splitlines()[0].split(' ')
                for parent in parents:
                    subprocess.call([GIT, 'show', '-s', parent], stdout=sys.stderr)
            else:
                print("{} was not signed with a trusted key!".format(current_commit), file=sys.stderr)
            sys.exit(1)

        # Check the Tree-SHA512
        if (verify_tree or prev_commit == "") and current_commit not in incorrect_sha512_allowed:
            tree_hash = tree_sha512sum(current_commit)
            if ("Tree-SHA512: {}".format(tree_hash)) not in subprocess.check_output([GIT, 'show', '-s', '--format=format:%B', current_commit], universal_newlines=True).splitlines():
                print("Tree-SHA512 did not match for commit " + current_commit, file=sys.stderr)
                sys.exit(1)

        # Merge commits should only have two parents
        parents = subprocess.check_output([GIT, 'show', '-s', '--format=format:%P', current_commit], universal_newlines=True).splitlines()[0].split(' ')
        if len(parents) > 2:
            print("Commit {} is an octopus merge".format(current_commit), file=sys.stderr)
            sys.exit(1)

        # Check that the merge commit is clean
        commit_time = int(subprocess.check_output([GIT, 'show', '-s', '--format=format:%ct', current_commit], universal_newlines=True).splitlines()[0])
        check_merge = commit_time > time.time() - args.clean_merge * 24 * 60 * 60 # Only check commits in clean_merge days
        allow_unclean = current_commit in unclean_merge_allowed
        if len(parents) == 2 and check_merge and not allow_unclean:
            # Re-do the merge locally and compare the resulting trees
            # (note: checks out commits in the working tree)
            current_tree = subprocess.check_output([GIT, 'show', '--format=%T', current_commit], universal_newlines=True).splitlines()[0]
            subprocess.call([GIT, 'checkout', '--force', '--quiet', parents[0]])
            subprocess.call([GIT, 'merge', '--no-ff', '--quiet', parents[1]], stdout=subprocess.DEVNULL)
            recreated_tree = subprocess.check_output([GIT, 'show', '--format=format:%T', 'HEAD'], universal_newlines=True).splitlines()[0]
            if current_tree != recreated_tree:
                print("Merge commit {} is not clean".format(current_commit), file=sys.stderr)
                subprocess.call([GIT, 'diff', current_commit])
                subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])
                sys.exit(1)
            subprocess.call([GIT, 'checkout', '--force', '--quiet', branch])

        prev_commit = current_commit
        current_commit = parents[0]
# Script entry point
if __name__ == '__main__':
    main()
| LIMXTEC/BitSend | contrib/verify-commits/verify-commits.py | Python | mit | 7,876 | [
"Octopus"
] | e1040c247e98e62c1a17cf4852d6018d90f98806be599a1e8f058a1110eb8e18 |
from . import nodes
class Visitor:
    """ Visit ast nodes """

    def visit(self, node):
        """Dispatch on the node type and recurse into child nodes.

        :raises NotImplementedError: for node types without a case below
        """
        if isinstance(node, nodes.Program):
            for variable in node.variables:
                self.visit(variable)
            for statement in node.statements:
                self.visit(statement)
        elif isinstance(node, (nodes.Variable, nodes.Const, nodes.VarRef)):
            # Leaf nodes: nothing to recurse into
            pass
        elif isinstance(node, (nodes.Continue, nodes.Stop)):
            pass
        elif isinstance(node, (nodes.Print, nodes.Read, nodes.Write)):
            for a in node.args:
                self.visit(a)
        elif isinstance(node, (nodes.Format,)):
            pass
        elif isinstance(node, nodes.Assignment):
            self.visit(node.var)
            self.visit(node.expr)
        elif isinstance(node, nodes.GoTo):
            self.visit(node.x)
        elif isinstance(node, nodes.IfArith):
            # Arithmetic IF: three branch targets
            self.visit(node.s1)
            self.visit(node.s2)
            self.visit(node.s3)
        elif isinstance(node, nodes.Binop):
            self.visit(node.a)
            self.visit(node.b)
        elif isinstance(node, nodes.Unop):
            self.visit(node.a)
        elif isinstance(node, nodes.Data):
            for c in node.clist:
                self.visit(c)
        else:
            raise NotImplementedError("VISIT:{} {}".format(node, type(node)))
class Printer(Visitor):
    """Visitor that pretty-prints an AST, indenting two spaces per level."""

    def __init__(self):
        self.indent = 0

    def print(self, node):
        """ Print the AST """
        self.indent = 0
        self.visit(node)

    def visit(self, node):
        # Emit the node at the current depth, then descend one level deeper
        prefix = " " * self.indent
        print("{}{}".format(prefix, node))
        self.indent += 2
        super().visit(node)
        self.indent -= 2
| windelbouwman/ppci-mirror | ppci/lang/fortran/utils.py | Python | bsd-2-clause | 1,713 | [
"VisIt"
] | fa8c088af466b41d177902ae1c7191c43ed07e0df02622df26e4577141daf034 |
import numpy as np
import math
from scipy.special import kn
from scipy.interpolate import interp1d
from scipy.interpolate import SmoothBivariateSpline
from scipy.optimize import brentq
from scipy.integrate import quad
from scipy.interpolate import griddata
#import matplotlib.pyplot as plt
from Hidden_Sec_Utilities import *
#Units: everything is expressed in GeV.
gev=1;mev=1e-3*gev;kev=1e-6*gev;
#Particle masses: proton, electron, muon, pi0, pi+, K+, J/psi.
mp=0.938272046*gev;melec=511*kev;mmuon=105.658*mev;mpi=134.9767*mev;mpip=139.57018*mev;mkaon=0.493667*gev;mj_psi=3.097*gev;
#Widths, Branching Ratios, Constants (kaon lifetime in s, alpha_em, J/psi BRs)
tauK=1.23e-8;alpha_em=1.0/137.035999139;Brj_psi_to_ee=0.0594;Brj_psi_to_mumu=0.0593;Brj_psi_to_invis=7e-4;
#hbar in GeV*s and c in m/s; `conversion` turns a GeV^-2 cross section into m^2.
hbar=float(1.054*1e-34/(1.6e-19)/(1e9));speed_of_light=3e8;conversion=hbar**2*speed_of_light**2;
#Relic density cross section for rough estimates of relic density (cm^2)
relic_density_sigma=1e-40
#Functions for returning arrays of V mass, DM mass and some function f(mv,mx,alpha_p,kappa)
#Returns an array of the V mass, DM mass and kappa^4*alpha_p
def k4al(mv,mx,alpha_p,kappa):
    """Return [mv, mx, kappa**4 * alpha_p]."""
    coupling_combo = kappa ** 4 * alpha_p
    return [mv, mx, coupling_combo]
def kappa(mv,mx,alpha_p,kappa):
    """Return [mv, mx, kappa]: keep the mixing parameter itself as the z value."""
    triple = [mv, mx, kappa]
    return triple
def Y_func(mv,mx,alpha_p,kappa):
    """Return [mv, mx, Y] with Y = kappa^2 * alpha_p * (mx/mv)^4."""
    mass_ratio = mx / mv
    return [mv, mx, kappa ** 2 * alpha_p * mass_ratio ** 4]
def alphab(mv,mx,alpha_b,kappa):
    """Return [mv, mx, alpha_b]: keep the baryonic coupling as the z value."""
    triple = [mv, mx, alpha_b]
    return triple
#Reduced Mass
def reduced_mass(m1,m2):
    """Two-body reduced mass, m1*m2/(m1+m2)."""
    product = m1 * m2
    return product / (m1 + m2)
#Scattering cross section for kinetic mixing
#These are only applicable for mV>>alpha_em^2*m_e^2 (see https://arxiv.org/pdf/1108.5383v3.pdf)
#DM-nucleon cross section in natural units (GeV^-2).
def sigman(mv,mx,kappa,alpha_p):
    return 16*math.pi*kappa**2*alpha_em*alpha_p*reduced_mass(mp,mx)**2/mv**4*0.25
#Invert sigman: kappa producing cross section `sigma`; sigma is in cm^2
#(`conversion` is m^2 per GeV^-2, the extra 100**2 converts to cm^2).
def sigman_to_kappa(sigma,mv,mx,alpha_p):
    return math.sqrt(sigma/conversion/100**2/(16*math.pi*alpha_em*alpha_p*reduced_mass(mp,mx)**2/mv**4*0.25))
#Same inversion, written in terms of sigman itself.
def sigman_to_kappa2(sigma,mv,mx,alpha_p):
    return math.sqrt(sigma/conversion/100**2/sigman(mv,mx,1,alpha_p))
#DM-electron cross section in natural units (GeV^-2).
def sigmae(mv,mx,kappa,alpha_p):
    return 16*math.pi*kappa**2*alpha_em*alpha_p*reduced_mass(melec,mx)**2/(mv**2+alpha_em**2*melec**2)**2
#Invert sigmae: kappa producing DM-electron cross section `sigma` (cm^2).
def sigmae_to_kappa(sigma,mv,mx,alpha_p):
    return math.sqrt(sigma/conversion/100**2/(16*math.pi*alpha_em*alpha_p*reduced_mass(melec,mx)**2/(mv**2+alpha_em**2*melec**2)**2))
    #return math.sqrt(sigma/conversion/100**2/sigmae(mv,mx,1,alpha_p))
#Scattering cross section for baryonic with kappa=0
def sigman_B(mv,mx,alpha_b):
    return 16*math.pi*alpha_b**2*reduced_mass(mp,mx)**2/mv**4
#Invert sigman_B: alpha_b producing cross section `sigman` (cm^2).
def sigman_to_alpha_b(sigman,mv,mx):
    return math.sqrt(sigman/conversion/100**2/sigman_B(mv,mx,1))
#RRatio
def format_rratio(rratio):
    """Yield [sqrt(s), R] pairs parsed from raw r-ratio table lines.

    Rows with fewer than 14 whitespace-separated columns, or whose
    columns 0/3 are not numeric, are skipped.
    """
    for raw_line in rratio:
        columns = raw_line.split()
        if len(columns) < 14:
            continue
        try:
            yield [float(columns[0]), float(columns[3])]
        except ValueError:
            pass
#Load the measured hadronic r-ratio table once at import time and build an
#interpolator over it (sqrt(s) in column 0, R in column 3; see format_rratio).
with open('data/rratio.dat','r') as infile:
    rratio1=infile.read()
rratio_rough = rratio1.splitlines()
rratio_clean=np.array(list(format_rratio(rratio_rough)),dtype=float)
f_rratio = interp1d(rratio_clean[:,0],rratio_clean[:,1])
def rratio(s):
    # Below the lowest tabulated energy there is no hadronic production.
    # NOTE(review): s above the table maximum raises from interp1d --
    # confirm callers stay in range.
    if s<rratio_clean[0,0]:
        return 0
    else:
        return f_rratio(s)
#This returns the momentum of particles with masses m2 and m3 produced by the decay of a
#particle at rest with mass m1
def lambda_m(m1,m2,m3):
    """Daughter momentum in the two-body decay m1 -> m2 + m3 (m1 at rest)."""
    kallen = (m1**4 + m2**4 + m3**4
              - 2*m1**2*m2**2 - 2*m3**2*m2**2 - 2*m1**2*m3**2)
    return math.sqrt(kallen) / (2 * m1)
#Relativistic gamma
def gamma(beta):
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    root = math.sqrt(1 - beta ** 2)
    return 1.0 / root
def Epart(beta, m):
    """Total relativistic energy of a particle of mass m at velocity beta."""
    return gamma(beta) * m
def GammaV(alpha_p, kappa, mv, mx):
    """Total decay width of the kinetically mixed vector V.

    Sums the kinematically open channels: V -> DM DM, V -> e+e-, and
    V -> mu+mu-, the last scaled by (1 + R) to include hadronic final
    states through the measured r-ratio.
    """
    term = 0;
    if mv>2*mx:
        # Invisible channel V -> chi chi.
        term += alpha_p*(mv*mv-4*mx*mx)*math.sqrt(mv*mv/4.0-mx*mx)
    if mv>2*melec:
        term += 4*pow(kappa,2)*alpha_em*(2*pow(melec,2)+mv*mv)*math.sqrt(mv*mv/4.0-pow(melec,2))
    if mv>2*mmuon:
        # NOTE(review): the r-ratio is evaluated at 2*Epart(dm_beta, mx)
        # (the DM annihilation energy), not at mv -- confirm intentional.
        term += 4*pow(kappa,2)*alpha_em*(2*pow(mmuon,2)+mv*mv)*math.sqrt(mv*mv/4.0-pow(mmuon,2))*(1+rratio(2*Epart(dm_beta,mx)))
    return 1.0/(6.0*mv*mv)*(term)
#Only includes V->DM+DM at the moment
def GammaVB(alpha_B, kappa, mv, mx):
    """Decay width of the baryonic vector; only the V -> DM DM channel."""
    daughter_momentum = lambda_m(mv, mx, mx)
    return 2.0 / 3.0 * alpha_B * daughter_momentum ** 3 / mv ** 2
#Rough relic density stuff!
#DM velocity assumed at freeze-out.
dm_beta=0.3
def sigma_ann_lepton(alphap,kappa,mv,mx,mlepton):
    # DM DM -> l+ l- annihilation cross section through the vector V,
    # evaluated at E = Epart(dm_beta, mx); zero below the lepton threshold.
    if Epart(dm_beta,mx)>mlepton:
        return 8.0*math.pi/3*alphap*alpha_em*kappa**2/((4*Epart(dm_beta,mx)**2-mv**2)**2+mv**2*GammaV(alphap,kappa,mv,mx)**2)*(2*Epart(dm_beta,mx)**2+mlepton**2)*dm_beta**2*math.sqrt(1-mlepton**2/Epart(dm_beta,mx)**2)
    return 0
def sigma_annihilation_dm(kappa,alphap,mv,mx):
    # Total annihilation: electron channel plus muon channel scaled by
    # (1 + R) to account for hadronic final states.
    return sigma_ann_lepton(alphap,kappa,mv,mx,melec)+sigma_ann_lepton(alphap,kappa,mv,mx,mmuon)*(1+rratio(2*Epart(dm_beta,mx)))
def gen_relic_dm(mv,mx,alpha_p):
    """Solve for the kappa that reproduces the target relic-density cross section.

    Returns 1000 as a sentinel when no root is bracketed in [0, 10].
    """
    def mismatch(kap):
        return sigma_annihilation_dm(kap, alpha_p, mv, mx) * conversion - relic_density_sigma
    try:
        return brentq(mismatch, 0, 10)
    except ValueError:
        print("Value error encountered for mv,mx,sigma_ann",mv,mx,sigma_annihilation_dm(1e-3,alpha_p,mv,mx))
        return 1000
#This is much faster, but could be tripped up if sigma_annihilation_dm does not
#scale as kappa**2.
def gen_relic_dm_fast(mv,mx,alpha_p):
    """Closed-form kappa, assuming the cross section scales exactly as kappa^2."""
    unit_sigma = sigma_annihilation_dm(1, alpha_p, mv, mx) * conversion
    return math.sqrt(relic_density_sigma / unit_sigma)
#######################
#Muon and Electron g-2#
#######################
def al_int(z,mv,ml):
    """Feynman-parameter integrand for the one-loop (g-2) vector contribution."""
    numerator = 2 * ml ** 2 * z * (1 - z) ** 2
    denominator = ml ** 2 * (1 - z) ** 2 + mv ** 2 * z
    return numerator / denominator
def al(mv,mlepton):
    """One-loop Delta a_l from a vector of mass mv coupling to lepton mlepton."""
    integral = quad(al_int, 0, 1, args=(mv, mlepton))[0]
    return alpha_em / (2 * math.pi) * integral
#Kappa excluded by the muon g-2 measurement (Delta a_mu bound of 7.4e-9).
def kappa_muon_lim(mv):
    return math.sqrt(7.4e-9/al(mv,mmuon))
#Lower and upper edges of the kappa band favoured by the muon g-2 anomaly.
def kappa_fav_low(mv):
    return math.sqrt(1.3e-9/al(mv,mmuon))
def kappa_fav_high(mv):
    return math.sqrt(4.8e-9/al(mv,mmuon))
#Electron g-2 shift, including a higher-order correction factor.
def Delta_a_electron(mv,kappa):
    return alpha_em/(3*math.pi)*kappa**2*melec**2/mv**2*(1-(7*alpha_em**2-alpha_em/(4*math.pi)))
#Constraints on electron g - 2 from Motoi Endo, Koichi Hamaguchi, and Go Mishima's paper 1209.2558
def kappa_electron_lim(mv):
    # Invert Delta_a_electron against the (-1.06 + 2*0.82)e-12 bound.
    g=lambda kappa: Delta_a_electron(mv,kappa)-(-1.06+2*0.82)*1e-12
    return brentq(g,0,1)
################
#Monojet Limits#
################
#From aritz e-mail 2012. Should find a citation for this.
def monojet_limit():
    """Monojet bound on the kinetic-mixing parameter kappa."""
    coupling_limit = 0.02
    return coupling_limit / math.sqrt(4 * math.pi * alpha_em)
#1112.5457
def monojet_limit_baryonic():
    """Monojet bound on the baryonic coupling alpha_B."""
    g_limit = 0.021
    return 9 * g_limit ** 2 / (4 * math.pi)
##########################
#K^+ --> pi^+ + invisible#
##########################
#Formulae from 0808.2459 and data from E949 0903.0030
kpip_invis_dat_1 = np.loadtxt("data/kpipinvis1.dat")
kpip_invis_dat_2 = np.loadtxt("data/kpipinvis2.dat")
#Form factor entering K -> pi V.
def W2(mv):
    return 1e-12*(3+6*mv**2/mkaon**2)
#Partial width for K+ -> pi+ V.
def Gamma_K_Vpi(mv,kappa):
    return alpha_em*kappa**2*mv**2*W2(mv)*4*lambda_m(mkaon,mpip,mv)**3/(2**9*math.pi**4*mkaon**4)
#Branching ratio K+ -> pi+ V (width times kaon lifetime over hbar).
def Br_K_Vpi(mv,kappa):
    return Gamma_K_Vpi(mv,kappa)*tauK/hbar
#Turn E949 BR limits into kappa limits (assumes Br scales as kappa^2).
def gen_K_Vpi_lim(arr):
    return np.array([[mv, math.sqrt(kdat/Br_K_Vpi(mv,1))] for mv,kdat in arr])
#Baryonic
def Br_K_VpiB(mv,alpha_B):
    """Branching ratio K -> pi V for the baryonic vector (from 0808.2459)."""
    scaled_mass = mv / 0.1
    return 1.4e-3 * alpha_B * scaled_mass ** 2
#Turn (mv, BR limit) rows into (mv, alpha_B limit) rows.
def gen_K_VpiB_lim(arr):
    return np.array([[mv, kdat/Br_K_VpiB(mv,1)] for mv,kdat in arr])
#Same, with an extra factor of 10 safety margin on each limit.
def gen_K_VpiB_lim_conservative(arr):
    return np.array([[mv, 10*kdat/Br_K_VpiB(mv,1)] for mv,kdat in arr])
########################
#Electroweak Fit limits#
########################
#Model Independent Bounds on Kinetic Mixing - Anson Hook, Eder Izaguirre, Jay G.Wacker. 1006.0973
zprimedat=np.loadtxt("data/zprime.dat")
############
#Babar line#
############
#Possible sensitivity from repurposed analysis of http://arxiv.org/abs/arXiv:0808.0017
#Full analysis in http://arxiv.org/pdf/1309.5084v2.pdf
babar_dat=np.loadtxt("data/babar.dat")
#Interpolator over the BaBar limit table; columns are (mv, kappa limit).
babar_interp = interp1d(babar_dat[:,0],babar_dat[:,1])
babar_max = max(babar_dat[:,0])
#Sensitivity from BaBar analysis https://arxiv.org/abs/1702.03327
#Should maybe add a branching ratio to this, but normally Br(V->invis)~1 for our parameters
babar2017_dat = np.loadtxt("data/babar2017_formatted.dat")
babar2017_interp = interp1d(babar2017_dat[:,0],babar2017_dat[:,1])
babar2017_min = min(babar2017_dat[:,0])
babar2017_max = max(babar2017_dat[:,0])
# In[23]:
def babar_func(mv,mx,alpha_p,fill_value=1000):
    """Kappa limit from the repurposed BaBar analysis (0808.0017).

    Outside the tabulated mass range the limit is clamped at the
    0.2 GeV value (below) or set to *fill_value* (above).
    """
    if mv < 0.2:
        limit = babar_interp(0.2)
    elif mv > babar_max:
        limit = fill_value
    else:
        limit = babar_interp(mv)
    if 2 * mx > mv:
        # Off-shell V -> DM DM: rescale the limit by 1/sqrt(alpha_p).
        limit = 1.0 / math.sqrt(alpha_p) * limit
    return limit
def babar_func2017(mv,mx,alpha_p,fill_value=1000):
    """Kappa limit from the 2017 BaBar invisible-V search (1702.03327)."""
    if mv <= babar2017_min:
        limit = babar2017_interp(babar2017_min)
    elif mv >= babar2017_max:
        limit = fill_value
    else:
        limit = babar2017_interp(mv)
    if 2 * mx > mv:
        # Off-shell V -> DM DM: rescale the limit by 1/sqrt(alpha_p).
        limit = 1.0 / math.sqrt(alpha_p) * limit
    return limit
#################################
#Baryonic Limits from 1705.06726#
#################################
#These largely eliminate the non-anomaly-free version of
#the model. I expect an anomaly-free version will need to
#be found for the model to be viable.
#Each table's second column (presumably the coupling g_B -- confirm) is
#converted in place to alpha_B = g_B^2 / (4 pi).
anomalon_1705_06726_dat = np.loadtxt("data/1705.06726/Anomalon_formatted.dat")
anomalon_1705_06726_dat[:,1]=anomalon_1705_06726_dat[:,1]**2/4.0/math.pi
BtoKX_1705_06726_dat = np.loadtxt("data/1705.06726/BtoKX_formatted.dat")
BtoKX_1705_06726_dat[:,1]=BtoKX_1705_06726_dat[:,1]**2/4.0/math.pi
ZtogammaX_1705_06726_dat = np.loadtxt("data/1705.06726/ZtogammaX_formatted.dat")
ZtogammaX_1705_06726_dat[:,1]=ZtogammaX_1705_06726_dat[:,1]**2/4.0/math.pi
KtopiX_1705_06726_dat = np.loadtxt("data/1705.06726/KtopiX_formatted.dat")
KtopiX_1705_06726_dat[:,1]=KtopiX_1705_06726_dat[:,1]**2/4.0/math.pi
#############################
#Baryonic Neutron Scattering#
#############################
def neutron_scatter(mv):
    """Neutron-scattering bound on alpha_B as a function of the vector mass."""
    scaled_mass = mv / 0.001
    return 3.4e-11 * scaled_mass ** 4
#########################
#Limits from rare decays#
#########################
#Rare Decay Limits
#Branching ratio J/psi -> V for kinetic mixing.
def Br_Jpsi_to_V(mv,mx,alpha_p,kappa):
    return Brj_psi_to_ee*kappa**2*alpha_p/(4*alpha_em)*mj_psi**4/((mj_psi**2-mv**2)**2+mv**2*GammaV(alpha_p,kappa,mv,mx)**2)
#Kappa at which the J/psi invisible-width bound is saturated.
def rarelimit(mv,mx,alpha_p):
    g= lambda kappa: Br_Jpsi_to_V(mv,mx,alpha_p,kappa) - Brj_psi_to_invis
    return brentq(g,0,1)
#Baryonic
def Br_Jpsi_to_VB(mv,mx,alpha_B,kappa):
    return Brj_psi_to_ee/9.0*alpha_B**2/(4*alpha_em**2)*mj_psi**4/((mj_psi**2-mv**2)**2+mv**2*GammaVB(alpha_B,kappa,mv,mx)**2)
def rarelimitB(mv,mx,kappa):
    # Exactly on resonance the denominator vanishes at alpha_B=0; bail out.
    if mv == mj_psi:
        return 0
    g= lambda alpha_B: (Br_Jpsi_to_VB(mv,mx,alpha_B,kappa) - Brj_psi_to_invis)
    return brentq(g,0,1)
#######################
#Invisible Pion Limits#
#######################
#http://inspirehep.net/record/333625?ln=en, Atiya 1992
invispiondat = np.loadtxt("data/invis_pion.dat")
#Note, this curve has kappa dependence. It is currently assuming kappa=0
invispionbaryonicdat = np.loadtxt("data/invis_pion_baryonic.dat")
######
#NA64#
######
#https://arxiv.org/abs/1610.02988
#These limits are only valid in the case that V->\chi\bar\chi is dominant decay channel
#NA64dat = np.loadtxt("data/NA64_formatted.dat")
#https://arxiv.org/abs/1710.00971
#NA64dat = np.loadtxt("data/NA64_2017_formatted.dat")
#https://arxiv.org/abs/1906.00176
NA64dat = np.loadtxt("data/NA64_2019_formatted.dat")
#NA64dat = np.loadtxt("data/NA64_2019_aD0.5_formatted.dat")
#NA64dat = math.sqrt(NA64dat/0.5*3**4)
#Projections from Physics Beyond Colliders Working Group meeting
#NA64_2016dat = np.loadtxt("data/NA64_2016_formatted.dat")
#NA64_2017dat = np.loadtxt("data/NA64_2017_formatted.dat")
#NA64_2018dat = np.loadtxt("data/NA64_2018_formatted.dat")
######
#E137#
######
#E137 has to be handled separately. These limits are provided by Brian
#Batell. See https://arxiv.org/abs/1406.2698.
E137tab = np.loadtxt("data/E137-kappa4XalphaD-mV-mX.csv",delimiter=',')
######
#LSND#
######
#See arXiv:1107.4580 and arXiv:1411.1055.
LSNDtab = np.loadtxt("data/lsnd.dat",delimiter='\t')
###########
#MiniBooNE#
##########
#See arXiv:1807.06137
MiniBooNE_N_tab = np.loadtxt("data/miniboone_full_nucleon_timing_vector_portal_cl_epsilon4alphaD.txt",delimiter='\t')
MiniBooNE_e_tab = np.loadtxt("data/miniboone_electron_timing_vector_portal_cl_epsilon4alphaD.txt",delimiter='\t')
#Direct_Detection_Limits
#1105.5191
xenon10_dat = np.loadtxt("data/xenon10.dat")
#1207.5988
xenon100_dat1 = np.loadtxt("data/xenon100_1.dat")
xenon100_dat2 = np.loadtxt("data/xenon100_2.dat")
#1705.06655
xenon1T_dat = np.loadtxt("data/1705.06655.xenon1t.dat")
#Not sure which this is from.
xenon1T_S2_dat = np.loadtxt("data/XENON1T_S2Only_2019.dat")
#1802.06994
darkside50_dat=np.loadtxt("data/1802.06994.Darkside50.dat")
#1105.5191
damic_dat=np.loadtxt("data/damic.dat")
#arXiv:1509.01515
#Second column rescaled from pb-like units to cm^2 via the 1e-36 factor.
#NOTE(review): zip() returns a one-shot iterator on Python 3, so these
#zipped tables can only be consumed once -- confirm downstream usage.
cressII2015_dat_unscaled = np.loadtxt("data/cressII2015.dat")
cressII2015_dat = zip(cressII2015_dat_unscaled[:,0],cressII2015_dat_unscaled[:,1]*1e-36)
#arXiv:1711.07692
cresstIII2017_dat = np.loadtxt("data/1711.07692.cresst-iii.dat")
cresstIII2017_dat = zip(cresstIII2017_dat[:,0],cresstIII2017_dat[:,1]*1e-36)
#arXiv:1402.7137
#with open("data/SuperCDMS.dat") as infile:
#    scdms1=infile.read()
#    scdms2=[line.split() for line in scdms1.split(';')]
#    SuperCDMS_dat=[[float(x),float(y)] for x,y in scdms2]
SuperCDMS_dat=np.loadtxt("data/SuperCDMS.dat")
#arXiv:1509.02448
CDMSlite_dat = np.loadtxt("data/cdmslite2015.dat")
#1512.03506
LUX_dat_unscaled = np.loadtxt("data/lux2015.dat")
LUX_dat = zip(LUX_dat_unscaled[:,0],LUX_dat_unscaled[:,1]*1e-45)
#1707.06749
cresstIIsurface=np.loadtxt("data/cresst_II_surface.csv",delimiter=',')
cresstIIsurface_dat = zip(cresstIIsurface[:,0],cresstIIsurface[:,1]*1e-36)
#All direct-detection tables collected for the combined limit below.
Direct_Det_Tab =[xenon10_dat,xenon100_dat1,xenon100_dat2,damic_dat,cressII2015_dat,SuperCDMS_dat,CDMSlite_dat,LUX_dat,cresstIIsurface_dat,cresstIII2017_dat,xenon1T_dat,xenon1T_S2_dat,darkside50_dat]
#Direct_Det_Func=[interp1d(np.array(tab)[:,0],np.array(tab)[:,1],bounds_error=False,fill_value=1e-25) for tab in Direct_Det_Tab]
#Strongest (smallest) direct-detection cross-section limit at DM mass mx.
# NOTE(review): Direct_Det_Func is commented out above, so calling this
# function raises NameError as the file stands -- confirm before use.
def Direct_Det(mx):
    return min([func(mx) for func in Direct_Det_Func])
#arxiv:.pdf
#See also https://arxiv.org/pdf/1505.00011.pdf for comparison
#DM-electron scattering limits; each interpolator returns a large
#sentinel (1e-15) outside the tabulated mass range.
xenon10e_dat = np.loadtxt("data/xenon10e_2017_formatted.csv",delimiter=",")
xenon10efunc=interp1d(xenon10e_dat[:,0],xenon10e_dat[:,1],bounds_error=False,fill_value=1e-15)
xenon100e_dat = np.loadtxt("data/xenon100e_2017_formatted.csv",delimiter=",")
xenon100efunc=interp1d(xenon100e_dat[:,0],xenon100e_dat[:,1],bounds_error=False,fill_value=1e-15)
#arxiv:1206.2644v1.pdf
#xenon10e_dat = np.loadtxt("data/xenon10e_formatted.csv",delimiter=",")
#xenon10e_dat = np.loadtxt("data/xenon10e_2017_formatted.csv",delimiter=",")
#1804.10697
SCDMSe_dat = np.loadtxt("data/CDMS_electron_2018_formatted.dat",delimiter=" ")
SCDMSefunc=interp1d(SCDMSe_dat[:,0],SCDMSe_dat[:,1],bounds_error=False,fill_value=1e-15)
#1804.00088
SENSEIe_dat = np.loadtxt("data/SENSEI2018_formatted.dat",delimiter=" ")
SENSEIefunc=interp1d(SENSEIe_dat[:,0],SENSEIe_dat[:,1],bounds_error=False,fill_value=1e-15)
| pgdeniverville/Hidden-Sector-Limits | Hidden_Sec_Physics.py | Python | mit | 14,590 | [
"Brian"
] | ad3e0fd4e651adf913ce345809d07cbb80bc131bcf7d42efcfe2eb0f32dffbea |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011-2012 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
# This is mostly lifted from
# http://code.google.com/p/pyboleto licensed under MIT
import datetime
import sys
import traceback
from reportlab.graphics.barcode.common import I2of5
from reportlab.lib import colors, pagesizes, utils
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
from stoqlib.exceptions import ReportError
from stoqlib.lib.crashreport import collect_traceback
from stoqlib.lib.boleto import BoletoException, get_bank_info_by_number
from stoqlib.lib.message import warning
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
# Shortcut for the gettext translation function.
_ = stoqlib_gettext
class BoletoPDF(object):
    """Render Brazilian bank payment slips (boletos) onto a PDF canvas."""

    # Page layouts: one full boleto per page, or the two-per-page carnê.
    (FORMAT_BOLETO,
     FORMAT_CARNE) = range(2)
    def __init__(self, file_descr, format=FORMAT_BOLETO):
        """Create the PDF canvas for *file_descr* (filename or file object).

        :param format: FORMAT_BOLETO uses portrait A4, one boleto per
            page; FORMAT_CARNE uses landscape A4 with tighter rows.
        """
        self.file_descr = file_descr
        # Printable width of the main slip and of the detachable stub.
        self.width = 190 * mm
        self.widthCanhoto = 70 * mm
        self.space = 2
        self.fontSizeTitle = 6
        self.fontSizeValue = 9
        pagesize = pagesizes.A4
        if format == self.FORMAT_CARNE:
            pagesize = pagesizes.landscape(pagesize)
            self.heightLine = 5.75 * mm
        else:
            self.heightLine = 6.5 * mm
        # Vertical offsets of the title text and of the value text in a row.
        self.deltaTitle = self.heightLine - (self.fontSizeTitle + 1)
        self.deltaFont = self.fontSizeValue + 1
        self.format = format
        self.pdfCanvas = canvas.Canvas(self.file_descr, pagesize=pagesize)
        self.pdfCanvas.setStrokeColor(colors.black)
        self.boletos = []
    def drawReciboSacadoCanhoto(self, boletoDados, x, y):
        """Draw the detachable payer stub (canhoto) with origin (x, y) in mm.

        Returns (width, height) of the drawn area, in mm.
        """
        self.pdfCanvas.saveState()
        self.pdfCanvas.translate(x * mm, y * mm)
        linhaInicial = 12
        # Horizontal Lines
        self.pdfCanvas.setLineWidth(2)
        self._horizontalLine(0, 0, self.widthCanhoto)
        self.pdfCanvas.setLineWidth(1)
        self._horizontalLine(0, (linhaInicial + 0) * self.heightLine,
                             self.widthCanhoto)
        self._horizontalLine(0, (linhaInicial + 1) * self.heightLine,
                             self.widthCanhoto)
        self.pdfCanvas.setLineWidth(2)
        self._horizontalLine(0, (linhaInicial + 2) * self.heightLine,
                             self.widthCanhoto)
        # Vertical Lines
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(self.widthCanhoto - (35 * mm),
                           (linhaInicial + 0) * self.heightLine,
                           self.heightLine)
        self._verticalLine(self.widthCanhoto - (35 * mm),
                           (linhaInicial + 1) * self.heightLine,
                           self.heightLine)
        self.pdfCanvas.setFont('Helvetica-Bold', 6)
        self.pdfCanvas.drawRightString(self.widthCanhoto,
                                       0 * self.heightLine + 3,
                                       'Recibo do Sacado')
        # Titles
        self.pdfCanvas.setFont('Helvetica', 6)
        self.deltaTitle = self.heightLine - (6 + 1)
        self.pdfCanvas.drawString(
            self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
            'Nosso Número')
        self.pdfCanvas.drawString(
            self.widthCanhoto - (35 * mm) + self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
            'Vencimento')
        self.pdfCanvas.drawString(
            self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Agência/Código Cedente')
        self.pdfCanvas.drawString(
            self.widthCanhoto - (35 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Valor Documento')
        # Values
        self.pdfCanvas.setFont('Helvetica', 9)
        heighFont = 9 + 1
        valorDocumento = self._formataValorParaExibir(
            boletoDados.valor_documento)
        self.pdfCanvas.drawString(
            self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.space,
            boletoDados.format_nosso_numero())
        self.pdfCanvas.drawString(
            self.widthCanhoto - (35 * mm) + self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.space,
            boletoDados.data_vencimento.strftime('%d/%m/%Y'))
        self.pdfCanvas.drawString(
            self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            boletoDados.agencia_conta)
        self.pdfCanvas.drawString(
            self.widthCanhoto - (35 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            valorDocumento)
        # Up to 12 "demonstrativo" lines fit on the stub; each is wrapped
        # to the stub width and only the first wrapped segment is drawn.
        demonstrativo = boletoDados.demonstrativo[0:12]
        for i in range(len(demonstrativo)):
            parts = utils.simpleSplit(demonstrativo[i], 'Helvetica', 9,
                                      self.widthCanhoto)
            self.pdfCanvas.drawString(
                2 * self.space,
                (((linhaInicial - 1) * self.heightLine)) - (i * heighFont),
                parts[0])
        self.pdfCanvas.restoreState()
        return (self.widthCanhoto / mm,
                ((linhaInicial + 2) * self.heightLine) / mm)
    def drawReciboSacado(self, boletoDados, x, y):
        """Draw the full-width payer receipt (Recibo do Sacado) at (x, y) in mm.

        Returns (width, height) of the drawn area, in mm.
        """
        self.pdfCanvas.saveState()
        self.pdfCanvas.translate(x * mm, y * mm)
        linhaInicial = 16
        # Horizontal Lines
        self.pdfCanvas.setLineWidth(1)
        # Cedente (payee) row
        self._horizontalLine(0,
                             linhaInicial * self.heightLine, self.width)
        # Address row
        self._horizontalLine(0,
                             (linhaInicial + 1) * self.heightLine, self.width)
        # Sacado (payer) row
        self._horizontalLine(0,
                             (linhaInicial - 1) * self.heightLine, self.width)
        self.pdfCanvas.setLineWidth(2)
        self._horizontalLine(0,
                             (linhaInicial + 2) * self.heightLine, self.width)
        # Vertical Lines
        # Vertical line 1
        # Sacado row
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(
            self.width - (35 * mm) - (30 * mm) - (40 * mm),
            (linhaInicial - 1) * self.heightLine,
            1 * self.heightLine)
        # Cedente row
        self._verticalLine(
            self.width - (35 * mm) - (30 * mm) - (40 * mm),
            (linhaInicial + 1) * self.heightLine,
            1 * self.heightLine)
        # Vertical line 2
        # Cedente row
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(
            self.width - (35 * mm) - (30 * mm),
            (linhaInicial + 1) * self.heightLine,
            1 * self.heightLine)
        # Sacado row
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(
            self.width - (35 * mm) - (30 * mm),
            (linhaInicial - 1) * self.heightLine,
            1 * self.heightLine)
        # Vertical line 3
        # Spans the Cedente/Address/Sacado rows
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(
            self.width - (35 * mm),
            (linhaInicial + -1) * self.heightLine,
            3 * self.heightLine)
        # Head
        self.pdfCanvas.setLineWidth(2)
        self._verticalLine(40 * mm,
                           (linhaInicial + 2) * self.heightLine, self.heightLine)
        self._verticalLine(60 * mm,
                           (linhaInicial + 2) * self.heightLine, self.heightLine)
        if boletoDados.logo_image_path:
            self.pdfCanvas.drawImage(
                boletoDados.logo_image_path,
                0, (linhaInicial + 2) * self.heightLine + 3,
                40 * mm,
                self.heightLine,
                preserveAspectRatio=True,
                anchor='sw')
        self.pdfCanvas.setFont('Helvetica-Bold', 18)
        self.pdfCanvas.drawCentredString(
            50 * mm,
            (linhaInicial + 2) * self.heightLine + 3,
            boletoDados.codigo_dv_banco)
        self.pdfCanvas.setFont('Helvetica-Bold', 10)
        self.pdfCanvas.drawRightString(
            self.width,
            (linhaInicial + 2) * self.heightLine + 3,
            'Recibo do Sacado')
        # Titles
        self.pdfCanvas.setFont('Helvetica', 6)
        self.deltaTitle = self.heightLine - (6 + 1)
        self.pdfCanvas.drawRightString(
            self.width,
            self.heightLine,
            'Autenticação Mecânica')
        # Cedente row titles
        self.pdfCanvas.drawString(
            0,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Cedente')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) - (40 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Agência/Código Cedente')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Data Documento')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.deltaTitle,
            'Vencimento')
        # Address row titles
        self.pdfCanvas.drawString(
            0,
            (((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
            'Endereço Cedente')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.deltaTitle,
            'CNPJ Cedente')
        # Sacado row titles
        self.pdfCanvas.drawString(
            0,
            (((linhaInicial - 1) * self.heightLine)) + self.deltaTitle,
            'Sacado')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) - (40 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.deltaTitle,
            'Nosso Número')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.deltaTitle,
            'N. do documento')
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.deltaTitle,
            'Valor Documento')
        self.pdfCanvas.drawString(
            0,
            (((linhaInicial - 2) * self.heightLine)) + self.deltaTitle,
            'Demonstrativo')
        # Values
        self.pdfCanvas.setFont('Helvetica', 9)
        heighFont = 9 + 1
        # Cedente row values
        self.pdfCanvas.drawString(
            0 + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            boletoDados.cedente[0])
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) - (40 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            boletoDados.agencia_conta)
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            boletoDados.data_documento.strftime('%d/%m/%Y'))
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial + 1) * self.heightLine)) + self.space,
            boletoDados.data_vencimento.strftime('%d/%m/%Y'))
        # Address row values
        # Address (street + details from the cedente tuple)
        self.pdfCanvas.drawString(
            0 + self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.space,
            '{endereco}, {detalhe_end}'.format(endereco=boletoDados.cedente[1],
                                              detalhe_end=boletoDados.cedente[2]))
        # CNPJ
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial + 0) * self.heightLine)) + self.space,
            boletoDados.cedente[3])
        # Sacado row values
        valorDocumento = self._formataValorParaExibir(
            boletoDados.valor_documento)
        self.pdfCanvas.drawString(
            0 + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.space,
            boletoDados.sacado[0])
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) - (40 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.space,
            boletoDados.format_nosso_numero())
        self.pdfCanvas.drawString(
            self.width - (35 * mm) - (30 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.space,
            boletoDados.numero_documento)
        self.pdfCanvas.drawString(
            self.width - (35 * mm) + self.space,
            (((linhaInicial - 1) * self.heightLine)) + self.space,
            valorDocumento)
        # Up to 25 "demonstrativo" lines fit on this receipt.
        demonstrativo = boletoDados.demonstrativo[0:25]
        for i in range(len(demonstrativo)):
            self.pdfCanvas.drawString(
                2 * self.space,
                (((linhaInicial - 2) * self.heightLine)) - (i * heighFont),
                demonstrativo[i])
        self.pdfCanvas.restoreState()
        return (self.width / mm,
                ((linhaInicial + 2) * self.heightLine) / mm)
def drawHorizontalCorteLine(self, x, y, width):
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x * mm, y * mm)
self.pdfCanvas.setLineWidth(1)
self.pdfCanvas.setDash(1, 2)
self._horizontalLine(0, 0, width * mm)
self.pdfCanvas.restoreState()
def drawVerticalCorteLine(self, x, y, height):
self.pdfCanvas.saveState()
self.pdfCanvas.translate(x * mm, y * mm)
self.pdfCanvas.setLineWidth(1)
self.pdfCanvas.setDash(1, 2)
self._verticalLine(0, 0, height * mm)
self.pdfCanvas.restoreState()
    def drawReciboCaixa(self, boletoDados, x, y):
        """Draw the bank copy (Ficha de Compensação) with its barcode.

        Origin is (x, y) in mm; rows are laid out bottom-up.
        Returns (width in points, height in mm) of the drawn area.
        """
        self.pdfCanvas.saveState()
        self.pdfCanvas.translate(x * mm, y * mm)
        # Bottom-up layout: position (0, 0) is the lower-left corner.
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        y = 1.5 * self.heightLine
        self.pdfCanvas.drawRightString(
            self.width,
            (1.5 * self.heightLine) + self.deltaTitle - 1,
            'Autenticação Mecânica / Ficha de Compensação')
        # First row above the barcode
        y += self.heightLine
        self.pdfCanvas.setLineWidth(2)
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.space, 'Código de baixa')
        self.pdfCanvas.drawString(0, y + self.space, 'Sacador / Avalista')
        y += self.heightLine
        self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Sacado')
        sacado = boletoDados.sacado
        # Thick line closing off the Sacado area
        y += self.heightLine
        self.pdfCanvas.setLineWidth(2)
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        for i in range(len(sacado)):
            self.pdfCanvas.drawString(
                15 * mm,
                (y - 10) - (i * self.deltaFont),
                sacado[i])
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Vertical line bounding all the right-hand fields
        self.pdfCanvas.setLineWidth(1)
        self._verticalLine(self.width - (45 * mm), y, 9 * self.heightLine)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(=) Valor cobrado')
        # Right-hand fields, one per row going up
        y += self.heightLine
        self._horizontalLine(self.width - (45 * mm), y, 45 * mm)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(+) Outros acréscimos')
        y += self.heightLine
        self._horizontalLine(self.width - (45 * mm), y, 45 * mm)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(+) Mora/Multa')
        y += self.heightLine
        self._horizontalLine(self.width - (45 * mm), y, 45 * mm)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(-) Outras deduções')
        y += self.heightLine
        self._horizontalLine(self.width - (45 * mm), y, 45 * mm)
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(-) Descontos/Abatimentos')
        self.pdfCanvas.drawString(
            0,
            y + self.deltaTitle,
            'Instruções')
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        # Up to 7 instruction lines; each wrapped to the available width,
        # blank lines become a single space so the row is still drawn.
        instrucoes = boletoDados.instrucoes[:7]
        for i in range(len(instrucoes)):
            parts = utils.simpleSplit(instrucoes[i], 'Helvetica', 9,
                                      self.width - 45 * mm)
            if not parts:
                parts = [' ']
            self.pdfCanvas.drawString(
                2 * self.space,
                y - (i * self.deltaFont),
                parts[0])
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Row starting with the "Uso do banco" field
        y += self.heightLine
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Uso do banco')
        self._verticalLine((30) * mm, y, 2 * self.heightLine)
        self.pdfCanvas.drawString(
            (30 * mm) + self.space,
            y + self.deltaTitle,
            'Carteira')
        self._verticalLine((30 + 20) * mm, y, self.heightLine)
        self.pdfCanvas.drawString(
            ((30 + 20) * mm) + self.space,
            y + self.deltaTitle,
            'Espécie')
        self._verticalLine(
            (30 + 20 + 20) * mm,
            y,
            2 * self.heightLine)
        self.pdfCanvas.drawString(
            ((30 + 40) * mm) + self.space,
            y + self.deltaTitle,
            'Quantidade')
        self._verticalLine(
            (30 + 20 + 20 + 20 + 20) * mm, y, 2 * self.heightLine)
        self.pdfCanvas.drawString(
            ((30 + 40 + 40) * mm) + self.space, y + self.deltaTitle, 'Valor')
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            '(=) Valor documento')
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        self.pdfCanvas.drawString(
            (30 * mm) + self.space,
            y + self.space,
            boletoDados.carteira)
        self.pdfCanvas.drawString(
            ((30 + 20) * mm) + self.space,
            y + self.space,
            boletoDados.especie)
        self.pdfCanvas.drawString(
            ((30 + 20 + 20) * mm) + self.space,
            y + self.space,
            boletoDados.quantidade)
        valor = self._formataValorParaExibir(boletoDados.valor)
        self.pdfCanvas.drawString(
            ((30 + 20 + 20 + 20 + 20) * mm) + self.space,
            y + self.space,
            valor)
        valorDocumento = self._formataValorParaExibir(
            boletoDados.valor_documento)
        self.pdfCanvas.drawRightString(
            self.width - 2 * self.space,
            y + self.space,
            valorDocumento)
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Row starting with the document-date field
        y += self.heightLine
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.drawString(
            0,
            y + self.deltaTitle,
            'Data do documento')
        self.pdfCanvas.drawString(
            (30 * mm) + self.space,
            y + self.deltaTitle,
            'N. do documento')
        self.pdfCanvas.drawString(
            ((30 + 40) * mm) + self.space,
            y + self.deltaTitle,
            'Espécie doc')
        self._verticalLine(
            (30 + 20 + 20 + 20) * mm,
            y,
            self.heightLine)
        self.pdfCanvas.drawString(
            ((30 + 40 + 20) * mm) + self.space,
            y + self.deltaTitle,
            'Aceite')
        self.pdfCanvas.drawString(
            ((30 + 40 + 40) * mm) + self.space,
            y + self.deltaTitle,
            'Data processamento')
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            'Nosso número')
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        self.pdfCanvas.drawString(
            0,
            y + self.space,
            boletoDados.data_documento.strftime('%d/%m/%Y'))
        self.pdfCanvas.drawString(
            (30 * mm) + self.space,
            y + self.space,
            boletoDados.numero_documento)
        self.pdfCanvas.drawString(
            ((30 + 40) * mm) + self.space,
            y + self.space,
            boletoDados.especie_documento)
        self.pdfCanvas.drawString(
            ((30 + 40 + 20) * mm) + self.space,
            y + self.space,
            boletoDados.aceite)
        self.pdfCanvas.drawString(
            ((30 + 40 + 40) * mm) + self.space,
            y + self.space,
            boletoDados.data_processamento.strftime('%d/%m/%Y'))
        self.pdfCanvas.drawRightString(
            self.width - 2 * self.space,
            y + self.space,
            boletoDados.format_nosso_numero())
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Row starting with the Cedente field
        y += self.heightLine
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.drawString(0, y + self.deltaTitle, 'Cedente')
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            'Agência/Código cedente')
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        self.pdfCanvas.drawString(0, y + self.space, boletoDados.cedente[0])
        self.pdfCanvas.drawRightString(
            self.width - 2 * self.space,
            y + self.space,
            boletoDados.agencia_conta)
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Row starting with the payment-location field
        y += self.heightLine
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.drawString(
            0,
            y + self.deltaTitle,
            'Local de pagamento')
        self.pdfCanvas.drawString(
            self.width - (45 * mm) + self.space,
            y + self.deltaTitle,
            'Vencimento')
        self.pdfCanvas.setFont('Helvetica', self.fontSizeValue)
        self.pdfCanvas.drawString(
            0,
            y + self.space,
            boletoDados.local_pagamento)
        self.pdfCanvas.drawRightString(
            self.width - 2 * self.space,
            y + self.space,
            boletoDados.data_vencimento.strftime('%d/%m/%Y'))
        self.pdfCanvas.setFont('Helvetica', self.fontSizeTitle)
        # Thick header line with the bank logo, bank number and digitable line
        self.pdfCanvas.setLineWidth(3)
        y += self.heightLine
        self._horizontalLine(0, y, self.width)
        self.pdfCanvas.setLineWidth(2)
        self._verticalLine(40 * mm, y, self.heightLine)  # logo area
        self._verticalLine(60 * mm, y, self.heightLine)  # bank number area
        if boletoDados.logo_image_path:
            self.pdfCanvas.drawImage(
                boletoDados.logo_image_path,
                0,
                y + self.space + 1,
                40 * mm,
                self.heightLine,
                preserveAspectRatio=True,
                anchor='sw')
        self.pdfCanvas.setFont('Helvetica-Bold', 18)
        self.pdfCanvas.drawCentredString(
            50 * mm,
            y + 2 * self.space,
            boletoDados.codigo_dv_banco)
        self.pdfCanvas.setFont('Helvetica-Bold', 10)
        self.pdfCanvas.drawRightString(
            self.width,
            y + 2 * self.space,
            boletoDados.linha_digitavel)
        # Interleaved 2-of-5 barcode at the bottom
        self._codigoBarraI25(boletoDados.barcode, 2 * self.space, 0)
        self.pdfCanvas.restoreState()
        return self.width, (y + self.heightLine) / mm
def drawBoletoCarneDuplo(self, boletoDados1, boletoDados2):
    """Draw two carne-format bills stacked on one page.

    boletoDados2 may be None, in which case only the first bill is drawn.
    """
    # The carne format reserves extra room at the top of the page.
    offset = 25 if self.format == self.FORMAT_CARNE else 5
    first = self.drawBoletoCarne(boletoDados1, offset)
    # Move past the first bill plus a gap where the cut line would go.
    offset += first[1] + 6
    # self.drawHorizontalCorteLine(0, offset, first[0])
    offset += 7
    if boletoDados2:
        self.drawBoletoCarne(boletoDados2, offset)
def drawBoletoCarne(self, boletoDados, y):
    """Draw one carne bill (payer stub + main slip) at vertical offset y.

    Returns a (total_width, height) tuple for the drawn area.
    """
    x = 5
    stub = self.drawReciboSacadoCanhoto(boletoDados, x, y)
    # Gap, cut line, gap between the stub and the main slip.
    x += stub[0] + 6
    self.drawVerticalCorteLine(x, y, stub[1])
    x += 6
    caixa = self.drawReciboCaixa(boletoDados, x, y)
    return x + caixa[0], caixa[1]
def drawBoleto(self, boletoDados):
    """Draw a full-page bill: bank slip on top, payer receipt below.

    Returns (width, final_y) of the drawn area.
    """
    x, y = 5, 40
    self.drawHorizontalCorteLine(x, y, self.width / mm)
    y += 5
    caixa = self.drawReciboCaixa(boletoDados, x, y)
    y += caixa[1] + 10
    self.drawHorizontalCorteLine(x, y, self.width / mm)
    y += 10
    self.drawReciboSacado(boletoDados, x, y)
    return self.width, y
def nextPage(self):
    """Finish the current PDF page and begin a new one."""
    self.pdfCanvas.showPage()
def save(self):
    """Write the finished PDF to the underlying canvas destination."""
    self.pdfCanvas.save()
def add_data(self, data):
    """Queue one bill's data object to be drawn on render()."""
    self.boletos.append(data)
def render(self):
    """Render every queued bill onto the PDF, one page at a time."""
    if self.format == self.FORMAT_BOLETO:
        # One bill per page.
        for boleto in self.boletos:
            self.drawBoleto(boleto)
            self.nextPage()
    elif self.format == self.FORMAT_CARNE:
        # Two bills per page; the last page may carry a single one.
        for first in range(0, len(self.boletos), 2):
            pair = self.boletos[first:first + 2]
            if len(pair) == 1:
                pair.append(None)
            self.drawBoletoCarneDuplo(*pair)
            self.nextPage()
#
# Private API
#
def _horizontalLine(self, x, y, width):
    """Draw a horizontal line of the given width starting at (x, y)."""
    self.pdfCanvas.line(x, y, x + width, y)
def _verticalLine(self, x, y, width):
    """Draw a vertical line starting at (x, y).

    Note: the ``width`` parameter is actually the line's height.
    """
    self.pdfCanvas.line(x, y, x, y + width)
def _centreText(self, x, y, text):
    """Draw centred text at (x, y) relative to (refX, refY)."""
    self.pdfCanvas.drawCentredString(self.refX + x, self.refY + y, text)
def _rightText(self, x, y, text):
    """Draw right-aligned text at (x, y) relative to (refX, refY)."""
    self.pdfCanvas.drawRightString(self.refX + x, self.refY + y, text)
def _formataValorParaExibir(self, nfloat):
    """Format a value for display, using ',' as the decimal separator.

    Accepts a string (as existing callers pass) or a number; falsy input
    (None, '', 0) yields an empty string, preserving the old behavior.
    """
    if nfloat:
        # Bug fix / generalization: the original called .replace directly
        # on the argument, which raised AttributeError for float/Decimal
        # input despite the parameter name suggesting a number.
        txt = str(nfloat)
        txt = txt.replace('.', ',')
    else:
        txt = ""
    return txt
def _codigoBarraI25(self, num, x, y):
    """Draw ``num`` as an Interleaved 2 of 5 barcode at (x, y).

    The thin-bar width is rescaled so the whole barcode has a fixed
    total length of 103 mm.
    """
    # http://en.wikipedia.org/wiki/Interleaved_2_of_5
    altura = 13 * mm
    comprimento = 103 * mm
    tracoFino = 0.254320987654 * mm  # Approximate correct thin-bar size
    bc = I2of5(num,
               barWidth=tracoFino,
               ratio=3,
               barHeight=altura,
               bearers=0,
               quiet=0,
               checksum=0)
    # Recompute the thin-bar width so the barcode ends up with the
    # correct total length
    tracoFino = (tracoFino * comprimento) / bc.width
    bc.__init__(num, barWidth=tracoFino)
    bc.drawOn(self.pdfCanvas, x, y)
def _render_bill(bill):
    # Render the bill, converting known rendering failures into ReportError
    # (carrying the full traceback text) after submitting the traceback
    # for error collection.
    try:
        bill.render()
    except (BoletoException, ValueError):
        exc = sys.exc_info()
        tb_str = ''.join(traceback.format_exception(*exc))
        collect_traceback(exc, submit=True)
        raise ReportError(tb_str)
class BillReport(object):
    """Report that renders one or more bill ('boleto') payments to a PDF.

    :param filename: path of the PDF file to generate.
    :param payments: sequence of payments to print.
    :param account: optional account overriding each payment's
        destination account.
    :param bank: optional bank overriding the account's bank.
    """

    def __init__(self, filename, payments, account=None, bank=None):
        self._payments = payments
        self._filename = filename
        self._account = account
        self._bank = bank
        self._bill = self._get_bill()
        self._payments_added = False
        # Reports need a title when printing
        self.title = _("Bill")
        self.today = datetime.datetime.today()

    @classmethod
    def check_printable(cls, payments):
        """Return True if every payment can be printed as a bill.

        Shows a warning dialog describing the first problem found.
        """
        for payment in payments:
            msg = cls.validate_payment_for_printing(payment)
            if msg:
                warning(_("Could not print Bill Report"),
                        description=msg)
                return False
        return True

    @classmethod
    def validate_payment_for_printing(cls, payment):
        """Return an error message if ``payment`` cannot be printed,
        or None when it is printable."""
        account = payment.method.destination_account
        if not account:
            # Bug fix: the original interpolated account.description here,
            # which raised AttributeError since account is None/falsy at
            # this point. Use the method name instead.
            msg = _("Payment method missing a destination account: '%s'") % (
                payment.method.method_name, )
            return msg
        from stoqlib.domain.account import Account
        if (account.account_type != Account.TYPE_BANK or
                not account.bank):
            msg = _("Account '%s' must be a bank account.\n"
                    "You need to configure the bill payment method in "
                    "the admin application and try again") % account.description
            return msg
        bank = account.bank
        if bank.bank_number == 0:
            msg = _("Improperly configured bank account: %r") % (bank, )
            return msg
        # FIXME: Verify that all bill option fields are configured properly
        bank_no = bank.bank_number
        bank_info = get_bank_info_by_number(bank_no)
        if not bank_info:
            msg = _("Missing stoq support for bank %d") % (bank_no, )
            return msg

    def _get_bill(self):
        # Use the carne (booklet, two-per-page) layout when printing more
        # than one payment; otherwise a full-page bill.
        format = BoletoPDF.FORMAT_BOLETO
        if len(self._payments) > 1:
            format = BoletoPDF.FORMAT_CARNE
        # This is a PrintOperationPoppler's workaround to really print
        # the page in landscape, without cutting the edges
        self.print_as_landscape = True
        return BoletoPDF(self._filename, format)

    def _get_instrucoes(self, payment):
        # Instructions printed on the bill: up to three configured lines
        # ($DATE expands to the due date) plus a fixed Stoq footer.
        instructions = []
        data = sysparam.get_string('BILL_INSTRUCTIONS')
        for line in data.split('\n')[:3]:
            line = line.replace('$DATE', payment.due_date.strftime('%d/%m/%Y'))
            instructions.append(line)
        instructions.append('\n' + _('Stoq Retail Management') + ' - www.stoq.com.br')
        return instructions

    def _get_demonstrativo(self):
        # Description block: the payment group description followed by one
        # line per sale item, when the payment belongs to a sale.
        payment = self._payments[0]
        demonstrativo = [payment.group.get_description()]
        sale = payment.group.sale
        if sale:
            for item in sale.get_items():
                demonstrativo.append(' - %s' % item.get_description())
        return demonstrativo

    def _get_sacado(self):
        # "Sacado" is the payer: name plus address lines.
        payment = self._payments[0]
        payer = payment.group.payer
        address = payer.get_main_address()
        return [payer.name,
                address.get_address_string(),
                address.get_details_string()]

    def _get_cedente(self):
        # "Cedente" is the payee branch: description, address and CNPJ.
        payment = self._payments[0]
        parent = payment.group.get_parent()
        if parent:
            branch = parent.branch
        else:
            branch = sysparam.get_object(payment.store, 'MAIN_COMPANY')
        address = branch.person.get_main_address()
        return [branch.get_description(),
                address.get_address_string(),
                address.get_details_string(),
                branch.person.company.cnpj]

    def _get_account(self, payment):
        # An explicitly supplied account overrides the payment's own
        # destination account.
        if self._account:
            return self._account
        return payment.method.destination_account

    def _get_bank(self, account):
        # An explicitly supplied bank overrides the account's bank.
        if self._bank:
            return self._bank
        return account.bank

    def add_payments(self):
        """Queue all 'bill' payments on the PDF. Safe to call twice."""
        if self._payments_added:
            return
        for p in self._payments:
            if p.method.method_name != 'bill':
                continue
            self._add_payment(p)
        self._payments_added = True

    def _add_payment(self, payment):
        # Build the bank-specific bill data object for one payment and
        # queue it on the PDF.
        account = self._get_account(payment)
        bank = self._get_bank(account)
        kwargs = dict(
            valor_documento=payment.value,
            data_vencimento=payment.due_date.date(),
            data_documento=payment.open_date.date(),
            data_processamento=self.today,
            # FIXME: Maybe we should add the branch id to this numbers
            nosso_numero=str(int(payment.identifier)),
            numero_documento=str(int(payment.identifier)),
            sacado=self._get_sacado(),
            cedente=self._get_cedente(),
            demonstrativo=self._get_demonstrativo(),
            instrucoes=self._get_instrucoes(payment),
            agencia=bank.bank_branch,
            conta=bank.bank_account,
        )
        # Bank-specific options (stored as key/value pairs) are forwarded
        # verbatim to the bank's bill class.
        for opt in bank.options:
            kwargs[opt.option] = opt.value
        _render_class = get_bank_info_by_number(
            bank.bank_number)
        data = _render_class(**kwargs)
        self._bill.add_data(data)

    def override_payment_id(self, payment_id):
        """Replace the payment identifier on every queued bill."""
        for data in self._bill.boletos:
            data.nosso_numero = str(payment_id)
            data.numero_documento = str(payment_id)

    def override_payment_description(self, description):
        """Replace the description block on every queued bill."""
        for data in self._bill.boletos:
            data.demonstrativo = description

    def save(self):
        """Queue the payments, render them and write the PDF to disk."""
        self.add_payments()
        _render_bill(self._bill)
        self._bill.save()
class BillTestReport(object):
    """Minimal report used to render a single test bill to a PDF file."""

    def __init__(self, filename, data):
        # Reports need a title when printing.
        self.title = _("Bill")
        self._bill = BoletoPDF(filename, BoletoPDF.FORMAT_BOLETO)
        self._bill.add_data(data)

    def save(self):
        """Render the queued bill and write the PDF to disk."""
        _render_bill(self._bill)
        self._bill.save()
| tiagocardosos/stoq | stoqlib/reporting/boleto.py | Python | gpl-2.0 | 34,454 | [
"VisIt"
] | c4b0fe803716f1243ec9661133f8741c1f87f58cc71a4ac5a1f971fc1e94d658 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import deepiv.samplers as samplers
import deepiv.densities as densities
from deepiv.custom_gradients import replace_gradients_mse
from keras.models import Model
from keras import backend as K
from keras.layers import Lambda, InputLayer
from keras.engine import topology
try:
import h5py
except ImportError:
h5py = None
import keras.utils
import numpy
from sklearn import linear_model
from sklearn.decomposition import PCA
from scipy.stats import norm
class Treatment(Model):
    '''
    Adds sampling functionality to a Keras model and extends the losses to support
    mixture of gaussian losses.

    # Argument
    '''

    def _get_sampler_by_string(self, loss):
        # Map a Keras loss name to a function drawing samples from the
        # conditional distribution that the loss implicitly assumes.
        output = self.outputs[0]
        inputs = self.inputs

        if loss in ["MSE", "mse", "mean_squared_error"]:
            # MSE training corresponds to a unit-variance gaussian likelihood.
            output += samplers.random_normal(K.shape(output), mean=0.0, std=1.0)
            draw_sample = K.function(inputs + [K.learning_phase()], [output])

            def sample_gaussian(inputs, use_dropout=False):
                '''
                Helper to draw samples from a gaussian distribution
                '''
                return draw_sample(inputs + [int(use_dropout)])[0]

            return sample_gaussian

        elif loss == "binary_crossentropy":
            # Binary cross-entropy corresponds to a Bernoulli likelihood.
            output = K.random_binomial(K.shape(output), p=output)
            draw_sample = K.function(inputs + [K.learning_phase()], [output])

            def sample_binomial(inputs, use_dropout=False):
                '''
                Helper to draw samples from a binomial distribution
                '''
                return draw_sample(inputs + [int(use_dropout)])[0]

            return sample_binomial

        elif loss in ["mean_absolute_error", "mae", "MAE"]:
            # MAE corresponds to a unit-scale Laplace likelihood.
            output += samplers.random_laplace(K.shape(output), mu=0.0, b=1.0)
            draw_sample = K.function(inputs + [K.learning_phase()], [output])

            def sample_laplace(inputs, use_dropout=False):
                '''
                Helper to draw samples from a Laplacian distribution
                '''
                return draw_sample(inputs + [int(use_dropout)])[0]

            return sample_laplace

        elif loss == "mixture_of_gaussians":
            # The network outputs mixture weights, means and log-stds;
            # sample directly from that gaussian mixture.
            pi, mu, log_sig = densities.split_mixture_of_gaussians(output, self.n_components)
            samples = samplers.random_gmm(pi, mu, K.exp(log_sig))
            draw_sample = K.function(inputs + [K.learning_phase()], [samples])
            return lambda inputs, use_dropout: draw_sample(inputs + [int(use_dropout)])[0]

        else:
            raise NotImplementedError("Unrecognised loss: %s.\
                                       Cannot build a generic sampler" % loss)

    def _prepare_sampler(self, loss):
        '''
        Build sampler
        '''
        if isinstance(loss, str):
            self._sampler = self._get_sampler_by_string(loss)
        else:
            warnings.warn("You're using a custom loss function. Make sure you implement\
                           the model's sample() fuction yourself.")

    def compile(self, optimizer, loss, metrics=None, loss_weights=None,
                sample_weight_mode=None, n_components=None, **kwargs):
        '''
        Overrides the existing keras compile function to add a sampler building
        step to the model compilation phase. Once compiled, one can draw samples
        from the network using the sample() function and adds support for mixture
        of gaussian loss.
        '''
        if loss == "mixture_of_gaussians":
            if n_components is None:
                raise Exception("When using mixture of gaussian loss you must\
                                 supply n_components argument")
            self.n_components = n_components
            self._prepare_sampler(loss)
            # Close over n_components: Keras losses only receive
            # (y_true, y_pred).
            loss = lambda y_true, y_pred: densities.mixture_of_gaussian_loss(y_true,
                                                                             y_pred,
                                                                             n_components)

            def predict_mean(x, batch_size=32, verbose=0):
                '''
                Helper to just predict the expected value of the mixture
                of gaussian rather than the parameters for the distribution.
                '''
                y_hat = super(Treatment, self).predict(x, batch_size, verbose)
                n_c = n_components
                # E[y] = sum_i pi_i * mu_i over the mixture components.
                return (y_hat[:, 0:n_c] * y_hat[:, n_c:2*n_c]).sum(axis=1, keepdims=True)

            self.predict_mean = predict_mean
        else:
            self._prepare_sampler(loss)
        super(Treatment, self).compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights,
                                       sample_weight_mode=sample_weight_mode, **kwargs)

    def sample(self, inputs, n_samples=1, use_dropout=False):
        '''
        Draw samples from the keras model.
        '''
        if hasattr(self, "_sampler"):
            if not isinstance(inputs, list):
                inputs = [inputs]
            # Repeat every input row n_samples times so a single forward
            # pass yields n_samples draws per observation.
            inputs = [i.repeat(n_samples, axis=0) for i in inputs]
            return self._sampler(inputs, use_dropout)
        else:
            raise Exception("Compile model with loss before sampling")
class Response(Model):
    '''
    Extends the Keras Model class to support sampling from the Treatment
    model during training.

    Overwrites the existing fit_generator function.

    # Arguments
    In addition to the standard model arguments, a Response object takes
    a Treatment object as input so that it can sample from the fitted treatment
    distriubtion during training.
    '''

    def __init__(self, treatment, **kwargs):
        if isinstance(treatment, Treatment):
            self.treatment = treatment
        else:
            raise TypeError("Expected a treatment model of type Treatment. \
                             Got a model of type %s. Remember to train your\
                             treatment model first." % type(treatment))
        super(Response, self).__init__(**kwargs)

    def compile(self, optimizer, loss, metrics=None, loss_weights=None, sample_weight_mode=None,
                unbiased_gradient=False, n_samples=1, batch_size=None):
        super(Response, self).compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights,
                                      sample_weight_mode=sample_weight_mode)
        self.unbiased_gradient = unbiased_gradient
        if unbiased_gradient:
            if loss in ["MSE", "mse", "mean_squared_error"]:
                if batch_size is None:
                    raise ValueError("Must supply a batch_size argument if using unbiased gradients. Currently batch_size is None.")
                # Swap in the two-sample unbiased MSE gradient estimator.
                replace_gradients_mse(self, optimizer, batch_size=batch_size, n_samples=n_samples)
            else:
                warnings.warn("Unbiased gradient only implemented for mean square error loss. It is unnecessary for\
                               logistic losses and currently not implemented for absolute error losses.")

    def fit(self, x=None, y=None, batch_size=512, epochs=1, verbose=1, callbacks=None,
            validation_data=None, class_weight=None, initial_epoch=0, samples_per_batch=None,
            seed=None, observed_treatments=None):
        '''
        Trains the model by sampling from the fitted treament distribution.

        # Arguments
        x: list of numpy arrays. The first element should *always* be the instrument variables.
        y: (numpy array). Target response variables.
        The remainder of the arguments correspond to the Keras definitions.
        '''
        batch_size = numpy.minimum(y.shape[0], batch_size)
        if seed is None:
            seed = numpy.random.randint(0, 1e6)
        if samples_per_batch is None:
            # The unbiased estimator needs two independent samples per row.
            if self.unbiased_gradient:
                samples_per_batch = 2
            else:
                samples_per_batch = 1

        if observed_treatments is None:
            generator = SampledSequence(x[1:], x[0], y, batch_size, self.treatment.sample, samples_per_batch)
        else:
            generator = OnesidedUnbaised(x[1:], x[0], y, observed_treatments, batch_size,
                                         self.treatment.sample, samples_per_batch)

        steps_per_epoch = y.shape[0] // batch_size
        super(Response, self).fit_generator(generator=generator,
                                            steps_per_epoch=steps_per_epoch,
                                            epochs=epochs, verbose=verbose,
                                            callbacks=callbacks, validation_data=validation_data,
                                            class_weight=class_weight, initial_epoch=initial_epoch)

    def fit_generator(self, **kwargs):
        '''
        We use override fit_generator to support sampling from the treatment model during training.

        If you need this functionality, you'll need to build a generator that samples from the
        treatment and performs whatever transformations you're performing. Please submit a pull
        request if you implement this.
        '''
        raise NotImplementedError("We use override fit_generator to support sampling from the\
                                   treatment model during training.")

    def expected_representation(self, x, z, n_samples=100, batch_size=None, seed=None):
        # Monte-Carlo estimate of the penultimate-layer representation,
        # averaged over treatment samples drawn given (z, x).
        inputs = [z, x]
        if not hasattr(self, "_E_representation"):
            if batch_size is None:
                batch_size = inputs[0].shape[0]
                steps = 1
            else:
                steps = inputs[0].shape[0] // batch_size
            intermediate_layer_model = Model(inputs=self.inputs,
                                             outputs=self.layers[-2].output)

            def pred(inputs, n_samples=100, seed=None):
                features = inputs[1]
                samples = self.treatment.sample(inputs, n_samples)
                batch_features = [features.repeat(n_samples, axis=0)] + [samples]
                representation = intermediate_layer_model.predict(batch_features)
                # Average the representation across the n_samples draws.
                return representation.reshape((inputs[0].shape[0], n_samples, -1)).mean(axis=1)
            self._E_representation = pred
            return self._E_representation(inputs, n_samples, seed)
        else:
            return self._E_representation(inputs, n_samples, seed)

    def conditional_representation(self, x, p):
        # Penultimate-layer representation at observed treatments p.
        inputs = [x, p]
        if not hasattr(self, "_c_representation"):
            intermediate_layer_model = Model(inputs=self.inputs,
                                             outputs=self.layers[-2].output)
            self._c_representation = intermediate_layer_model.predict
            return self._c_representation(inputs)
        else:
            return self._c_representation(inputs)

    def dropout_predict(self, x, z, n_samples=100):
        # Monte-Carlo dropout prediction: treatment samples drawn with
        # dropout active are pushed through the response network, also
        # with dropout active.
        if isinstance(x, list):
            inputs = [z] + x
        else:
            inputs = [z, x]
        if not hasattr(self, "_dropout_predict"):
            predict_with_dropout = K.function(self.inputs + [K.learning_phase()],
                                              [self.layers[-1].output])

            def pred(inputs, n_samples=100):
                # draw samples from the treatment network with dropout turned on
                samples = self.treatment.sample(inputs, n_samples, use_dropout=True)
                # prepare inputs for the response network
                rep_inputs = [i.repeat(n_samples, axis=0) for i in inputs[1:]] + [samples]
                # return outputs from the response network with dropout
                # turned on (learning_phase=1 keeps dropout active; the
                # original comment said 0, but the code passes 1)
                return predict_with_dropout(rep_inputs + [1])[0]
            self._dropout_predict = pred
            return self._dropout_predict(inputs, n_samples)
        else:
            return self._dropout_predict(inputs, n_samples)

    def credible_interval(self, x, z, n_samples=100, p=0.95):
        '''
        Return a credible interval of size p using dropout variational inference.
        '''
        if isinstance(x, list):
            n = x[0].shape[0]
        else:
            n = x.shape[0]
        # Symmetric tails: alpha mass below and above the interval.
        alpha = (1-p) / 2.
        samples = self.dropout_predict(x, z, n_samples).reshape((n, n_samples, -1))
        upper = numpy.percentile(samples.copy(), 100*(p+alpha), axis=1)
        lower = numpy.percentile(samples.copy(), 100*(alpha), axis=1)
        return lower, upper

    def _add_constant(self, X):
        # Prepend an intercept column of ones.
        return numpy.concatenate((numpy.ones((X.shape[0], 1)), X), axis=1)

    def predict_confidence(self, x, p):
        # Point estimate and standard error; requires a prior call to
        # fit_confidence_interval().
        if hasattr(self, "_predict_confidence"):
            return self._predict_confidence(x, p)
        else:
            raise Exception("Call fit_confidence_interval before running predict_confidence")

    def fit_confidence_interval(self, x_lo, z_lo, p_lo, y_lo, n_samples=100, alpha=0.):
        # Two-stage least squares in the response network's representation
        # space; builds the sandwich covariance used by predict_confidence.
        eta_bar = self.expected_representation(x=x_lo, z=z_lo, n_samples=n_samples)
        pca = PCA(1-1e-16, svd_solver="full", whiten=True)
        pca.fit(eta_bar)
        eta_bar = pca.transform(eta_bar)
        eta_lo_prime = pca.transform(self.conditional_representation(x_lo, p_lo))
        eta_lo = self._add_constant(eta_lo_prime)

        ols1 = linear_model.Ridge(alpha=alpha, fit_intercept=True)
        ols1.fit(eta_bar, eta_lo_prime)
        hhat = ols1.predict(eta_bar)

        ols2 = linear_model.Ridge(alpha=alpha, fit_intercept=False)
        ols2.fit(self._add_constant(hhat), y_lo)
        yhat = ols2.predict(eta_lo)

        # Heteroskedasticity-robust (sandwich) covariance estimate.
        hhi = numpy.linalg.inv(numpy.dot(eta_lo.T, eta_lo))
        heh = numpy.dot(eta_lo.T, numpy.square(y_lo - yhat) * eta_lo)
        V = numpy.dot(numpy.dot(hhi, heh), hhi)

        def pred(xx, pp):
            # Fitted value and pointwise standard error at new (x, p).
            H = self._add_constant(pca.transform(self.conditional_representation(xx, pp)))
            sdhb = numpy.sqrt(numpy.diag(numpy.dot(numpy.dot(H, V), H.T)))
            hb = ols2.predict(H).flatten()
            return hb, sdhb
        self._predict_confidence = pred
class SampledSequence(keras.utils.Sequence):
    # Keras data generator that, for every batch, replaces the treatment
    # with draws from the fitted treatment network.

    def __init__(self, features, instruments, outputs, batch_size, sampler, n_samples=1, seed=None):
        self.rng = numpy.random.RandomState(seed)
        # Copy all inputs so in-place shuffling never mutates caller arrays.
        if not isinstance(features, list):
            features = [features.copy()]
        else:
            features = [f.copy() for f in features]
        self.features = features
        self.instruments = instruments.copy()
        self.outputs = outputs.copy()
        # Clamp batch_size to the dataset size.
        if batch_size < self.instruments.shape[0]:
            self.batch_size = batch_size
        else:
            self.batch_size = self.instruments.shape[0]
        self.sampler = sampler
        self.n_samples = n_samples
        self.current_index = 0
        self.shuffle()

    def __len__(self):
        # Number of whole batches per epoch (any remainder is dropped).
        if isinstance(self.outputs, list):
            return self.outputs[0].shape[0] // self.batch_size
        else:
            return self.outputs.shape[0] // self.batch_size

    def shuffle(self):
        # One shared permutation keeps rows aligned across all arrays.
        idx = self.rng.permutation(numpy.arange(self.instruments.shape[0]))
        self.instruments = self.instruments[idx, :]
        self.outputs = self.outputs[idx, :]
        self.features = [f[idx, :] for f in self.features]

    def __getitem__(self, idx):
        instruments = [self.instruments[idx*self.batch_size:(idx+1)*self.batch_size, :]]
        features = [inp[idx*self.batch_size:(idx+1)*self.batch_size, :] for inp in self.features]
        # The sampler expects [instruments] + [features].
        sampler_input = instruments + features
        samples = self.sampler(sampler_input, self.n_samples)
        # Repeat each feature/output row n_samples times so rows align
        # with the per-row-repeated samples.
        batch_features = [f[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0) for f in self.features] + [samples]
        batch_y = self.outputs[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0)
        if idx == (len(self) - 1):
            # Reshuffle at the end of every epoch.
            self.shuffle()
        return batch_features, batch_y
class OnesidedUnbaised(SampledSequence):
    # Variant of SampledSequence for the one-sided unbiased gradient:
    # each batch pairs observed treatments with sampled treatments.

    def __init__(self, features, instruments, outputs, treatments, batch_size, sampler, n_samples=1, seed=None):
        self.rng = numpy.random.RandomState(seed)
        # Copy all inputs so in-place shuffling never mutates caller arrays.
        if not isinstance(features, list):
            features = [features.copy()]
        else:
            features = [f.copy() for f in features]
        self.features = features
        self.instruments = instruments.copy()
        self.outputs = outputs.copy()
        self.treatments = treatments.copy()
        self.batch_size = batch_size
        self.sampler = sampler
        self.n_samples = n_samples
        self.current_index = 0
        self.shuffle()

    def shuffle(self):
        # One shared permutation keeps rows aligned across all arrays,
        # including the observed treatments.
        idx = self.rng.permutation(numpy.arange(self.instruments.shape[0]))
        self.instruments = self.instruments[idx, :]
        self.outputs = self.outputs[idx, :]
        self.features = [f[idx, :] for f in self.features]
        self.treatments = self.treatments[idx, :]

    def __getitem__(self, idx):
        instruments = [self.instruments[idx*self.batch_size:(idx+1)*self.batch_size, :]]
        features = [inp[idx*self.batch_size:(idx+1)*self.batch_size, :] for inp in self.features]
        observed_treatments = self.treatments[idx*self.batch_size:(idx+1)*self.batch_size, :]
        sampler_input = instruments + features
        # Half the samples come from the network, half are the observed
        # treatments.
        samples = self.sampler(sampler_input, self.n_samples // 2)
        # NOTE(review): observed and sampled treatments are concatenated
        # block-wise here, while features/outputs below are repeated
        # row-wise (interleaved). For n_samples > 1 the rows therefore do
        # not appear to line up -- verify the intended pairing.
        samples = numpy.concatenate([observed_treatments, samples], axis=0)
        batch_features = [f[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0) for f in self.features] + [samples]
        batch_y = self.outputs[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0)
        if idx == (len(self) - 1):
            self.shuffle()
        return batch_features, batch_y
def load_weights(filepath, model):
    """Load the weights stored under 'model_weights' in an HDF5 file
    into ``model`` and return the same model instance."""
    if h5py is None:
        raise ImportError('`load_weights` requires h5py.')
    with h5py.File(filepath, mode='r') as f:
        # set weights
        topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)
    return model
| jhartford/DeepIV | deepiv/models.py | Python | mit | 18,271 | [
"Gaussian"
] | f019c70b5d8d457a8c0b4df829cb9d1eb3edf26c5595e89cfb9a26a010f97e7c |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy
from pyscf import gto, scf
from pyscf import dft
from pyscf import lib
# Module-level fixtures shared by all tests: a small H4 molecule, a
# pre-converged density matrix, an RKS grid and AO/density values on it.
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = 'h 0 0 0; h 1 .5 0; h 0 4 1; h 1 0 .2'
mol.basis = 'aug-ccpvdz'
mol.build()
#dm = scf.RHF(mol).run(conv_tol=1e-14).make_rdm1()
# Load the converged RHF density matrix from disk so tests are fast and
# deterministic.
dm = numpy.load(os.path.realpath(os.path.join(__file__, '..', 'dm_h4.npy')))
mf = dft.RKS(mol)
mf.grids.atom_grid = {"H": (50, 110)}
# NOTE(review): this sets an unused attribute on the SCF object -- the grid
# pruning knob is mf.grids.prune. Do not "fix" it blindly: the reference
# finger values in the tests below were generated with the grid built this
# way, so changing it would break them.
mf.prune = None
mf.grids.build(with_non0tab=False)
nao = mol.nao_nr()
ao = dft.numint.eval_ao(mol, mf.grids.coords, deriv=1)
rho = dft.numint.eval_rho(mol, ao, dm, xctype='GGA')
def tearDownModule():
    # Release the module-level fixtures so the interpreter can reclaim
    # their memory once this test module finishes.
    global mol, mf, ao, rho
    del mol, mf, ao, rho
def finger(a):
    """Deterministic scalar fingerprint of an array: the dot product of
    the flattened array with cos(0), cos(1), ..., cos(n-1)."""
    flat = a.ravel()
    weights = numpy.cos(numpy.arange(flat.size))
    return numpy.dot(weights, flat)
class KnownValues(unittest.TestCase):
def test_parse_xc(self):
    """Parse compound XC strings: hybrid fractions, range-separated
    hybrid (RSH) parameters, aliases and invalid combinations."""
    hyb, fn_facs = dft.xcfun.parse_xc('.5*HF+.5*B3LYP,VWN*.5')
    self.assertAlmostEqual(hyb[0], .6, 12)
    self.assertEqual([x[0] for x in fn_facs], [0,6,16,3])
    self.assertTrue(numpy.allclose([x[1] for x in fn_facs],
                                   (0.04, 0.36, 0.405, 0.595)))
    hyb, fn_facs = dft.xcfun.parse_xc('HF,')
    self.assertEqual(hyb[0], 1)
    self.assertEqual(fn_facs, [])

    # NOTE(review): the next three checks go through dft.libxc rather than
    # dft.xcfun -- presumably an intentional cross-check; confirm.
    hyb, fn_facs = dft.libxc.parse_xc('B88 - SLATER')
    self.assertEqual(fn_facs, [(106, 1), (1, -1)])
    hyb, fn_facs = dft.libxc.parse_xc('B88 -SLATER*.5')
    self.assertEqual(fn_facs, [(106, 1), (1, -0.5)])

    hyb, fn_facs = dft.xcfun.parse_xc('0.5*B3LYP+0.25*B3LYP')
    self.assertTrue(numpy.allclose(hyb, [.15, 0, 0]))
    hyb = dft.libxc.hybrid_coeff('0.5*B3LYP+0.25*B3LYP')
    self.assertAlmostEqual(hyb, .15, 12)

    hyb, fn_facs = dft.xcfun.parse_xc('CAM_B3LYP')
    self.assertTrue(numpy.allclose(hyb, [0.19, 0.65, 0.33]))

    # Mixtures of an RSH functional with a global hybrid.
    hyb, fn_facs = dft.xcfun.parse_xc('0.6*CAM_B3LYP+0.4*B3P86')
    self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))
    self.assertTrue(numpy.allclose(fn_facs,
                                   [(9, 0.6), (3, 0.19), (16, 0.486), (0, 0.032), (6, 0.288), (46, 0.324)]))
    rsh = dft.xcfun.rsh_coeff('0.6*CAM_B3LYP+0.4*B3P86')
    self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))

    hyb, fn_facs = dft.xcfun.parse_xc('0.4*B3P86+0.6*CAM_B3LYP')
    self.assertTrue(numpy.allclose(hyb, [.08+0.19*.6, 0.65*.6, 0.33]))
    self.assertTrue(numpy.allclose(fn_facs,
                                   [(0, 0.032), (6, 0.288), (46, 0.324), (3, 0.19), (9, 0.6), (16, 0.486)]))
    rsh = dft.xcfun.rsh_coeff('0.4*B3P86+0.6*CAM_B3LYP')
    self.assertTrue(numpy.allclose(rsh, (0.33, 0.39, -0.196)))

    # Short-range / long-range HF components; the omega parameter may be
    # attached to either term.
    hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF(0.3) + .8*HF + .22*LR_HF')
    self.assertEqual(hyb, [1.3, 1.02, 0.3])
    hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .22*LR_HF(0.3) + .8*HF')
    self.assertEqual(hyb, [1.3, 1.02, 0.3])
    hyb, fn_facs = dft.xcfun.parse_xc('0.5*SR-HF + .8*HF + .22*LR_HF(0.3)')
    self.assertEqual(hyb, [1.3, 1.02, 0.3])
    hyb, fn_facs = dft.xcfun.parse_xc('0.5*RSH(2.04;0.56;0.3) + 0.5*BP86')
    self.assertEqual(hyb, [1.3, 1.02, 0.3])
    self.assertEqual(fn_facs, [(6, 0.5), (46, 0.5)])

    # Conflicting omega values must be rejected.
    self.assertRaises(ValueError, dft.xcfun.parse_xc, 'SR_HF(0.3) + LR_HF(.5)')
    self.assertRaises(ValueError, dft.xcfun.parse_xc, 'LR-HF(0.3) + SR-HF(.5)')

    hyb = dft.xcfun.hybrid_coeff('M05')
    self.assertAlmostEqual(hyb, 0.28, 9)

    hyb, fn_facs = dft.xcfun.parse_xc('APBE,')
    self.assertEqual(fn_facs[0][0], 58)

    hyb, fn_facs = dft.xcfun.parse_xc('VWN,')
    self.assertEqual(fn_facs, [(3, 1)])

    hyb, fn_facs = dft.xcfun.parse_xc('TF,')
    self.assertEqual(fn_facs, [(24, 1)])

    # All the input spellings below must resolve to the same functionals.
    ref = [(0, 1), (3, 1)]
    self.assertEqual(dft.xcfun.parse_xc_name('LDA,VWN'), (0,3))
    self.assertEqual(dft.xcfun.parse_xc(('LDA','VWN'))[1], ref)
    self.assertEqual(dft.xcfun.parse_xc((0, 3))[1], ref)
    self.assertEqual(dft.xcfun.parse_xc('0, 3')[1], ref)
    self.assertEqual(dft.xcfun.parse_xc(3)[1], [(3,1)])

    #self.assertEqual(dft.xcfun.parse_xc('M11-L')[1], [(226,1),(75,1)])
    #self.assertEqual(dft.xcfun.parse_xc('M11L' )[1], [(226,1),(75,1)])
    #self.assertEqual(dft.xcfun.parse_xc('M11-L,M11L' )[1], [(226,1),(75,1)])
    #self.assertEqual(dft.xcfun.parse_xc('M11_L,M11-L')[1], [(226,1),(75,1)])
    #self.assertEqual(dft.xcfun.parse_xc('M11L,M11_L' )[1], [(226,1),(75,1)])

    #self.assertEqual(dft.xcfun.parse_xc('Xpbe,')[1], [(123,1)])
    #self.assertEqual(dft.xcfun.parse_xc('pbe,' )[1], [(101,1)])
    hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+LDA')
    self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (0, 1)])
    hyb, fn_facs = dft.xcfun.parse_xc('PBE*.4+VWN')
    self.assertEqual(fn_facs, [(5, 0.4), (4, 0.4), (3, 1)])

    # Functional-class predicates.
    self.assertTrue (dft.xcfun.is_meta_gga('m05'))
    self.assertFalse(dft.xcfun.is_meta_gga('pbe0'))
    self.assertFalse(dft.xcfun.is_meta_gga('tf,'))
    self.assertFalse(dft.xcfun.is_meta_gga('vv10'))
    self.assertTrue (dft.xcfun.is_gga('PBE0'))
    self.assertFalse(dft.xcfun.is_gga('m05'))
    self.assertFalse(dft.xcfun.is_gga('tf,'))
    self.assertTrue (dft.xcfun.is_lda('tf,'))
    self.assertFalse(dft.xcfun.is_lda('vv10'))
    self.assertTrue (dft.xcfun.is_hybrid_xc('m05'))
    self.assertTrue (dft.xcfun.is_hybrid_xc('pbe0,'))
    self.assertFalse(dft.xcfun.is_hybrid_xc('m05,'))
    self.assertFalse(dft.xcfun.is_hybrid_xc('vv10'))
    self.assertTrue (dft.xcfun.is_hybrid_xc(('b3lyp',4,'vv10')))
def test_nlc_coeff(self):
    """Non-local correlation (VV10) parameters for the given functional."""
    self.assertEqual(dft.xcfun.nlc_coeff('vv10'), [5.9, 0.0093])
def test_lda(self):
    """LDA exchange up to third derivatives, spin-restricted and
    spin-polarized; polarized results must match the factor-scaled
    restricted values for equal alpha/beta densities."""
    e,v,f,k = dft.xcfun.eval_xc('lda,', rho[0][:3], deriv=3)
    self.assertAlmostEqual(lib.finger(e)   , -0.4720562542635522, 8)
    self.assertAlmostEqual(lib.finger(v[0]), -0.6294083390180697, 8)
    self.assertAlmostEqual(lib.finger(f[0]), -1.1414693830969338, 8)
    self.assertAlmostEqual(lib.finger(k[0]),  4.1402447248393921, 8)

    # Spin-polarized with rho_a = rho_b = rho/2 reproduces the restricted
    # values; higher derivatives pick up powers of 2 from the chain rule.
    e,v,f,k = dft.xcfun.eval_xc('lda,', [rho[0][:3]*.5]*2, spin=1, deriv=3)
    self.assertAlmostEqual(lib.finger(e)   , -0.4720562542635522, 8)
    self.assertAlmostEqual(lib.finger(v[0].T[0]), -0.6294083390180697, 8)
    self.assertAlmostEqual(lib.finger(v[0].T[1]), -0.6294083390180697, 8)
    self.assertAlmostEqual(lib.finger(f[0].T[0]), -1.1414693830969338*2, 8)
    self.assertAlmostEqual(lib.finger(f[0].T[2]), -1.1414693830969338*2, 8)
    self.assertAlmostEqual(lib.finger(k[0].T[0]),  4.1402447248393921*4, 7)
    self.assertAlmostEqual(lib.finger(k[0].T[3]),  4.1402447248393921*4, 7)
def test_lyp(self):
    """LYP correlation: density-weighted integrals of the energy density
    and its first/second derivatives."""
    e,v,f = dft.xcfun.eval_xc(',LYP', rho, deriv=3)[:3]
    self.assertAlmostEqual(numpy.dot(rho[0],e), -62.114576182676615, 8)
    self.assertAlmostEqual(numpy.dot(rho[0],v[0]),-81.771670866308455, 8)
    self.assertAlmostEqual(numpy.dot(rho[0],v[1]), 27.485383255125743, 8)
    self.assertAlmostEqual(numpy.dot(rho[0],f[0]), 186.823806251777, 7)
    self.assertAlmostEqual(numpy.dot(rho[0],f[1]), -3391.2428894571085, 6)
    self.assertAlmostEqual(numpy.dot(rho[0],f[2]), 0, 9)
def test_beckex(self):
    """Becke 88 exchange on a single spin-polarized grid point."""
    # rho layout per spin: (rho, grad_x, grad_y, grad_z) at one point.
    rho =(numpy.array([1. , 1., 0., 0.]).reshape(-1,1),
          numpy.array([ .8, 1., 0., 0.]).reshape(-1,1))
    e,v,f = dft.xcfun.eval_xc('b88,', rho, spin=1, deriv=3)[:3]
    self.assertAlmostEqual(lib.finger(e)   ,-0.9061911523772116   , 9)
    self.assertAlmostEqual(lib.finger(v[0]),-1.8531364353196298   , 9)
    self.assertAlmostEqual(lib.finger(v[1]),-0.0018308066137967724, 9)
    self.assertAlmostEqual(lib.finger(f[0]),-0.21602284426026866  , 9)
    self.assertAlmostEqual(lib.finger(f[1]), 0.0072053520662545617, 9)
    self.assertAlmostEqual(lib.finger(f[2]), 0.0002275350850255538, 9)
def test_m05x(self):
    """M05 meta-GGA exchange on one grid point, polarized and restricted."""
    # rho layout per spin: (rho, grad_x, grad_y, grad_z, lapl, tau).
    rho =(numpy.array([1., 1., 0., 0., 0., 0.165 ]).reshape(-1,1),
          numpy.array([.8, 1., 0., 0., 0., 0.1050]).reshape(-1,1))
    test_ref = numpy.array([-1.57876583, -2.12127045,-2.11264351,-0.00315462,
                             0.00000000, -0.00444560, 3.45640232, 4.4349756])
    exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho, 1, deriv=3)
    self.assertAlmostEqual(float(exc)*1.8, test_ref[0], 5)
    self.assertAlmostEqual(abs(vxc[0]-test_ref[1:3]).max(), 0, 6)
    self.assertAlmostEqual(abs(vxc[1]-test_ref[3:6]).max(), 0, 6)
    self.assertAlmostEqual(abs(vxc[3]-test_ref[6:8]).max(), 0, 5)

    exc, vxc, fxc, kxc = dft.xcfun.eval_xc('m05,', rho[0], 0, deriv=3)
    self.assertAlmostEqual(float(exc), -0.5746231988116002, 5)
    self.assertAlmostEqual(float(vxc[0]), -0.8806121005703862, 6)
    self.assertAlmostEqual(float(vxc[1]), -0.0032300155406846756, 7)
    self.assertAlmostEqual(float(vxc[3]), 0.4474953100487698, 5)
def test_camb3lyp(self):
    """CAM-B3LYP range-separated hybrid: energy/potential on one point,
    RSH coefficients, and an equivalent explicit RSH(...) spelling."""
    rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
    exc, vxc, fxc, kxc = dft.xcfun.eval_xc('camb3lyp', rho, 0, deriv=1)
    self.assertAlmostEqual(float(exc), -0.5752559666317147, 5)
    self.assertAlmostEqual(float(vxc[0]), -0.7709812578936763, 5)
    self.assertAlmostEqual(float(vxc[1]), -0.0029862221286189846, 7)

    self.assertEqual(dft.xcfun.rsh_coeff('camb3lyp'), (0.33, 0.65, -0.46))

    # Same functional written with explicit RSH parameters.
    rho = numpy.array([1., 1., 0.1, 0.1]).reshape(-1,1)
    exc, vxc, fxc, kxc = dft.xcfun.eval_xc('RSH(0.65;-0.46;0.5) + BECKECAMX', rho, 0, deriv=1)
    self.assertAlmostEqual(float(exc), -0.48916154057161476, 9)
    self.assertAlmostEqual(float(vxc[0]), -0.6761177630311709, 9)
    self.assertAlmostEqual(float(vxc[1]), -0.002949151742087167, 9)
def test_define_xc(self):
    """Register user-defined XC evaluators on a numint object and check
    that an invalid definition is rejected."""
    def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
        # Dummy evaluator; a real one would return the XC energy density
        # and its derivatives.
        exc = vxc = fxc = kxc = None
        return exc, vxc, fxc, kxc

    mf = dft.RKS(mol)
    ni = dft.xcfun.define_xc(mf._numint, eval_xc, 'GGA', hyb=0.2)
    ni = dft.xcfun.define_xc(mf._numint, 'b3lyp+vwn', 'GGA', hyb=0.2)
    self.assertRaises(ValueError, dft.xcfun.define_xc, mf._numint, 0.1)
def test_vs_libxc_rks(self):
    """Cross-check xcfun against libxc for a spin-restricted density over a
    large set of functionals.

    ``deriv`` limits how many derivative orders are compared; the
    ``*_place`` arguments loosen the comparison precision (decimal places)
    for functionals where the two backends agree less tightly.  Lines
    prefixed ``#?`` are functionals that crash or are unsupported in one
    of the backends.
    """
    ao = dft.numint.eval_ao(mol, mf.grids.coords[:200], deriv=2)
    rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')
    rhoa = rho[:,:200]
    def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):
        # Evaluate with both backends and compare exc and, as requested,
        # the 1st (vxc), 2nd (fxc) and 3rd (kxc) derivative components.
        exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, rhoa, 0, deriv=deriv)
        exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, rhoa, 0, deriv=deriv)
        self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)
        if deriv > 0:
            for v0, v1 in zip(vxc0, vxc1):
                if v0 is not None and v1 is not None:
                    self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)
        if deriv > 1:
            for f0, f1 in zip(fxc0, fxc1):
                if f0 is not None and f1 is not None:
                    self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)
        if deriv > 2:
            for k0, k1 in zip(kxc0, kxc1):
                if k0 is not None and k1 is not None:
                    self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)
    # Exchange parts only (trailing comma = no correlation).
    check('lda,')
    check('pw86,')
    check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)
    #?check('becke,')
    #?check('br,')
    #?check('LDAERF,')
    check('optx,')
    check('OPTXCORR,')
    check('RPBE,')
    check('TF,' )
    check('PW91,' , e_place=6, v_place=4, f_place=2, k_place=-1)
    check('m05' , deriv=1, e_place=6, v_place=6)
    #check('m05-2x,', deriv=1, e_place=6, v_place=6)
    check('m06' , deriv=1, e_place=6, v_place=6)
    check('m06,' , deriv=1, e_place=6, v_place=6)
    check('m062x,', deriv=1, e_place=6, v_place=6)
    check('m06l,' , deriv=1, e_place=6, v_place=6)
    check('TPSS,' , k_place=-4)
    #?check('REVTPSS,', deriv=1)  # xcfun crash
    check('APBE,')
    check('BLOC,' , k_place=-5)
    check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)
    # Correlation parts only (leading comma = no exchange).
    check(',vwn3')
    check(',vwn5')
    check(',pbe' , deriv=3)
    #?check(',br')
    #?check(',LDAERF')
    check(',lyp' , deriv=3, k_place=0)
    check(',SPBE' , deriv=3, e_place=1, v_place=1, f_place=0, k_place=-2)
    check(',PW91' , deriv=3, e_place=5, v_place=3, f_place=0, k_place=-2)
    check(',m052x', deriv=1)
    check(',m05' , deriv=1)
    check(',m06' , deriv=1)
    check(',m062x', deriv=1)
    check(',m06l' , deriv=1)
    check(',TPSS' , deriv=1)
    check(',REVTPSS', deriv=1, e_place=2, v_place=1)
    check(',p86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-1)
    check(',APBE' , deriv=3)
    check(',PBEINT' , deriv=3)
    check(',TPSSLOC', deriv=1)
    # Combined / hybrid functionals.
    #?check('br')
    check('revpbe', deriv=3, e_place=6, v_place=6, f_place=5, k_place=4)
    check('b97' , deriv=3, e_place=6, v_place=5, f_place=3, k_place=-5)
    #?check('b97_1')
    #?check('b97_2')
    check('SVWN')
    check('BLYP' , deriv=3, k_place=0)
    check('BP86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-1)
    check('OLYP' , deriv=3, k_place=0)
    check('KT1' , deriv=3, k_place=0)
    check('KT2' , deriv=3, k_place=-1)
    #?check('KT3')
    check('PBE0' , deriv=3, e_place=6, v_place=6, f_place=5, k_place=-2)
    check('B3P86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-1)
    check('B3P86G' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-2)
    check('B3PW91' , deriv=3, e_place=5, v_place=3, f_place=0, k_place=-2)
    check('B3PW91G', deriv=3, e_place=2, v_place=2, f_place=0, k_place=-4)
    check('B3LYP' , deriv=3, k_place=0)
    check('B3LYP5' , deriv=3, k_place=0)
    check('B3LYPG' , deriv=3, k_place=-2)
    check('O3LYP' , deriv=3, k_place=-2)
    check('X3LYP' , deriv=3, e_place=7, v_place=5, f_place=2, k_place=0)
    check('CAMB3LYP', deriv=1)
    check('B97_1' , deriv=2, e_place=6, v_place=5, f_place=3)
    check('B97_2' , deriv=2, e_place=6, v_place=5, f_place=3)
    check('TPSSH' , deriv=1)
def test_vs_libxc_uks(self):
    """Cross-check xcfun against libxc for a spin-unrestricted density.

    Same structure as test_vs_libxc_rks, but alpha and beta densities are
    passed separately (spin=1) and the beta density is deliberately made
    different from the alpha one.  ``*_place`` loosens precision where the
    backends agree less tightly; ``#?`` marks unsupported/crashing cases.
    """
    ao = dft.numint.eval_ao(mol, mf.grids.coords[:400], deriv=2)
    rho = dft.numint.eval_rho(mol, ao, dm, xctype='MGGA')
    rhoa = rho[:,:200]
    # Beta density differs from alpha so spin polarization is exercised.
    rhob = rhoa + rho[:,200:400]
    def check(xc_code, deriv=3, e_place=9, v_place=9, f_place=9, k_place=9):
        # Evaluate with both backends and compare up to the requested
        # derivative order; kxc may be absent entirely for some functionals.
        exc0, vxc0, fxc0, kxc0 = dft.libxc.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)
        exc1, vxc1, fxc1, kxc1 = dft.xcfun.eval_xc(xc_code, (rhoa, rhob), 1, deriv=deriv)
        self.assertAlmostEqual(abs(exc0-exc1).max(), 0, e_place)
        if deriv > 0:
            for v0, v1 in zip(vxc0, vxc1):
                if v0 is not None and v1 is not None:
                    self.assertAlmostEqual(abs(v0-v1).max(), 0, v_place)
        if deriv > 1:
            for f0, f1 in zip(fxc0, fxc1):
                if f0 is not None and f1 is not None:
                    self.assertAlmostEqual(abs(f0-f1).max(), 0, f_place)
        if deriv > 2 and kxc0 is not None:
            for k0, k1 in zip(kxc0, kxc1):
                if k0 is not None and k1 is not None:
                    self.assertAlmostEqual(abs(k0-k1).max(), 0, k_place)
    # Exchange parts only.
    check('lda,')
    check('pw86,')
    check('pbe,', e_place=6, v_place=6, f_place=5, k_place=4)
    #?check('becke,')
    #?check('br,')
    #?check('LDAERF,')
    check('optx,')
    check('OPTXCORR,')
    check('RPBE,')
    check('TF,' , e_place=0, v_place=-1, f_place=-2, k_place=-2)
    check('PW91,' , e_place=6, v_place=4, f_place=2, k_place=-1)
    check('m05' , deriv=1, e_place=6, v_place=6)
    #check('m052x,', deriv=1, e_place=6, v_place=6)
    check('m06' , deriv=1, e_place=6, v_place=6)
    check('m06,' , deriv=1, e_place=6, v_place=6)
    check('m062x,', deriv=1, e_place=6, v_place=6)
    check('m06l,' , deriv=1, e_place=6, v_place=6)
    check('TPSS,' , k_place=-4)
    #?check('REVTPSS,', deriv=1)  # libxc crash
    check('APBE,')
    check('BLOC,' , k_place=-5)
    check('PBEINT,', e_place=7, v_place=6, f_place=5, k_place=4)
    # Correlation parts only.
    check(',vwn3', e_place=2, v_place=1, f_place=1, k_place=0)
    check(',vwn5')
    check(',pbe' , deriv=3, k_place=0)
    #?check(',br')
    #?check(',LDAERF')
    check(',lyp' , deriv=3, k_place=-1)
    check(',SPBE' , deriv=3, e_place=1, v_place=1, f_place=0, k_place=-1)
    check(',PW91' , deriv=3, e_place=5, v_place=3, f_place=2, k_place=-2)
    check(',m052x', deriv=1)
    check(',m05' , deriv=1)
    check(',m06' , deriv=1)
    check(',m062x', deriv=1)
    check(',m06l' , deriv=1)
    check(',TPSS' , deriv=1)
    check(',REVTPSS', deriv=1, e_place=2, v_place=1)
    check(',p86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-2)
    check(',APBE' , deriv=3, k_place=-1)
    check(',PBEINT' , deriv=3, k_place=-1)
    check(',TPSSLOC', deriv=1)
    # Combined / hybrid functionals.
    #?check('br')
    check('revpbe', deriv=3, e_place=6, v_place=6, f_place=5, k_place=0)
    check('b97' , deriv=3, e_place=6, v_place=5, f_place=3, k_place=-5)
    #?check('b97_1')
    #?check('b97_2')
    check('SVWN')
    check('BLYP' , deriv=3, k_place=-1)
    check('BP86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-3)
    check('OLYP' , deriv=3, k_place=-1)
    check('KT1' , deriv=3, k_place=-2)
    check('KT2' , deriv=3, k_place=-2)
    #?check('KT3')
    check('PBE0' , deriv=3, e_place=6, v_place=6, f_place=5, k_place=-2)
    check('B3P86' , deriv=3, e_place=5, v_place=5, f_place=3, k_place=-2)
    check('B3P86G' , deriv=3, e_place=3, v_place=2, f_place=2, k_place=-3)
    check('B3PW91' , deriv=3, e_place=5, v_place=4, f_place=2, k_place=-1)
    check('B3PW91G', deriv=3, e_place=2, v_place=2, f_place=2, k_place=-2)
    check('B3LYP' , deriv=3, k_place=-1)
    check('B3LYP5' , deriv=3, k_place=-1)
    check('B3LYPG' , deriv=3, e_place=3, v_place=2, f_place=2, k_place=-2)
    check('O3LYP' , deriv=3, e_place=3, v_place=2, f_place=1, k_place=-2)
    check('X3LYP' , deriv=3, e_place=7, v_place=5, f_place=2, k_place=-1)
    check('CAMB3LYP', deriv=1, v_place=2)
    check('B97_1' , deriv=3, e_place=6, v_place=5, f_place=3, k_place=-4)
    check('B97_2' , deriv=3, e_place=6, v_place=5, f_place=3, k_place=-3)
    check('TPSSH' , deriv=1)
# Allow running this test module directly: ``python test_xcfun.py``.
if __name__ == "__main__":
    print("Test xcfun")
    unittest.main()
| gkc1000/pyscf | pyscf/dft/test/test_xcfun.py | Python | apache-2.0 | 20,515 | [
"PySCF"
] | 1b7d2b7792aee07846f438ad48359c3b9f59ddca3af5807503913f19d7890418 |
# OUTCARToGULP_ModeMap.py by J. M. Skelton
import csv;
import math;
import os;
from argparse import ArgumentParser;
# Import IO routines from OUTCARToGULP.py.
from OUTCARToGULP import _ReadOUTCARFile, _WriteGULPInputFile;
# Default base for the automatically-determined output file name.
_DefaultOutputFileNameBase = r"OUTCARToGULP_ModeMap.gulp";

# Main block: parse arguments, read OUTCAR file(s) (optionally paired with a
# ModeMap.py CSV of mode amplitudes/energies), filter structures by
# gradient/stress thresholds, and write a GULP fitting input file.
if __name__ == "__main__":
    # Collect and parse command-line arguments.
    parser = ArgumentParser(description = "Extract fitting data for GULP from a VASP OUTCAR file");

    parser.set_defaults(
        OutputFile = None,
        OutputName = None,
        ModeMapCSV = None,
        AddCommands = False,
        GradientThreshold = 1.0e-5,
        StressThreshold = 1.0e-5
        );

    parser.add_argument(
        metavar = "<input_file>",
        nargs = '+', type = str, dest = 'InputFiles',
        help = "Input files to read"
        );

    parser.add_argument(
        "-o", "--output_file",
        metavar = "<output_file>",
        type = str, dest = 'OutputFile',
        help = "Output file (default: automatically determined)"
        );

    parser.add_argument(
        "-n", "--output_name",
        metavar = "<output_name>",
        type = str, dest = 'OutputName',
        help = "Optional identifier to be added to the header comment above each data block"
        );

    parser.add_argument(
        "--mode_map_csv",
        metavar = "<mode_map_csv>",
        type = str, dest = 'ModeMapCSV',
        help = "Optional \"ModeMap_PostProcess.csv\" file listing the normal-mode amplitudes and relative energies associated with each structure in a sequence generated by ModeMap.py"
        );

    parser.add_argument(
        "--add_commands",
        action = 'store_true', dest = 'AddCommands',
        help = "Add some basic commands to the GULP input file (default: no)"
        );

    parser.add_argument(
        "--gradient_threshold",
        metavar = "<threshold>",
        type = float, dest = 'GradientThreshold',
        help = "Threshold for the output of forces (gradients); sets of gradients where all components have absolute values less than this will not be output (default: 1.0e-5)"
        );

    parser.add_argument(
        "--stress_threshold",
        metavar = "<threshold>",
        type = float, dest = 'StressThreshold',
        help = "Threshold for the output of stress tensor components (strain derivatives); stress tensors where all components have absolute values less than this will not be output (default: 1.0e-5)"
        );

    args = parser.parse_args();

    # Perform some basic validation.
    if args.GradientThreshold is not None and args.GradientThreshold < 0.0:
        raise Exception("Error: If supplied, the gradient component threshold must be >= 0.");

    if args.StressThreshold is not None and args.StressThreshold < 0.0:
        raise Exception("Error: If supplied, the stress-tensor component threshold must be >= 0.");

    # Read input files.
    print("Reading input file(s)...");

    inputDataSets = [];

    for inputFile in args.InputFiles:
        print(" -> \"{0}\"".format(inputFile));

        formula, atomTypesList, atomicMassesList, structures, phononModes, elasticConstantMatrix = _ReadOUTCARFile(inputFile);

        # For this script, we expect input files to contain one structure, and not to contain data such as phonon frequencies/eigenvectors or an elastic-constant matrix.
        # If this is not the case, print a message to warn the user that some data will not be output.
        if len(structures) > 1 or (phononModes is not None or elasticConstantMatrix is not None):
            print(" -> WARNING: This script only outputs the gradients and/or stress tensor for the first structure in each input file; \"{0}\" contains additional structures/properties that will not be output.".format(inputFile));

        inputDataSets.append(
            (formula, atomTypesList, structures[0])
            );

    print("");

    # If a "ModeMap_PostProcess.csv" file was passed via the --mode_map_csv command-line argument, read in the normal-mode amplitudes and relative energies of each input structure.
    modeMapAmplitudesEnergies = None;

    if args.ModeMapCSV is not None:
        modeMapAmplitudesEnergies = [];

        print("Reading \"{0}\"...".format(args.ModeMapCSV));

        with open(args.ModeMapCSV, 'r') as inputReader:
            inputReaderCSV = csv.reader(inputReader);

            # Skip three rows (title/header lines written by ModeMap_PostProcess).
            for i in range(0, 3):
                next(inputReaderCSV);

            # Each row contains Q, U(Q), dU(Q) columns - the first and last will be used to generate labels.
            for i, row in enumerate(inputReaderCSV):
                q, uQ, dUQ = [float(item) for item in row];

                modeMapAmplitudesEnergies.append(
                    (q, dUQ)
                    );

        # Sanity check.
        if len(modeMapAmplitudesEnergies) != len(inputDataSets):
            raise Exception("Error: If a \"ModeMap_PostProcess.csv\" file is supplied via the --mode_map_csv argument, it must contain one entry for each input file.");

        print("");

    # Generate descriptive header comments and names for each data set.
    dataSetLabels = [];

    for i, (formula, _, _) in enumerate(inputDataSets):
        # Generate a name to insert into the header comment.
        outputName = None;

        # If an identifier was supplied via the --output_name command-line argument, use this as a base; if not, use the structure number.
        if args.OutputName is not None:
            # If we have mode amplitudes, we have information to assign each structure a unique header comment; if not, append the file number to the identifier instead.
            if modeMapAmplitudesEnergies is not None:
                outputName = args.OutputName;
            else:
                outputName = "{0}, Input File {1}".format(args.OutputName, i + 1);
        else:
            outputName = "Input File {0} ({1})".format(i + 1, args.InputFiles[i]);

        # If the --mode_map_csv argument was supplied via the command line, we assume we are processing a mode map calculation.
        if modeMapAmplitudesEnergies is not None:
            outputName = "mode map {0}".format(outputName);

        # Build the output name into the header comment.
        headerComment = "Data for {0}".format(outputName);

        # If we are processing a mode map calculation, append the mode amplitude and relative energy to the header comment.
        if modeMapAmplitudesEnergies is not None:
            q, dUQ = modeMapAmplitudesEnergies[i];
            headerComment = "{0} w/ Q = {1:.2f} amu^1/2 A, dU(Q) = {2:.0f} meV".format(headerComment, q, dUQ);

        # Generate a name to add to the GULP data block.
        name = formula;

        # If we are processing a mode map calculation, use the mode amplitude in the name.
        if modeMapAmplitudesEnergies is not None:
            q, _ = modeMapAmplitudesEnergies[i];
            name = "{0} (Input File {1} w/ Q = {2:.2f})".format(name, i + 1, q);
        else:
            name = "{0} (Input File {1})".format(name, i + 1);

        dataSetLabels.append(
            (headerComment, name)
            );

    # Build a list of data sets to output.
    outputDataSets = [];

    # We only want to output data blocks for structures where the forces (gradients) and/or stress tensor elements are above the set thresholds.
    for i, (_, atomTypesList, structure) in enumerate(inputDataSets):
        latticeVectors, atomPositions, totalEnergy, stressTensor, forceSet = structure;

        # Decide whether to output the gradients.
        outputGradients = False;

        # If no threshold is set, output by default.
        if args.GradientThreshold is None:
            outputGradients = True;
        else:
            # If a threshold has been set, check the components of the forces against the threshold.
            for fx, fy, fz in forceSet:
                if math.fabs(fx) >= args.GradientThreshold or math.fabs(fy) >= args.GradientThreshold or math.fabs(fz) >= args.GradientThreshold:
                    outputGradients = True;
                    break;

        # Decide whether to output the stress tensor.
        outputStressTensor = False;

        if stressTensor is not None:
            # Again, if no threshold is set, output by default (if available).
            if args.StressThreshold is None:
                outputStressTensor = True;
            else:
                # If a threshold has been set, the stress tensor components are output when any are above the threshold.
                # NOTE(review): the division by 10 looks like a kBar -> GPa unit conversion - confirm against _WriteGULPInputFile.
                for element in stressTensor:
                    outputStressTensor = outputStressTensor or math.fabs(element) / 10.0 >= args.StressThreshold;

        # Fetch the header comment and output name.
        headerComment, name = dataSetLabels[i];

        # Add the structure to the output data sets if required.
        if outputGradients or outputStressTensor:
            outputDataSet = {
                'HeaderComment' : headerComment,
                'Name' : name,
                'LatticeVectors' : latticeVectors,
                'AtomTypesList' : atomTypesList,
                'AtomPositions' : atomPositions,
                'TotalEnergy' : totalEnergy
                };

            if outputStressTensor:
                outputDataSet['StressTensor'] = stressTensor;

            if outputGradients:
                outputDataSet['ForceSet'] = forceSet;

            outputDataSets.append(outputDataSet);
        else:
            # If the gradients and diagonal stress-tensor elements are below the threshold, output a comment to note why the data set was excluded.
            outputDataSets.append(
                { 'HeaderComment' : "INFO: The gradient and/or stress-tensor components for \"{0}\" ({1}) are below the set thresholds (gradients: {2:.2e}, stress: {3:.2e}) -> data set not output.".format(name, args.InputFiles[i], args.GradientThreshold, args.StressThreshold) }
                );

    # Work out a name for the output file.
    outputFile = args.OutputFile;

    if outputFile is None:
        outputFile = _DefaultOutputFileNameBase;

        # Avoid overwriting the default output file if already present.
        root, ext = os.path.splitext(_DefaultOutputFileNameBase);

        if os.path.isfile(outputFile):
            fileNumber = 2;

            while True:
                # BUG FIX: os.path.splitext() keeps the leading dot in ext, so
                # the old format string "{0}-{1}.{2}" produced names with a
                # doubled dot (e.g. "OUTCARToGULP_ModeMap-2..gulp").
                outputFile = "{0}-{1}{2}".format(root, fileNumber, ext);

                if not os.path.isfile(outputFile):
                    break;

                fileNumber = fileNumber + 1;

    # Write out the data sets.
    print("Writing data to \"{0}\"...".format(outputFile));

    _WriteGULPInputFile(outputDataSets, outputFile, addCommands = args.AddCommands);

    print("");

    # Print a "finished" message.
    print("Done!");
| JMSkelton/VASPToGULP | OUTCARToGULP_ModeMap.py | Python | gpl-3.0 | 10,936 | [
"GULP",
"VASP"
] | 70b8f8c8d0e30167ac5acff7392fa12dac41fc8f9887ee8585c63cad53d0fa42 |
from mayavi.version import version, version as __version__
from mayavi.core.engine import Engine
from mayavi.core.off_screen_engine import OffScreenEngine
from mayavi.tests.runtests import m2_tests as test
| dmsurti/mayavi | mayavi/api.py | Python | bsd-3-clause | 206 | [
"Mayavi"
] | c5562d4d22d9436a4b2ac8b908935ed4b5faccabbad05c42edb7f84d20996d24 |
# Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Antonio Cuni
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from pyrepl import commands, reader
from pyrepl.reader import Reader
def prefix(wordlist, j=0):
    """Return the longest common prefix of the strings in *wordlist*,
    considering only characters from index *j* onward.

    The returned slice is ``wordlist[0][j:i]`` where *i* is the first
    position at which the words disagree, or at which the shortest word
    ends (detected via IndexError).

    BUG FIX: the original looped forever on an empty *wordlist* (the
    ``while 1`` body never indexed a word, so the IndexError escape never
    fired); an empty list now returns ''.
    """
    if not wordlist:
        return ''
    i = j
    try:
        while 1:
            # Collect the set of characters the words have at position i.
            d = {}
            for word in wordlist:
                d[word[i]] = 1
            if len(d) > 1:
                # Words disagree here: the common prefix ends at i.
                return wordlist[0][j:i]
            i += 1
    except IndexError:
        # Some word ended: everything up to (not including) i was common.
        return wordlist[0][j:i]
import re
def stripcolor(s):
    """Return *s* with ANSI escape sequences (SGR color codes and
    erase-in-line) removed."""
    return stripcolor.regexp.sub('', s)
# Matches ESC [ <optional numeric params> followed by 'm' (SGR / color) or
# 'K' (erase-in-line).
# BUG FIX: the original class was "[m|K]"; inside a character class '|' is a
# literal, so a stray "ESC[...|" was incorrectly stripped as well.
stripcolor.regexp = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[mK]")
def real_len(s):
    # Display width of s, i.e. its length once ANSI color escapes are removed.
    return len(stripcolor(s))
def left_align(s, maxlen):
    """Fit *s* into *maxlen* display columns: pad with spaces on the right,
    or — if it is too wide once color escapes are ignored — drop the colors
    and hard-truncate."""
    plain = stripcolor(s)
    if len(plain) > maxlen:
        # Too wide: give up on the colors and truncate the plain text.
        return plain[:maxlen]
    return s + ' ' * (maxlen - len(plain))
def build_menu(cons, wordlist, start, use_brackets, sort_in_column):
    # Lay out wordlist as a completion menu for the console `cons`, starting
    # from entry index `start`.  Returns (menu_lines, next_start): next_start
    # is where a subsequent invocation should resume (0 once everything has
    # been shown), allowing repeated Tab presses to page through the choices.
    if use_brackets:
        item = "[ %s ]"
        padding = 4
    else:
        # NOTE(review): padding = 2 suggests the literal should carry two
        # trailing spaces; the source may have been whitespace-mangled - verify.
        item = "%s "
        padding = 2
    # Column width is the widest entry (in display columns), capped to the
    # console width; from that derive how many columns and rows are needed.
    maxlen = min(max(map(real_len, wordlist)), cons.width - padding)
    cols = cons.width // (maxlen + padding)
    rows = (len(wordlist) - 1)//cols + 1

    if sort_in_column:
        # sort_in_column=False (default)     sort_in_column=True
        #          A B C                       A D G
        #          D E F                       B E
        #          G                           C F
        #
        # "fill" the table with empty words, so we always have the same amount
        # of rows for each column
        missing = cols*rows - len(wordlist)
        wordlist = wordlist + ['']*missing
        # Transpose the row-major order into column-major order.
        indexes = [(i%cols)*rows + i//cols for i in range(len(wordlist))]
        wordlist = [wordlist[i] for i in indexes]

    menu = []
    i = start
    for r in range(rows):
        row = []
        for col in range(cols):
            row.append(item % left_align(wordlist[i], maxlen))
            i += 1
            if i >= len(wordlist):
                break
        menu.append( ''.join(row) )
        if i >= len(wordlist):
            # All entries shown: reset the paging cursor for the next round.
            i = 0
            break
        if r + 5 > cons.height:
            # Menu would not leave enough screen room: stop and show how many
            # entries remain (the next Tab press continues from i).
            menu.append(" %d more... "%(len(wordlist) - i))
            break
    return menu, i
# this gets somewhat user interface-y, and as a result the logic gets
# very convoluted.
#
# To summarise the summary of the summary:- people are a problem.
# -- The Hitch-Hikers Guide to the Galaxy, Episode 12
#### Desired behaviour of the completions commands.
# the considerations are:
# (1) how many completions are possible
# (2) whether the last command was a completion
# (3) if we can assume that the completer is going to return the same set of
# completions: this is controlled by the ``assume_immutable_completions``
# variable on the reader, which is True by default to match the historical
# behaviour of pyrepl, but e.g. False in the ReadlineAlikeReader to match
# more closely readline's semantics (this is needed e.g. by
# fancycompleter)
#
# if there's no possible completion, beep at the user and point this out.
# this is easy.
#
# if there's only one possible completion, stick it in. if the last thing
# the user did was a completion, point out that they aren't getting anywhere,
# but only if ``assume_immutable_completions`` is True.
#
# now it gets complicated.
#
# for the first press of a completion key:
# if there's a common prefix, stick it in.
# irrespective of whether anything got stuck in, if the word is now
# complete, show the "complete but not unique" message
# if there's no common prefix and if the word is not now complete,
# beep.
# common prefix -> yes no
# word complete \/
# yes "cbnu" "cbnu"
# no - beep
# for the second bang on the completion key
# there will necessarily be no common prefix
# show a menu of the choices.
# for subsequent bangs, rotate the menu around (if there are sufficient
# choices).
class complete(commands.Command):
    """Tab-completion command implementing the behaviour described in the
    comment block above: insert the sole completion or common prefix, and on
    a repeated press show (or page through) a menu of the choices."""
    def do(self):
        r = self.reader
        stem = r.get_stem()
        # Reuse the cached candidate list on a repeated Tab press when the
        # completer is declared immutable; otherwise recompute and cache it.
        if r.assume_immutable_completions and \
                r.last_command_is(self.__class__):
            completions = r.cmpltn_menu_choices
        else:
            r.cmpltn_menu_choices = completions = \
                r.get_completions(stem)
        if len(completions) == 0:
            r.error("no matches")
        elif len(completions) == 1:
            # Sole match: insert the remainder; on a repeat press with the
            # word already complete, just tell the user nothing more can happen.
            if r.assume_immutable_completions and \
                    len(completions[0]) == len(stem) and \
                    r.last_command_is(self.__class__):
                r.msg = "[ sole completion ]"
                r.dirty = 1
            r.insert(completions[0][len(stem):])
        else:
            # Multiple matches: insert the common prefix (if any), then on a
            # repeat press build/rotate the completion menu.
            p = prefix(completions, len(stem))
            if p:
                r.insert(p)
            if r.last_command_is(self.__class__):
                if not r.cmpltn_menu_vis:
                    r.cmpltn_menu_vis = 1
                r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
                    r.console, completions, r.cmpltn_menu_end,
                    r.use_brackets, r.sort_in_column)
                r.dirty = 1
            elif stem + p in completions:
                r.msg = "[ complete but not unique ]"
                r.dirty = 1
            else:
                r.msg = "[ not unique ]"
                r.dirty = 1
class self_insert(commands.self_insert):
    """Ordinary character insertion that, while the completion menu is
    visible, narrows the menu to the candidates still matching the stem
    (or hides it when nothing matches any more)."""
    def do(self):
        # Perform the normal insertion first.
        commands.self_insert.do(self)
        r = self.reader
        if r.cmpltn_menu_vis:
            stem = r.get_stem()
            if len(stem) < 1:
                r.cmpltn_reset()
            else:
                completions = [w for w in r.cmpltn_menu_choices
                               if w.startswith(stem)]
                if completions:
                    # Rebuild the menu from the narrowed candidate set.
                    r.cmpltn_menu, r.cmpltn_menu_end = build_menu(
                        r.console, completions, 0,
                        r.use_brackets, r.sort_in_column)
                else:
                    r.cmpltn_reset()
class CompletingReader(Reader):
    """A Reader subclass that adds Tab-completion support.

    Instance state added on top of Reader:
    cmpltn_menu -- list of rendered menu lines (see build_menu)
    cmpltn_menu_vis -- whether the menu is currently shown
    cmpltn_menu_end -- paging cursor for the next menu rotation
    cmpltn_menu_choices -- cached completion candidates

    Subclasses provide candidates by overriding get_completions().
    """

    # see the comment for the complete command
    assume_immutable_completions = True
    use_brackets = True  # display completions inside []
    sort_in_column = False

    def collect_keymap(self):
        # Bind Tab to the completion command on top of the base keymap.
        return super(CompletingReader, self).collect_keymap() + (
            (r'\t', 'complete'),)

    def __init__(self, console):
        super(CompletingReader, self).__init__(console)
        self.cmpltn_menu = ["[ menu 1 ]", "[ menu 2 ]"]
        self.cmpltn_menu_vis = 0
        self.cmpltn_menu_end = 0
        # Register the completion-aware commands under both their Python
        # names and their hyphenated (keymap-style) aliases.
        for c in [complete, self_insert]:
            self.commands[c.__name__] = c
            self.commands[c.__name__.replace('_', '-')] = c

    def after_command(self, cmd):
        super(CompletingReader, self).after_command(cmd)
        # Any command other than completion or plain insertion dismisses
        # the completion menu and clears its cached state.
        if not isinstance(cmd, self.commands['complete']) \
           and not isinstance(cmd, self.commands['self_insert']):
            self.cmpltn_reset()

    def calc_screen(self):
        screen = super(CompletingReader, self).calc_screen()
        if self.cmpltn_menu_vis:
            # Splice the menu lines into the screen just below the cursor
            # line and shift the cursor position down accordingly.
            ly = self.lxy[1]
            screen[ly:ly] = self.cmpltn_menu
            self.screeninfo[ly:ly] = [(0, [])]*len(self.cmpltn_menu)
            self.cxy = self.cxy[0], self.cxy[1] + len(self.cmpltn_menu)
        return screen

    def finish(self):
        super(CompletingReader, self).finish()
        self.cmpltn_reset()

    def cmpltn_reset(self):
        # Hide the menu and drop all cached completion state.
        self.cmpltn_menu = []
        self.cmpltn_menu_vis = 0
        self.cmpltn_menu_end = 0
        self.cmpltn_menu_choices = []

    def get_stem(self):
        # Walk left from the cursor over "word" characters (per the syntax
        # table) and return the word fragment to be completed.
        st = self.syntax_table
        SW = reader.SYNTAX_WORD
        b = self.buffer
        p = self.pos - 1
        while p >= 0 and st.get(b[p], SW) == SW:
            p -= 1
        return ''.join(b[p+1:self.pos])

    def get_completions(self, stem):
        # Hook for subclasses: return the completion candidates for `stem`.
        return []
def test():
    # Interactive smoke test: completes words drawn from the input history.
    # NOTE(review): TestReader() is called with no arguments, but
    # CompletingReader.__init__ requires a console - as written this appears
    # to raise TypeError; confirm whether Reader grew a default console.
    class TestReader(CompletingReader):
        def get_completions(self, stem):
            # Candidates are all whitespace-separated tokens from the
            # history that start with the stem.
            return [s for l in map(lambda x:x.split(),self.history)
                    for s in l if s and s.startswith(stem)]
    reader = TestReader()
    reader.ps1 = "c**> "
    reader.ps2 = "c/*> "
    reader.ps3 = "c|*> "
    reader.ps4 = "c\*> "
    # Loop until readline() returns an empty result (EOF).
    while reader.readline():
        pass
# Run the interactive smoke test when executed as a script.
if __name__=='__main__':
    test()
| timm/timmnix | pypy3-v5.5.0-linux64/lib_pypy/pyrepl/completing_reader.py | Python | mit | 9,446 | [
"Galaxy"
] | 9a5d89d29dbb8d9139f1cdbb3806f451a374499d647964d1103d66ca4a08168a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.