text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import wx
import wx.grid
import wx.lib.scrolledpanel
import wx.stc
import os
import os.path
import time
import platform
import multiprocessing
import datetime
import Bio.PDB
import webbrowser
from threading import Thread
from tools import *
class AntibodyPanel(wx.lib.scrolledpanel.ScrolledPanel):
"""Scrolled protocol panel for submitting antibody-modeling jobs to a remote server."""
def __init__(self, parent, W, H):
"""Build all static widgets for the panel.

parent -- the containing window; parent.parent is expected to expose scriptdir
W, H   -- main window width/height, used to size this panel
"""
#if (platform.system() == "Windows"):
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtAntibody")
winh = H-330
#else:
#wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, id=-1, pos=(10, 60), size=(340, H-330), name="ProtMinimization")
#winh = H-290
self.SetBackgroundColour("#333333")
self.parent = parent
# Panel title: pre-rendered bitmap on OSX (presumably because native text
# rendering differs there — TODO confirm), styled StaticText elsewhere.
if (platform.system() == "Windows"):
self.lblProt = wx.StaticText(self, -1, "Antibody Modeling", (25, 15), (270, 25), wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblProt = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/lblAntibody.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(25, 15), size=(270, 25))
else:
self.lblProt = wx.StaticText(self, -1, "Antibody Modeling", (70, 15), style=wx.ALIGN_CENTRE)
self.lblProt.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.lblProt, 0, self.GetSize()[0])
self.lblProt.SetForegroundColour("#FFFFFF")
# Help button in the top-right corner; opens the protocol help page.
if (platform.system() == "Darwin"):
self.HelpBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/HelpBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(295, 10), size=(25, 25))
else:
self.HelpBtn = wx.Button(self, id=-1, label="?", pos=(295, 10), size=(25, 25))
self.HelpBtn.SetForegroundColour("#0000FF")
self.HelpBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.HelpBtn.Bind(wx.EVT_BUTTON, self.showHelp)
self.HelpBtn.SetToolTipString("Display the help file for this window")
# One-line instruction label under the title.
if (platform.system() == "Windows"):
self.lblInst = wx.StaticText(self, -1, "Model antibodies on a remote server", (0, 45), (320, 25), wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
elif (platform.system() == "Darwin"):
self.lblInst = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/lblInstAntibody.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 45), size=(320, 25))
else:
self.lblInst = wx.StaticText(self, -1, "Model antibodies on a remote server", (5, 45), style=wx.ALIGN_CENTRE)
self.lblInst.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
resizeTextControlForUNIX(self.lblInst, 0, self.GetSize()[0])
self.lblInst.SetForegroundColour("#FFFFFF")
# Light-chain sequence entry: label + multiline text box.
if (platform.system() == "Windows"):
self.lblLightChain = wx.StaticText(self, -1, "Variable Light Chain Sequence:", (10, 90), (320, 20), wx.ALIGN_LEFT)
self.lblLightChain.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblLightChain = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/lblLightChain.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 90), size=(320, 20))
else:
self.lblLightChain = wx.StaticText(self, -1, "Variable Light Chain Sequence:", (10, 90), style=wx.ALIGN_LEFT)
self.lblLightChain.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.lblLightChain.SetForegroundColour("#FFFFFF")
self.txtLightChain = wx.TextCtrl(self, -1, pos=(0, 110), size=(320, 75), style=wx.TE_MULTILINE)
self.txtLightChain.SetValue("")
self.txtLightChain.SetToolTipString("Antibody variable light chain protein sequence")
# Heavy-chain sequence entry: label + multiline text box.
if (platform.system() == "Windows"):
self.lblHeavyChain = wx.StaticText(self, -1, "Variable Heavy Chain Sequence:", (10, 200), (320, 20), wx.ALIGN_LEFT)
self.lblHeavyChain.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblHeavyChain = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/lblHeavyChain.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(10, 200), size=(320, 20))
else:
self.lblHeavyChain = wx.StaticText(self, -1, "Variable Heavy Chain Sequence:", (10, 200), style=wx.ALIGN_LEFT)
self.lblHeavyChain.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.lblHeavyChain.SetForegroundColour("#FFFFFF")
self.txtHeavyChain = wx.TextCtrl(self, -1, pos=(0, 220), size=(320, 75), style=wx.TE_MULTILINE)
self.txtHeavyChain.SetValue("")
self.txtHeavyChain.SetToolTipString("Antibody variable heavy chain protein sequence")
# Number-of-models control; validated to the range 1-100 in immunizeClick.
if (platform.system() == "Windows"):
self.lblNumModels = wx.StaticText(self, -1, "Models to Generate:", (0, 313), (260, 20), wx.ALIGN_CENTRE)
self.lblNumModels.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
elif (platform.system() == "Darwin"):
self.lblNumModels = wx.StaticBitmap(self, -1, wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/lblNumModels.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(0, 313), size=(260, 20))
else:
self.lblNumModels = wx.StaticText(self, -1, "Models to Generate:", (0, 313), style=wx.ALIGN_CENTRE)
self.lblNumModels.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
resizeTextControlForUNIX(self.lblNumModels, 0, 260)
self.lblNumModels.SetForegroundColour("#FFFFFF")
self.txtNumModels = wx.TextCtrl(self, -1, pos=(260, 310), size=(60, 25))
self.txtNumModels.SetValue("10")
self.txtNumModels.SetToolTipString("Number of antibody models to generate (1-100)")
# Submit button ("Immunize!"); handler validates input and contacts the server.
if (platform.system() == "Darwin"):
self.btnImmunize = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.parent.scriptdir + "/images/osx/antibody/btnImmunize.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(110, 350), size=(100, 25))
else:
self.btnImmunize = wx.Button(self, id=-1, label="Immunize!", pos=(110, 350), size=(100, 25))
self.btnImmunize.SetForegroundColour("#000000")
self.btnImmunize.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.BOLD))
self.btnImmunize.Bind(wx.EVT_BUTTON, self.immunizeClick)
self.btnImmunize.SetToolTipString("Begin threading the sequence onto the template structures")
# Virtual scroll height ends just below the last widget.
self.scrollh = self.btnImmunize.GetPosition()[1] + self.btnImmunize.GetSize()[1] + 5
self.SetScrollbars(1, 1, 320, self.scrollh)
# Remember the scroll offset across tab switches (see scrolled/activate).
self.winscrollpos = 0
self.Bind(wx.EVT_SCROLLWIN, self.scrolled)
def showHelp(self, event):
# Open the help page
if (platform.system() == "Darwin"):
try:
browser = webbrowser.get("Safari")
except:
print "Could not load Safari! The help files are located at " + self.scriptdir + "/help"
return
browser.open(self.parent.parent.scriptdir + "/help/msd.html")
else:
webbrowser.open(self.parent.parent.scriptdir + "/help/msd.html")
def scrolled(self, event):
# Record the current vertical scroll offset so activate() can restore it
# the next time this panel becomes the active tab.
self.winscrollpos = self.GetScrollPos(wx.VERTICAL)
# Let the default scroll processing continue.
event.Skip()
def activate(self):
# Called when this panel is shown again; restore the scroll position that
# the EVT_SCROLLWIN handler saved.
self.Scroll(0, self.winscrollpos)
def setSeqWin(self, seqWin):
# Attach the sequence window and register this panel back with it.
self.seqWin = seqWin
# So the sequence window knows about what model "designed_view" really is
self.seqWin.setProtocolPanel(self)
def setPyMOL(self, pymol):
# Cache the PyMOL session object plus its cmd/stored APIs for easy access.
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
def setSelectWin(self, selectWin):
# Attach the residue-selection window and register the back-reference.
self.selectWin = selectWin
self.selectWin.setProtPanel(self)
def _cleanChainSequence(self, rawtext):
    # Strip FASTA description lines (starting with ">") and all whitespace,
    # returning the bare residue string in upper case.
    seq = ""
    for line in rawtext.split("\n"):
        if (len(line.strip()) > 0 and line.strip()[0] != ">"):
            seq += line
    seq = seq.replace(" ", "").replace("\t", "").replace("\n", "")
    return seq.upper()

def _validateChainSequence(self, seq, chainname):
    # Pop an error dialog and return False when seq contains a letter that is
    # not one of the 20 standard amino acids, or when it is empty; True
    # otherwise.  chainname is "light" or "heavy".
    for char in seq:
        if (not(char in "ACDEFGHIKLMNPQRSTVWY")):
            dlg = wx.MessageDialog(self, "The sequence for the " + chainname + " chain contains an invalid character: " + char, "Invalid Amino Acid", wx.OK | wx.ICON_ERROR | wx.CENTRE)
            dlg.ShowModal()
            dlg.Destroy()
            return False
    if (len(seq) == 0):
        dlg = wx.MessageDialog(self, "You have not provided a sequence for the " + chainname + " chain!", "No " + chainname.capitalize() + " Chain", wx.OK | wx.ICON_ERROR | wx.CENTRE)
        dlg.ShowModal()
        dlg.Destroy()
        return False
    return True

def immunizeClick(self, event):
    """Validate the chain sequences and model count, write the job input
    file, and submit it to the remote server daemon.

    Bound to the "Immunize!" button.  On a submission failure the error text
    is written to "errreport" and recoverFromError() is invoked.
    """
    # This is also the "Finalize!" button
    logInfo("Immunize button clicked")
    goToSandbox()
    # Are both of the sequences valid?  (The cleaning/validation logic was
    # duplicated for the two chains; it now lives in the helpers above.)
    LC_seq = self._cleanChainSequence(self.txtLightChain.GetValue())
    if (not(self._validateChainSequence(LC_seq, "light"))):
        return
    HC_seq = self._cleanChainSequence(self.txtHeavyChain.GetValue())
    if (not(self._validateChainSequence(HC_seq, "heavy"))):
        return
    # Is the number of models provided valid?
    try:
        nmodels = int(self.txtNumModels.GetValue())
        if (nmodels < 1 or nmodels > 100):
            raise Exception()
    except:
        dlg = wx.MessageDialog(self, "The number of models is not valid. Please provide an integer between 1 and 100.", "Number of Models Invalid", wx.OK | wx.ICON_ERROR | wx.CENTRE)
        dlg.ShowModal()
        dlg.Destroy()
        return
    # Write the antibody input file.  NOTE(review): the file is written as
    # "antibodyinputtemp" but submitted as "antibodyinput" — presumably
    # sendToServer handles the temp suffix; confirm in tools.py.
    fin = open("antibodyinputtemp", "w")
    fin.write("LCSEQ\t" + LC_seq + "\n")
    fin.write("HCSEQ\t" + HC_seq + "\n")
    fin.write("NMODELS\t" + str(nmodels) + "\n")
    fin.close()
    # Send it to the server
    try:
        self.ID = sendToServer("antibodyinput")
        dlg = wx.TextEntryDialog(None, "Enter a description for this submission:", "Job Description", "")
        if (dlg.ShowModal() == wx.ID_OK):
            desc = dlg.GetValue()
            desc = desc.replace("\t", " ").replace("\n", " ").strip()
        else:
            desc = self.ID
        # Now write this to a text file
        # A timer on the SequenceWindow will use this information to look for the files to appear on the server
        goToSandbox()
        # First make sure this isn't a duplicate
        alreadythere = False
        try:
            f = open("downloadwatch", "r")
            for aline in f:
                if (len(aline.split("\t")) >= 2 and aline.split("\t")[0] == "ANTIBODY" and aline.split("\t")[1] == self.ID.strip()):
                    alreadythere = True
                    break
            f.close()
        except:
            # No "downloadwatch" file yet, so nothing can be a duplicate.
            pass
        if (not(alreadythere)):
            f = open("downloadwatch", "a")
            f.write("ANTIBODY\t" + self.ID.strip() + "\t" + str(datetime.datetime.now().strftime("%A, %B %d - %I:%M:%S %p")) + "\t" + getServerName() + "\t" + desc + "\n")
            f.close()
        dlg = wx.MessageDialog(self, "InteractiveROSETTA is now watching the server for job ID " + desc.strip() + ". You will be notified when the package is available for download.", "Listening for Download", wx.OK | wx.ICON_EXCLAMATION | wx.CENTRE)
        dlg.ShowModal()
        dlg.Destroy()
        logInfo("Antibody design input sent to server daemon with ID " + self.ID)
    except Exception as e:
        logInfo("Server daemon not available")
        f = open("errreport", "w")
        f.write(e.message.strip())
        f.close()
        self.recoverFromError()
def recoverFromError(self):
    """Report a protocol error to the user and clean up.

    Reads the "errreport" file written when the submission failed, shows
    its contents in a modal dialog along with the location of the session
    log, then deletes the report so a stale error is not shown again.
    This function tells the user what the error was and tries to revert the
    protocol back to the pre-daemon state so the main GUI can continue to
    be used.
    """
    errmsg = "An error was encountered during the protocol:\n\n"
    # "with" guarantees the handle is closed even if reading raises.
    with open("errreport", "r") as f:
        for aline in f:
            errmsg = errmsg + aline.strip()
    errmsg = str(errmsg)
    logInfo("Error encountered")
    logInfo(errmsg)
    # Point the user at the session log for bug reports.
    if (platform.system() == "Windows"):
        sessioninfo = os.path.expanduser("~") + "\\InteractiveRosetta\\sessionlog"
    else:
        sessioninfo = os.path.expanduser("~") + "/.InteractiveRosetta/sessionlog"
    errmsg = errmsg + "\n\nIf you don't know what caused this, send the file " + sessioninfo + " to a developer along with an explanation of what you did."
    # You have to use a MessageDialog because the MessageBox doesn't always work for some reason
    dlg = wx.MessageDialog(self, errmsg, "Error Encountered", wx.OK|wx.ICON_EXCLAMATION)
    dlg.ShowModal()
    dlg.Destroy()
    os.remove("errreport")
|
schenc3/InteractiveROSETTA
|
InteractiveROSETTA/scripts/antibody.py
|
Python
|
gpl-2.0
| 14,153
|
[
"PyMOL"
] |
fbcd7efa81898935509fcf098376afe5dd8167a192a02af2f10c9faed95e116f
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2012 Heiko Strathmann
# Copyright (C) 2012 Berlin Institute of Technology and Max-Planck-Society
#
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
# Load the toy regression data shipped with the examples; each entry of
# parameter_list is one argument set for the example function below
# (train features, test features, train labels, width, C, epsilon, tube_epsilon).
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2], \
[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2]]
def modelselection_grid_search_krr_modular (fm_train=traindat,fm_test=testdat,label_train=label_traindat,\
        width=2.1,C=1,epsilon=1e-5,tube_epsilon=1e-2):
    """Grid-search model selection for kernel ridge regression.

    fm_train / fm_test -- training / test feature matrices
    label_train        -- regression targets for the training data
    width, C, epsilon, tube_epsilon -- accepted for signature compatibility
        with the other model-selection examples; NOTE(review): they are not
        used here (tau and the kernel parameters are chosen by the search).
    """
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import MeanSquaredError
    from modshogun import CrossValidationSplitting
    from modshogun import RegressionLabels
    from modshogun import RealFeatures
    from modshogun import KernelRidgeRegression
    from modshogun import GridSearchModelSelection
    from modshogun import ModelSelectionParameters
    # BUGFIX: build features and labels from the function arguments rather
    # than the module-level globals, so the entries of parameter_list are
    # actually honoured.  (The redundant double assignment of `labels` —
    # first from label_traindat, then from label_train — was also removed.)
    features_train=RealFeatures(fm_train)
    features_test=RealFeatures(fm_test)
    labels=RegressionLabels(label_train)
    # predictor, set tau=0 here, doesnt matter
    predictor=KernelRidgeRegression()
    # splitting strategy for 5 fold cross-validation (for classification it is
    # better to use stratified splitting, but the standard
    # CrossValidationSplitting is used here)
    splitting_strategy=CrossValidationSplitting(labels, 5)
    # evaluation method
    evaluation_criterium=MeanSquaredError()
    # cross-validation instance
    cross_validation=CrossValidation(predictor, features_train, labels,
        splitting_strategy, evaluation_criterium)
    # (optional) repeat x-val (set larger to get better estimates, at least two
    # for confidence intervals)
    cross_validation.set_num_runs(2)
    # (optional) request 95% confidence intervals for results (not actually needed
    # for this toy example)
    cross_validation.set_conf_int_alpha(0.05)
    # build parameter tree to select the regularization parameter tau and the
    # kernel (Gaussian width / polynomial degree)
    param_tree_root=create_param_tree()
    # model selection instance
    model_selection=GridSearchModelSelection(cross_validation, param_tree_root)
    # perform model selection; set print_state=True to trace each parameter
    # combination as it is evaluated
    print_state=False
    best_parameters=model_selection.select_model(print_state)
    # apply the winning parameters to the predictor and evaluate once more
    best_parameters.apply_to_machine(predictor)
    result=cross_validation.evaluate()
# creates all the parameters to optimize
def create_param_tree():
    """Build the model-selection parameter tree searched by the example.

    The tree spans the regularization parameter tau plus two alternative
    kernels: a Gaussian kernel with a searched width, and a polynomial
    kernel with a searched (integer) degree.
    """
    from modshogun import ModelSelectionParameters, R_EXP, R_LINEAR
    from modshogun import ParameterCombination
    from modshogun import GaussianKernel, PolyKernel

    root = ModelSelectionParameters()

    # Regularization parameter tau, explored on an exponential grid:
    # values 2**(-1), 2**0.5, 2**2 ... (R_LINEAR / R_LOG range types are
    # also available).
    tau_node = ModelSelectionParameters("tau")
    root.append_child(tau_node)
    tau_node.build_values(-1, 1, R_EXP, 1.5, 2)

    # Candidate 1: Gaussian kernel whose width is part of the search space.
    gaussian_kernel = GaussianKernel()
    gaussian_node = ModelSelectionParameters("kernel", gaussian_kernel)
    width_node = ModelSelectionParameters("width")
    width_node.build_values(5.0, 6.0, R_EXP, 1.0, 2.0)
    gaussian_node.append_child(width_node)
    root.append_child(gaussian_node)

    # Candidate 2: polynomial kernel whose degree is part of the search
    # space (note that integers are used for the degree range).
    poly_kernel = PolyKernel()
    poly_node = ModelSelectionParameters("kernel", poly_kernel)
    root.append_child(poly_node)
    degree_node = ModelSelectionParameters("degree")
    degree_node.build_values(1, 2, R_LINEAR)
    poly_node.append_child(degree_node)

    return root
if __name__=='__main__':
# Run the example on the first argument set when invoked as a script.
print('ModelselectionGridSearchKRR')
modelselection_grid_search_krr_modular(*parameter_list[0])
|
abhiatgithub/shogun-toolbox
|
examples/undocumented/python_modular/modelselection_grid_search_krr_modular.py
|
Python
|
gpl-3.0
| 5,347
|
[
"Gaussian"
] |
545dd4d5346d73d49e1d63dfbc50d8172408adae731704e2d2757ff51e9ebb08
|
from datetime import timedelta
import datetime
from django.utils.translation import ugettext_noop
from django.utils import html
from casexml.apps.case.models import CommCareCase
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext as _
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import ReportCaseES
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from corehq.pillows.base import restore_property_dict
from corehq.util.timezones.conversions import PhoneTime
from dimagi.utils.dates import force_to_datetime
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_date
def _pp_visit_done(val):
    # Return 1 if *val* records a completed postpartum visit, else 0.
    # A visit counts as done when the value is the string 'yes' (any case)
    # or anything numeric equal to 1.
    try:
        if hasattr(val, 'lower') and val.lower() == 'yes':
            return 1
        if int(float(val)) == 1:
            return 1
    except (ValueError, TypeError):
        # ValueError: non-numeric string.  TypeError: value that float()
        # rejects (e.g. None) — the original code crashed with an uncaught
        # AttributeError/TypeError on such non-string values.
        pass
    return 0

def visit_completion_counter(case):
    """Count completed postpartum visits (out of 7) for a mother case.

    Checks pp_1_done..pp_7_done on the mother case and bb_pp_1_done..
    bb_pp_7_done on her first 'baby' subcase, and returns the larger of
    the two counts.
    """
    case_obj = CommCareCase.get(case['_id'])
    baby_case = [c for c in case_obj.get_subcases().all() if c.type == 'baby']
    mother_counter = 0
    child_counter = 0
    for i in range(1, 8):
        key = "pp_%s_done" % i
        if key in case:
            mother_counter += _pp_visit_done(case[key])
        baby_key = "bb_pp_%s_done" % i
        if baby_case and baby_key in baby_case[0]:
            child_counter += _pp_visit_done(baby_case[0][baby_key])
    return max(mother_counter, child_counter)
class HNBCReportDisplay(CaseDisplay):
    """Row-level presenter that turns a raw case dict into display values."""

    @property
    def dob(self):
        """Date of delivery formatted by the report, or '---' when absent."""
        if 'date_birth' in self.case:
            return self.report.date_to_json(self.case['date_birth'])
        return '---'

    @property
    def visit_completion(self):
        """Completed PNC visits rendered as an 'n/7' string."""
        return "%s/7" % visit_completion_counter(self.case)

    @property
    def delivery(self):
        """Human-readable place of birth, or '---' when not recorded."""
        if 'place_birth' not in self.case:
            return '---'
        place = self.case['place_birth']
        if place == "at_home":
            return _('Home')
        if place == "in_hospital":
            return _('Hospital')
        return _('Other')

    @property
    def case_link(self):
        """HTML link to the case-detail report; plain text if the id
        cannot be reversed into a URL."""
        case_id = self.case['_id']
        case_name = self.case['mother_name']
        try:
            url = reverse('crs_details_report',
                          args=[self.report.domain, case_id, self.report.slug])
            return html.mark_safe("<a class='ajax_dialog' href='%s'>%s</a>" % (
                html.escape(url),
                html.escape(case_name),
            ))
        except NoReverseMatch:
            return "%s (bad ID format)" % case_name

    @property
    def baby_name(self):
        """Name of the first 'baby' subcase, or '---' when there is none."""
        parent_case = CommCareCase.get(self.case['_id'])
        babies = [c for c in parent_case.get_subcases().all() if c.type == 'baby']
        return babies[0].name if babies else '---'
class BaseHNBCReport(CustomProjectReport, CaseListReport):
"""Shared machinery for the HNBC case-list reports: elasticsearch query
construction, table headers, and row rendering.  Subclasses supply the
name/slug/template and a case_filter property."""
fields = ['custom.apps.crs_reports.fields.SelectBlockField',
'custom.apps.crs_reports.fields.SelectSubCenterField', # Todo: Currently there is no data about it in case
'custom.apps.crs_reports.fields.SelectASHAField']
ajax_pagination = True
include_inactive = True
module_name = 'crs_reports'
report_template_name = None
@property
@memoized
def case_es(self):
# Memoized ES wrapper over the report-case index for this domain.
return ReportCaseES(self.domain)
def build_es_query(self, case_type=None, afilter=None, status=None):
"""Assemble a filtered + sorted + paginated elasticsearch query dict.

case_type -- optional exact case type to match
afilter -- optional extra filter term (ANDed with the domain term)
status -- 'closed'/'open'; filters on the case's closed flag
"""
def _domain_term():
return {"term": {"domain.exact": self.domain}}
subterms = [_domain_term(), afilter] if afilter else [_domain_term()]
if case_type:
subterms.append({"term": {"type.exact": case_type}})
if status:
subterms.append({"term": {"closed": (status == 'closed')}})
es_query = {
'query': {
'filtered': {
'query': {"match_all": {}},
'filter': {'and': subterms}
}
},
'sort': self.get_sorting_block(),
'from': self.pagination.start,
'size': self.pagination.count,
}
return es_query
@property
@memoized
def es_results(self):
# Run the assembled query once per request (memoized).
query = self.build_es_query(case_type=self.case_type, afilter=self.case_filter, status=self.case_status)
return self.case_es.run_query(query)
@property
def headers(self):
# Column definitions; prop_name enables server-side sorting on that field.
headers = DataTablesHeader(
DataTablesColumn(_("Mother Name"), prop_name="mother_name.#value"),
DataTablesColumn(_("Baby Name"), sortable=False),
DataTablesColumn(_("CHW Name"), prop_name="owner_display", sortable=False),
DataTablesColumn(_("Date of Delivery"), prop_name="date_birth.#value"),
DataTablesColumn(_("PNC Visit Completion"), sortable=False),
DataTablesColumn(_("Delivery"), prop_name="place_birth.#value"),
)
return headers
@property
def rows(self):
# Wrap each ES hit in a display adapter, then yield one list per table
# row in the same order as the headers above.
case_displays = (HNBCReportDisplay(self, restore_property_dict(self.get_case(case)))
for case in self.es_results['hits'].get('hits', []))
for disp in case_displays:
yield [
disp.case_link,
disp.baby_name,
disp.owner_display,
disp.dob,
disp.visit_completion,
disp.delivery,
]
@property
@memoized
def rendered_report_title(self):
# Append the reporting-window suffix unless the report is filtered to a
# single individual.
if not self.individual:
self.name = _("%(report_name)s for (0-42 days after delivery)") % {
"report_name": _(self.name)
}
return self.name
def base_filters(self):
"""Build the filter terms shared by the subclasses from the request's
block/individual parameters."""
block = self.request_params.get('block', '')
individual = self.request_params.get('individual', '')
filters = []
if block:
filters.append({'term': {'block.#value': block}})
if individual:
filters.append({'term': {'owner_id': individual}})
return filters
def date_to_json(self, date):
"""Format a raw case date in the report timezone as DD-MM-YYYY; empty
string for missing/unparseable values."""
if date:
try:
date = force_to_datetime(date)
return (PhoneTime(date, self.timezone).user_time(self.timezone)
.ui_string('%d-%m-%Y'))
except ValueError:
return ''
else:
return ''
class HBNCMotherReport(BaseHNBCReport):
"""Case list of mothers within the 42-day postpartum HBNC window."""
name = ugettext_noop('Mother HBNC Form')
slug = 'hbnc_mother_report'
report_template_name = 'mothers_form_reports_template'
default_case_type = 'pregnant_mother'
@property
def case_filter(self):
# Restrict to postpartum cases delivered within the last 42 days, then
# optionally filter on PNC visit timeliness.
now = datetime.datetime.utcnow()
fromdate = now - timedelta(days=42)
filters = BaseHNBCReport.base_filters(self)
filters.append({'term': {'pp_case_filter.#value': "1"}})
filters.append({'range': {'date_birth.#value': {"gte": json_format_date(fromdate)}}})
status = self.request_params.get('PNC_status', '')
or_stmt = []
if status:
if status == 'On Time':
# "On Time": every one of the seven visits must be marked 'yes'.
for i in range(1, 8):
filters.append({'term': {'case_pp_%s_done.#value' % i: 'yes'}})
else:
# Otherwise (delayed): at least one visit is not marked 'yes'.
for i in range(1, 8):
or_stmt.append({"not": {'term': {'case_pp_%s_done.#value' % i: 'yes'}}})
or_stmt = {'or': or_stmt}
filters.append(or_stmt)
return {'and': filters} if filters else {}
# Registry of the custom reports exposed by this app.
CUSTOM_REPORTS = (
(_('Custom Reports'), (
HBNCMotherReport,
)),
)
# Per-report question groups for the detail view: each entry maps a report
# slug to a list of question sections, and each question pairs a case
# property with its display label.
QUESTION_TEMPLATES = (
(HBNCMotherReport.slug, [
{ 'questions' :[
{'case_property': 'section_a',
'question': _('A. Ask Mother.')
},
{'case_property': 'meals',
'question': _('Number of times mother takes full meals in 24 hours?'),
},
{'case_property': 'bleeding',
'question': _('Bleeding. How many Pads are changed in a day?'),
},
{'case_property': 'warm',
'question': _('During the cold season is the baby being kept warm?'),
},
{'case_property': 'feeding',
'question': _('Is the baby being fed properly?'),
},
{'case_property': 'incessant_cry',
'question': _('Is the baby crying incessantly or passing urine less than 6 times?'),
},
{'case_property': 'section_b',
'question': _('B. Examination of mother'),
},
{'case_property': 'maternal_temp',
'question': _('Temperature: Measure and Record?'),
},
{'case_property': 'discharge',
'question': _('Foul Smelling Discharge?'),
},
{'case_property': 'maternal_fits',
'question': _('Is mother speaking normally or having fits'),
},
{'case_property': 'no_milk',
'question': _('Mother has no milk since delivery or less milk'),
},
{'case_property': 'sore_breast',
'question': _('Cracked Nipples/Painful or Engorged Breast/'),
}]
},
{ 'questions' :[
{'case_property': 'section_c',
'question': _('C. Examination of Baby')
},
{'case_property': 'baby_eye',
'question': _('Eyes Swollen with pus?'),
},
{'case_property': 'weight',
'question': _('Weight (7,14,21,28)?'),
},
{'case_property': 'baby_temp',
'question': _('Temperature: Measure and Record?'),
},
{'case_property': 'pustules',
'question': _('Skin: Pus filled pustules?'),
},
{'case_property': 'cracks',
'question': _('Cracks and Redness on the skin fold?'),
},
{'case_property': 'jaundice',
'question': _('Yellowness in eyes'),
}]
},
{ 'questions' :[
{'case_property': 'section_d',
'question': _('D. Sepsis Signs Checkup')
},
{'case_property': 'limbs',
'question': _('All limbs up?'),
},
{'case_property': 'feeding_less',
'question': _('Feeding Less/Stopped?'),
},
{'case_property': 'cry',
'question': _('Cry Weak/Stopped?'),
},
{'case_property': 'abdomen_vomit',
'question': _('Distant Abdomen?'),
},
{'case_property': 'cold',
'question': _('Baby Cold to touch?'),
},
{'case_property': 'chest',
'question': _('Chest Indrawing?'),
},
{'case_property': 'pus',
'question': _('Pus on umbilicus?'),
}]
}
]),
)
|
qedsoftware/commcare-hq
|
custom/apps/crs_reports/reports.py
|
Python
|
bsd-3-clause
| 11,308
|
[
"VisIt"
] |
bd47870fa7a5780a6262818fea5f841017b197333c147cac70c2e8a6a5fcbc2d
|
import time
t0t=time.time()
from os.path import join
import os
import numpy as n
import glob
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import astropy.io.fits as fits
from scipy.interpolate import interp1d
from scipy.stats import norm as gaussD
# Command-line arguments: SDSS plate, MJD and fiber id of the spectrum, plus
# the name of the environment variable pointing at the survey data root.
plate = sys.argv[1]
mjd = sys.argv[2]
fiberid = sys.argv[3]
env = sys.argv[4]
#env = 'EBOSSDR14_DIR'
# Pick the redshift column for the survey; NOTE(review): z_name is undefined
# for any other env value and the obs[2] lookup below would then crash.
if env == 'EBOSSDR14_DIR':
z_name = 'Z_NOQSO'
if env == 'SDSSDR12_DIR':
z_name = 'Z'
# open the observation file
obs = fits.open(os.path.join(os.environ[env], 'spectra', plate, 'spec-'+plate+'-'+mjd+'-'+fiberid+".fits"))
# Rest-frame wavelength grid, flux, and 1-sigma error from inverse variance;
# pixels with ivar <= 0 are masked out.
wl_data = 10**obs[1].data['loglam']/(1+obs[2].data[z_name])
fl_data = obs[1].data['flux']
err_data = obs[1].data['ivar']**(-0.5)
ok_data = (obs[1].data['ivar']>0)
# Interpolators used later to compare the data with the model grids.
spec = interp1d(wl_data[ok_data], fl_data[ok_data])
err = interp1d(wl_data[ok_data], err_data[ok_data])
wl_data_max = n.max(wl_data[ok_data])
wl_data_min = n.min(wl_data[ok_data])
N_data_points = len(wl_data)
#dirs = ['stellarpop-m11-salpeter', 'stellarpop-m11-kroupa', 'stellarpop-m11-chabrier', 'stellarpop-m11-salpeter-stelib', 'stellarpop-m11-kroupa-stelib', 'stellarpop-m11-chabrier-stelib', 'stellarpop-m11-salpeter-elodie', 'stellarpop-m11-kroupa-elodie', 'stellarpop-m11-chabrier-elodie']
#suffixs = ["-ss.fits", "-kr.fits", "-cha.fits", "-ss.fits", "-kr.fits", "-cha.fits", "-ss.fits", "-kr.fits", "-cha.fits"]
# The three Maraston-2011 IMF variants (MILES library) that are combined below.
dirs = ['stellarpop-m11-chabrier', 'stellarpop-m11-kroupa', 'stellarpop-m11-salpeter']
suffixs = ["-cha.fits", "-kr.fits", "-ss.fits"]
print plate, mjd, fiberid
# Open the per-IMF firefly fit outputs for this spectrum.
sp_cha = fits.open(os.path.join(os.environ[env], dirs[0], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[0]))
sp_kr = fits.open(os.path.join(os.environ[env], dirs[1], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[1]))
sp_sa = fits.open(os.path.join(os.environ[env], dirs[2], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[2]))
#sp_cha_nd = fits.open(os.path.join(os.environ[env],dirs[3], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[3]))
#sp_kr_nd = fits.open(os.path.join(os.environ[env], dirs[4], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[4]))
#sp_sa_nd = fits.open(os.path.join(os.environ[env], dirs[5], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[5]))
#sp_cha_el = fits.open(os.path.join(os.environ[env],dirs[6], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[6]))
#sp_kr_el = fits.open(os.path.join(os.environ[env], dirs[7], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[7]))
#sp_sa_el = fits.open(os.path.join(os.environ[env], dirs[8], 'stellarpop', plate, 'spFly-'+plate+'-'+mjd+'-'+fiberid+suffixs[8]))
# Output locations for the combined FITS summary and the QA figure.
out_dir = os.path.join(os.environ[env], 'stellarpop', plate)
im_dir = os.path.join(os.environ[env], 'stellarpop', plate, 'images')
if os.path.isdir(out_dir)==False:
os.makedirs(out_dir)
if os.path.isdir(im_dir)==False:
os.makedirs(im_dir)
out_file = 'spFly-'+plate+'-'+mjd+'-'+fiberid+'.fits'
path_2_out_file = os.path.join(out_dir, out_file)
im_file = 'spFly-'+plate+'-'+mjd+'-'+fiberid+'.png'
path_2_im_file = os.path.join(im_dir, im_file)
def create_tbhdu(sp_cha, imf, lib):
    """Convert one firefly output HDU list into a summary BinTableHDU.

    The wavelength and best-fit model flux become table columns, and the
    derived stellar-population quantities are copied into the table header
    (under shorter names, without the "_mean" suffix), tagged with the IMF
    and stellar library used for the fit.
    """
    src = sp_cha[1].header
    wave_col = fits.Column(name='wavelength', format='D', unit='Angstrom', array=sp_cha[1].data['wavelength'])
    flux_col = fits.Column(name='model_flux', format='D', unit='1e-17 erg/cm2/s', array=sp_cha[1].data['firefly_model'])
    tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs([wave_col, flux_col]))
    hdr = tbhdu.header
    hdr['HIERARCH library'] = lib
    hdr['HIERARCH IMF'] = imf
    # Copy each light-/mass-weighted quantity with its upper/lower bounds.
    for quantity in ('age_lightW', 'metallicity_lightW', 'age_massW', 'metallicity_massW'):
        hdr['HIERARCH ' + quantity] = src[quantity + '_mean']
        hdr['HIERARCH ' + quantity + '_up'] = src[quantity + '_mean_up']
        hdr['HIERARCH ' + quantity + '_low'] = src[quantity + '_mean_low']
    hdr['HIERARCH EBV'] = src['EBV']
    hdr['HIERARCH stellar_mass'] = src['stellar_mass_mean']
    hdr['HIERARCH stellar_mass_up'] = src['stellar_mass_mean_up']
    hdr['HIERARCH stellar_mass_low'] = src['stellar_mass_mean_low']
    hdr['HIERARCH ssp_number'] = src['ssp_number']
    # Forward every remaining per-SSP card from the source header verbatim.
    for el in src[33:]:
        hdr['HIERARCH ' + el] = src[el]
    return tbhdu
# Build one summary table HDU per IMF (all from the MILES library run).
tbhdu_cha = create_tbhdu(sp_cha, 'Chabrier', 'MILES')
tbhdu_kr = create_tbhdu(sp_kr, 'Kroupa' , 'MILES')
tbhdu_sa = create_tbhdu(sp_sa, 'Salpeter' , 'MILES')
#tbhdu_cha_nd = create_tbhdu(sp_cha_nd, 'Chabrier', 'STELIB')
#tbhdu_kr_nd = create_tbhdu(sp_kr_nd, 'Kroupa' , 'STELIB')
#tbhdu_sa_nd = create_tbhdu(sp_sa_nd, 'Salpeter' , 'STELIB')
#tbhdu_cha_el = create_tbhdu(sp_cha_el, 'Chabrier', 'ELODIE')
#tbhdu_kr_el = create_tbhdu(sp_kr_el, 'Kroupa' , 'ELODIE')
#tbhdu_sa_el = create_tbhdu(sp_sa_el, 'Salpeter' , 'ELODIE')
# Primary header: identifies the spectrum and the fit configuration shared
# by the per-IMF extensions (copied from the Chabrier run).
prihdr = fits.Header()
prihdr['file'] = out_file
prihdr['plate'] = int(plate)
prihdr['mjd'] = int(mjd)
prihdr['fiberid']= int(fiberid)
prihdr['models'] = 'Maraston_2011'
prihdr['fitter'] = 'FIREFLY'
prihdr['model'] = sp_cha[0].header['model']
prihdr['ageMin'] = sp_cha[0].header['ageMin']
prihdr['ageMax'] = sp_cha[0].header['ageMax']
prihdr['Zmin'] = sp_cha[0].header['Zmin']
prihdr['Zmax'] = sp_cha[0].header['Zmax']
prihdr['HIERARCH age_universe'] = sp_cha[1].header['age_universe']
prihdr['HIERARCH redshift'] = sp_cha[1].header['redshift']
# now creates the figure per model
fig = p.figure(0, figsize = (7, 10), frameon=False)#, tight_layout=True)
rect = 0.2, 0.15, 0.85, 0.95
#ax = fig.add_axes(rect, frameon=False)
# panel with the spectrum
fig.add_subplot(3,1,1)
p.plot(wl_data[::2], fl_data[::2], 'k', rasterized =True, alpha=0.5)
p.yscale('log')
mean_data = n.median(fl_data)
p.ylim((mean_data/8., mean_data*8.))
p.xlabel('Wavelength [Angstrom]')
p.ylabel(r'Flux [$f_\lambda$ $10^{-17}$ erg/cm2/s/A]')
p.title("plate=" + plate + ", mjd=" + mjd + ", fiber=" + fiberid + ", z=" + str(n.round(obs[2].data[z_name][0],3)))
# second panel distribution of residuals
fig.add_subplot(3,1,2)
for hdu in [tbhdu_cha, tbhdu_kr, tbhdu_sa]:
ok_model = (hdu.data['wavelength']>wl_data_min)&(hdu.data['wavelength']<wl_data_max)
wl_model = hdu.data['wavelength'][ok_model]
#p.plot(wl_model, (spec(wl_model)-hdu.data['model_flux'][ok_model])/err(wl_model), 'k', rasterized =True, alpha=0.5)
chi2s=(spec(wl_model)-hdu.data['model_flux'][ok_model])/err(wl_model)
p.hist(chi2s, bins = n.arange(-2,2,0.1), normed = True, histtype='step', label=hdu.header['IMF']+hdu.header['library']+", EBV="+str(n.round(hdu.header['EBV'],3))+r", $\chi^2=$"+str(n.round(n.sum(chi2s**2.)/(len(chi2s)-2.),4)))
p.ylim((-0.02,1.02))
#p.yscale('log')
p.xlabel('(data-model)/error')
p.ylabel('Normed distribution')
hdu.header['chi2'] = n.sum(chi2s**2.)
hdu.header['ndof'] = len(chi2s)-2.
p.plot(n.arange(-2,2,0.005), gaussD.pdf(n.arange(-2,2,0.005)), 'k--', label=r'N(0,1)', lw=0.5)
p.grid()
p.legend(frameon=False, loc=0, fontsize=8)
fig.add_subplot(3,1,3)
tpl = n.transpose(n.array([ [
hdu.header['age_lightW'],
hdu.header['stellar_mass'],
hdu.header['age_lightW_up']-hdu.header['age_lightW'],
hdu.header['age_lightW']-hdu.header['age_lightW_low'],
hdu.header['stellar_mass_up']-hdu.header['stellar_mass'],
hdu.header['stellar_mass']-hdu.header['stellar_mass_low']]
for hdu in [tbhdu_cha, tbhdu_kr, tbhdu_sa] ]))
p.errorbar(tpl[0], tpl[1], xerr=[tpl[2], tpl[3]], yerr=[tpl[4], tpl[5]], barsabove=True, fmt='o')
#p.axvline(prihdr['age_universe'], color='r', ls='dashed')
idsUP = n.argsort(tpl[1])
iterList = n.array([tbhdu_cha, tbhdu_kr, tbhdu_sa])[idsUP]
for jj, hdu in enumerate(iterList):
p.annotate(hdu.header['IMF']+hdu.header['library']+r", $\log(Z/Z_\odot)=$"+str(n.round(hdu.header['metallicity_lightW'],4)),
xy = (hdu.header['age_lightW'], hdu.header['stellar_mass']), xycoords='data',
xytext=(0.85, (jj+0.5)/len(iterList)), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05, width=0.2, headwidth=3),
horizontalalignment='right', verticalalignment='top', fontsize=9)
p.ylabel(r'$\log_{10}(M/[M_\odot])$')
p.xlabel(r'$\log_{10}(age/[yr])$')
#p.ylim((9,12.5))
p.grid()
p.savefig(path_2_im_file)
p.clf()
prihdu = fits.PrimaryHDU(header=prihdr)
thdulist = fits.HDUList([prihdu, tbhdu_cha, tbhdu_kr, tbhdu_sa]) # , tbhdu_cha_nd, tbhdu_kr_nd, tbhdu_sa_nd, tbhdu_cha_el, tbhdu_kr_el, tbhdu_sa_el ])
if os.path.isfile(path_2_out_file ):
os.remove(path_2_out_file )
thdulist.writeto( path_2_out_file )
print time.time()-t0t
|
JohanComparat/pySU
|
spm/bin/combine_model_spectra.py
|
Python
|
cc0-1.0
| 10,387
|
[
"Firefly"
] |
4ac4fe11fc8c89c37351027eccce379ea1e2a47ccd619245dfb4c6dfcdbc3a85
|
# $Id$
#
# Copyright (C) 2005-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
def ConstructEnsembleBV(bv, bitsToKeep):
  """Build a smaller bit vector containing only the bits listed in *bitsToKeep*.

  Bit ``i`` of the result is set exactly when bit ``bitsToKeep[i]`` is set
  in *bv*.  The result has the same concrete type as the input vector.

  >>> from rdkit import DataStructs
  >>> bv = DataStructs.ExplicitBitVect(128)
  >>> bv.SetBitsFromList((1,5,47,99,120))
  >>> r = ConstructEnsembleBV(bv,(0,1,2,3,45,46,47,48,49))
  >>> r.GetNumBits()
  9
  >>> r.GetBit(0)
  0
  >>> r.GetBit(1)
  1
  >>> r.GetBit(5)
  0
  >>> r.GetBit(6) # old bit 47
  1
  """
  # Same concrete vector class as the input, sized to the kept-bit list.
  res = bv.__class__(len(bitsToKeep))
  newIdx = 0
  for oldIdx in bitsToKeep:
    if bv.GetBit(oldIdx):
      res.SetBit(newIdx)
    newIdx += 1
  return res
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None):  # pragma: nocover
  """Run this module's doctests and exit with the number of failures."""
  import doctest
  import sys
  results = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
  sys.exit(results.failed)


if __name__ == '__main__':  # pragma: nocover
  _runDoctests()
|
ptosco/rdkit
|
rdkit/DataStructs/BitUtils.py
|
Python
|
bsd-3-clause
| 1,132
|
[
"RDKit"
] |
47dd3469bc2f57f9ebfdef83d35deead0b758f45dafc963b1774de44cb997476
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import (absolute_import, division,
print_function, with_statement)
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2
try:
    xrange  # py2
except NameError:
    xrange = range  # py3
class WebSocketError(Exception):
    """Base exception for all websocket-related errors in this module."""
class WebSocketClosedError(WebSocketError):
    """Raised when an operation is attempted on a closed connection.

    .. versionadded:: 3.2
    """
class WebSocketHandler(tornado.web.RequestHandler):
    """Subclass this class to create a basic WebSocket handler.

    Override `on_message` to handle incoming messages, and use
    `write_message` to send messages to the client. You can also
    override `open` and `on_close` to handle opened and closed
    connections.

    See http://dev.w3.org/html5/websockets/ for details on the
    JavaScript interface. The protocol is specified at
    http://tools.ietf.org/html/rfc6455.

    Here is an example WebSocket handler that echos back all received messages
    back to the client:

    .. testcode::

      class EchoWebSocket(tornado.websocket.WebSocketHandler):
          def open(self):
              print("WebSocket opened")

          def on_message(self, message):
              self.write_message(u"You said: " + message)

          def on_close(self):
              print("WebSocket closed")

    .. testoutput::
       :hide:

    WebSockets are not standard HTTP connections. The "handshake" is
    HTTP, but after the handshake, the protocol is
    message-based. Consequently, most of the Tornado HTTP facilities
    are not available in handlers of this type. The only communication
    methods available to you are `write_message()`, `ping()`, and
    `close()`. Likewise, your request handler class should implement
    `open()` method rather than ``get()`` or ``post()``.

    If you map the handler above to ``/websocket`` in your application, you can
    invoke it in JavaScript with::

      var ws = new WebSocket("ws://localhost:8888/websocket");
      ws.onopen = function() {
         ws.send("Hello, world");
      };
      ws.onmessage = function (evt) {
         alert(evt.data);
      };

    This script pops up an alert box that says "You said: Hello, world".

    Web browsers allow any site to open a websocket connection to any other,
    instead of using the same-origin policy that governs other network
    access from javascript. This can be surprising and is a potential
    security hole, so since Tornado 4.0 `WebSocketHandler` requires
    applications that wish to receive cross-origin websockets to opt in
    by overriding the `~WebSocketHandler.check_origin` method (see that
    method's docs for details). Failure to do so is the most likely
    cause of 403 errors when making a websocket connection.

    When using a secure websocket connection (``wss://``) with a self-signed
    certificate, the connection from a browser may fail because it wants
    to show the "accept this certificate" dialog but has nowhere to show it.
    You must first visit a regular HTML page using the same certificate
    to accept it before the websocket connection will succeed.
    """
    def __init__(self, application, request, **kwargs):
        tornado.web.RequestHandler.__init__(self, application, request,
                                            **kwargs)
        self.ws_connection = None   # protocol object once the handshake succeeds
        self.close_code = None      # status code from the client's close frame
        self.close_reason = None    # reason text from the client's close frame
        self.stream = None          # IOStream detached from the HTTP connection
        self._on_close_called = False  # guards against running on_close twice

    @tornado.web.asynchronous
    def get(self, *args, **kwargs):
        # Performs the websocket handshake; args/kwargs are saved so they can
        # be forwarded to open() after the handshake completes.
        self.open_args = args
        self.open_kwargs = kwargs

        # Upgrade header should be present and should be equal to WebSocket
        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
            self.set_status(400)
            log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return

        # Connection header should be upgrade.
        # Some proxy servers/load balancers
        # might mess with it.
        headers = self.request.headers
        connection = map(lambda s: s.strip().lower(),
                         headers.get("Connection", "").split(","))
        if 'upgrade' not in connection:
            self.set_status(400)
            log_msg = "\"Connection\" must be \"Upgrade\"."
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return

        # Handle WebSocket Origin naming convention differences
        # The difference between version 8 and 13 is that in 8 the
        # client sends a "Sec-Websocket-Origin" header and in 13 it's
        # simply "Origin".
        if "Origin" in self.request.headers:
            origin = self.request.headers.get("Origin")
        else:
            origin = self.request.headers.get("Sec-Websocket-Origin", None)

        # If there was an origin header, check to make sure it matches
        # according to check_origin. When the origin is None, we assume it
        # did not come from a browser and that it can be passed on.
        if origin is not None and not self.check_origin(origin):
            self.set_status(403)
            log_msg = "Cross origin websockets not allowed"
            self.finish(log_msg)
            gen_log.debug(log_msg)
            return

        # Take ownership of the underlying stream; from here on this is no
        # longer a normal HTTP request/response exchange.
        self.stream = self.request.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)

        self.ws_connection = self.get_websocket_protocol()
        if self.ws_connection:
            self.ws_connection.accept_connection()
        else:
            # Unsupported protocol version: advertise the versions we speak.
            if not self.stream.closed():
                self.stream.write(tornado.escape.utf8(
                    "HTTP/1.1 426 Upgrade Required\r\n"
                    "Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
                self.stream.close()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket.

        The message may be either a string or a dict (which will be
        encoded as json). If the ``binary`` argument is false, the
        message will be sent as utf8; in binary mode any byte string
        is allowed.

        If the connection is already closed, raises `WebSocketClosedError`.

        .. versionchanged:: 3.2
           `WebSocketClosedError` was added (previously a closed connection
           would raise an `AttributeError`)
        """
        if self.ws_connection is None:
            raise WebSocketClosedError()
        if isinstance(message, dict):
            message = tornado.escape.json_encode(message)
        self.ws_connection.write_message(message, binary=binary)

    def select_subprotocol(self, subprotocols):
        """Invoked when a new WebSocket requests specific subprotocols.

        ``subprotocols`` is a list of strings identifying the
        subprotocols proposed by the client. This method may be
        overridden to return one of those strings to select it, or
        ``None`` to not select a subprotocol. Failure to select a
        subprotocol does not automatically abort the connection,
        although clients may close the connection if none of their
        proposed subprotocols was selected.
        """
        return None

    def get_compression_options(self):
        """Override to return compression options for the connection.

        If this method returns None (the default), compression will
        be disabled. If it returns a dict (even an empty one), it
        will be enabled. The contents of the dict may be used to
        control the memory and CPU usage of the compression,
        but no such options are currently implemented.

        .. versionadded:: 4.1
        """
        return None

    def open(self, *args, **kwargs):
        """Invoked when a new WebSocket is opened.

        The arguments to `open` are extracted from the `tornado.web.URLSpec`
        regular expression, just like the arguments to
        `tornado.web.RequestHandler.get`.
        """
        pass

    def on_message(self, message):
        """Handle incoming messages on the WebSocket

        This method must be overridden.
        """
        raise NotImplementedError

    def ping(self, data):
        """Send ping frame to the remote end."""
        if self.ws_connection is None:
            raise WebSocketClosedError()
        self.ws_connection.write_ping(data)

    def on_pong(self, data):
        """Invoked when the response to a ping frame is received."""
        pass

    def on_close(self):
        """Invoked when the WebSocket is closed.

        If the connection was closed cleanly and a status code or reason
        phrase was supplied, these values will be available as the attributes
        ``self.close_code`` and ``self.close_reason``.

        .. versionchanged:: 4.0
           Added ``close_code`` and ``close_reason`` attributes.
        """
        pass

    def close(self, code=None, reason=None):
        """Closes this Web Socket.

        Once the close handshake is successful the socket will be closed.

        ``code`` may be a numeric status code, taken from the values
        defined in `RFC 6455 section 7.4.1
        <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
        ``reason`` may be a textual message about why the connection is
        closing. These values are made available to the client, but are
        not otherwise interpreted by the websocket protocol.

        .. versionchanged:: 4.0
           Added the ``code`` and ``reason`` arguments.
        """
        if self.ws_connection:
            self.ws_connection.close(code, reason)
            self.ws_connection = None

    def check_origin(self, origin):
        """Override to enable support for allowing alternate origins.

        The ``origin`` argument is the value of the ``Origin`` HTTP
        header, the url responsible for initiating this request. This
        method is not called for clients that do not send this header;
        such requests are always allowed (because all browsers that
        implement WebSockets support this header, and non-browser
        clients do not have the same cross-site security concerns).

        Should return True to accept the request or False to reject it.
        By default, rejects all requests with an origin on a host other
        than this one.

        This is a security protection against cross site scripting attacks on
        browsers, since WebSockets are allowed to bypass the usual same-origin
        policies and don't use CORS headers.

        To accept all cross-origin traffic (which was the default prior to
        Tornado 4.0), simply override this method to always return true::

            def check_origin(self, origin):
                return True

        To allow connections from any subdomain of your site, you might
        do something like::

            def check_origin(self, origin):
                parsed_origin = urllib.parse.urlparse(origin)
                return parsed_origin.netloc.endswith(".mydomain.com")

        .. versionadded:: 4.0
        """
        parsed_origin = urlparse(origin)
        origin = parsed_origin.netloc
        origin = origin.lower()

        host = self.request.headers.get("Host")

        # Check to see that origin matches host directly, including ports
        return origin == host

    def set_nodelay(self, value):
        """Set the no-delay flag for this stream.

        By default, small messages may be delayed and/or combined to minimize
        the number of packets sent. This can sometimes cause 200-500ms delays
        due to the interaction between Nagle's algorithm and TCP delayed
        ACKs. To reduce this delay (at the expense of possibly increasing
        bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
        connection is established.

        See `.BaseIOStream.set_nodelay` for additional details.

        .. versionadded:: 3.1
        """
        self.stream.set_nodelay(value)

    def on_connection_close(self):
        # Called when the underlying stream closes; propagate to the protocol
        # object and fire the user-facing on_close() exactly once.
        if self.ws_connection:
            self.ws_connection.on_connection_close()
            self.ws_connection = None
        if not self._on_close_called:
            self._on_close_called = True
            self.on_close()

    def send_error(self, *args, **kwargs):
        if self.stream is None:
            super(WebSocketHandler, self).send_error(*args, **kwargs)
        else:
            # If we get an uncaught exception during the handshake,
            # we have no choice but to abruptly close the connection.
            # TODO: for uncaught exceptions after the handshake,
            # we can close the connection more gracefully.
            self.stream.close()

    def get_websocket_protocol(self):
        # Returns a protocol implementation for the requested version, or
        # None for unsupported versions (the caller then responds 426).
        websocket_version = self.request.headers.get("Sec-WebSocket-Version")
        if websocket_version in ("7", "8", "13"):
            return WebSocketProtocol13(
                self, compression_options=self.get_compression_options())
def _wrap_method(method):
    """Wrap a RequestHandler method so it is rejected after the handshake.

    The wrapper forwards to the original method only while ``self.stream``
    is still None (i.e. before the connection has been hijacked for the
    websocket protocol) and raises otherwise.
    """
    def _disallow_for_websocket(self, *args, **kwargs):
        if self.stream is not None:
            raise RuntimeError("Method not supported for Web Sockets")
        method(self, *args, **kwargs)
    return _disallow_for_websocket


# Plain HTTP response methods make no sense once the connection has been
# upgraded, so install guarded wrappers for all of them.
for method in ["write", "redirect", "set_header", "set_cookie",
               "set_status", "flush", "finish"]:
    setattr(WebSocketHandler, method,
            _wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
    """Shared plumbing for the concrete WebSocket protocol versions."""

    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.stream = handler.stream
        # Either side may terminate independently; both flags start False.
        self.client_terminated = self.server_terminated = False

    def _run_callback(self, callback, *args, **kwargs):
        """Invoke *callback* with exception handling.

        On error, aborts the websocket connection and returns False.
        """
        try:
            callback(*args, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in %s",
                          self.request.path, exc_info=True)
            self._abort()

    def on_connection_close(self):
        self._abort()

    def _abort(self):
        """Instantly abort the WebSocket connection by closing the socket."""
        self.client_terminated = True
        self.server_terminated = True
        self.stream.close()  # forcibly tear down the connection
        self.close()  # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(-1, zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
class WebSocketProtocol13(WebSocketProtocol):
    """Implementation of the WebSocket protocol from RFC 6455.

    This class supports versions 7 and 8 of the protocol in addition to the
    final version 13.
    """
    # Bit masks for the first byte of a frame.
    FIN = 0x80
    RSV1 = 0x40
    RSV2 = 0x20
    RSV3 = 0x10
    RSV_MASK = RSV1 | RSV2 | RSV3
    OPCODE_MASK = 0x0f

    def __init__(self, handler, mask_outgoing=False,
                 compression_options=None):
        WebSocketProtocol.__init__(self, handler)
        # Clients must mask outgoing frames per RFC 6455 section 5.3.
        self.mask_outgoing = mask_outgoing
        self._final_frame = False
        self._frame_opcode = None
        self._masked_frame = None
        self._frame_mask = None
        self._frame_length = None
        # Accumulates payload of a fragmented message until the FIN frame.
        self._fragmented_message_buffer = None
        self._fragmented_message_opcode = None
        # Timeout handle used while waiting for a clean close handshake.
        self._waiting = None
        self._compression_options = compression_options
        self._decompressor = None
        self._compressor = None
        self._frame_compressed = None
        # The total uncompressed size of all messages received or sent.
        # Unicode messages are encoded to utf8.
        # Only for testing; subject to change.
        self._message_bytes_in = 0
        self._message_bytes_out = 0
        # The total size of all packets received or sent.  Includes
        # the effect of compression, frame overhead, and control frames.
        self._wire_bytes_in = 0
        self._wire_bytes_out = 0

    def accept_connection(self):
        """Validate the handshake headers and complete the upgrade."""
        try:
            self._handle_websocket_headers()
            self._accept_connection()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received",
                          exc_info=True)
            self._abort()
            return

    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers

        If a header is missing or have an incorrect value ValueError will be
        raised
        """
        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    @staticmethod
    def compute_accept_value(key):
        """Computes the value for the Sec-WebSocket-Accept header,
        given the value for Sec-WebSocket-Key.
        """
        sha1 = hashlib.sha1()
        sha1.update(utf8(key))
        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
        return native_str(base64.b64encode(sha1.digest()))

    def _challenge_response(self):
        return WebSocketProtocol13.compute_accept_value(
            self.request.headers.get("Sec-Websocket-Key"))

    def _accept_connection(self):
        """Send the 101 response and start reading websocket frames."""
        subprotocol_header = ''
        subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
        subprotocols = [s.strip() for s in subprotocols.split(',')]
        if subprotocols:
            selected = self.handler.select_subprotocol(subprotocols)
            if selected:
                assert selected in subprotocols
                subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
                                      % selected)

        extension_header = ''
        extensions = self._parse_extensions_header(self.request.headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                # TODO: negotiate parameters if compression_options
                # specifies limits.
                self._create_compressors('server', ext[1])
                if ('client_max_window_bits' in ext[1] and
                        ext[1]['client_max_window_bits'] is None):
                    # Don't echo an offered client_max_window_bits
                    # parameter with no value.
                    del ext[1]['client_max_window_bits']
                extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
                                    httputil._encode_header(
                                        'permessage-deflate', ext[1]))
                break

        if self.stream.closed():
            self._abort()
            return
        self.stream.write(tornado.escape.utf8(
            "HTTP/1.1 101 Switching Protocols\r\n"
            "Upgrade: websocket\r\n"
            "Connection: Upgrade\r\n"
            "Sec-WebSocket-Accept: %s\r\n"
            "%s%s"
            "\r\n" % (self._challenge_response(),
                      subprotocol_header, extension_header)))

        self._run_callback(self.handler.open, *self.handler.open_args,
                           **self.handler.open_kwargs)
        self._receive_frame()

    def _parse_extensions_header(self, headers):
        # Returns a list of (extension_name, params_dict) tuples.
        extensions = headers.get("Sec-WebSocket-Extensions", '')
        if extensions:
            return [httputil._parse_header(e.strip())
                    for e in extensions.split(',')]
        return []

    def _process_server_headers(self, key, headers):
        """Process the headers sent by the server to this client connection.

        'key' is the websocket handshake challenge/response key.
        """
        assert headers['Upgrade'].lower() == 'websocket'
        assert headers['Connection'].lower() == 'upgrade'
        accept = self.compute_accept_value(key)
        assert headers['Sec-Websocket-Accept'] == accept

        extensions = self._parse_extensions_header(headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                self._create_compressors('client', ext[1])
            else:
                # Bug fix: the format string was previously passed to
                # ValueError with its argument un-interpolated.
                raise ValueError("unsupported extension %r" % (ext,))

    def _get_compressor_options(self, side, agreed_parameters):
        """Converts a websocket agreed_parameters set to keyword arguments
        for our compressor objects.
        """
        options = dict(
            persistent=(side + '_no_context_takeover') not in agreed_parameters)
        wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
        if wbits_header is None:
            options['max_wbits'] = zlib.MAX_WBITS
        else:
            options['max_wbits'] = int(wbits_header)
        return options

    def _create_compressors(self, side, agreed_parameters):
        # TODO: handle invalid parameters gracefully
        allowed_keys = set(['server_no_context_takeover',
                            'client_no_context_takeover',
                            'server_max_window_bits',
                            'client_max_window_bits'])
        for key in agreed_parameters:
            if key not in allowed_keys:
                raise ValueError("unsupported compression parameter %r" % key)
        # Our outgoing frames use this side's parameters; incoming frames
        # were compressed with the other side's parameters.
        other_side = 'client' if (side == 'server') else 'server'
        self._compressor = _PerMessageDeflateCompressor(
            **self._get_compressor_options(side, agreed_parameters))
        self._decompressor = _PerMessageDeflateDecompressor(
            **self._get_compressor_options(other_side, agreed_parameters))

    def _write_frame(self, fin, opcode, data, flags=0):
        """Serialize and send one frame (RFC 6455 section 5.2)."""
        if fin:
            finbit = self.FIN
        else:
            finbit = 0
        frame = struct.pack("B", finbit | opcode | flags)
        l = len(data)
        if self.mask_outgoing:
            mask_bit = 0x80
        else:
            mask_bit = 0
        # Payload length encoding: 7 bits, or 7+16 bits, or 7+64 bits.
        if l < 126:
            frame += struct.pack("B", l | mask_bit)
        elif l <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, l)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, l)
        if self.mask_outgoing:
            mask = os.urandom(4)
            data = mask + _websocket_mask(mask, data)
        frame += data
        self._wire_bytes_out += len(frame)
        try:
            self.stream.write(frame)
        except StreamClosedError:
            self._abort()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            opcode = 0x2
        else:
            opcode = 0x1
        message = tornado.escape.utf8(message)
        assert isinstance(message, bytes)
        self._message_bytes_out += len(message)
        flags = 0
        if self._compressor:
            message = self._compressor.compress(message)
            # RSV1 flags this message as compressed (permessage-deflate).
            flags |= self.RSV1
        self._write_frame(True, opcode, message, flags=flags)

    def write_ping(self, data):
        """Send ping frame."""
        assert isinstance(data, bytes)
        self._write_frame(True, 0x9, data)

    def _receive_frame(self):
        # Kick off the read chain for the next frame header.
        try:
            self.stream.read_bytes(2, self._on_frame_start)
        except StreamClosedError:
            self._abort()

    def _on_frame_start(self, data):
        # First two bytes: FIN/RSV/opcode and mask-bit/payload-length.
        self._wire_bytes_in += len(data)
        header, payloadlen = struct.unpack("BB", data)
        self._final_frame = header & self.FIN
        reserved_bits = header & self.RSV_MASK
        self._frame_opcode = header & self.OPCODE_MASK
        self._frame_opcode_is_control = self._frame_opcode & 0x8
        if self._decompressor is not None:
            self._frame_compressed = bool(reserved_bits & self.RSV1)
            reserved_bits &= ~self.RSV1
        if reserved_bits:
            # client is using as-yet-undefined extensions; abort
            self._abort()
            return
        self._masked_frame = bool(payloadlen & 0x80)
        payloadlen = payloadlen & 0x7f
        if self._frame_opcode_is_control and payloadlen >= 126:
            # control frames must have payload < 126
            self._abort()
            return
        try:
            if payloadlen < 126:
                self._frame_length = payloadlen
                if self._masked_frame:
                    self.stream.read_bytes(4, self._on_masking_key)
                else:
                    self.stream.read_bytes(self._frame_length,
                                           self._on_frame_data)
            elif payloadlen == 126:
                # Extended 16-bit payload length follows.
                self.stream.read_bytes(2, self._on_frame_length_16)
            elif payloadlen == 127:
                # Extended 64-bit payload length follows.
                self.stream.read_bytes(8, self._on_frame_length_64)
        except StreamClosedError:
            self._abort()

    def _on_frame_length_16(self, data):
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!H", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self.stream.read_bytes(self._frame_length, self._on_frame_data)
        except StreamClosedError:
            self._abort()

    def _on_frame_length_64(self, data):
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!Q", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self.stream.read_bytes(self._frame_length, self._on_frame_data)
        except StreamClosedError:
            self._abort()

    def _on_masking_key(self, data):
        self._wire_bytes_in += len(data)
        self._frame_mask = data
        try:
            self.stream.read_bytes(self._frame_length,
                                   self._on_masked_frame_data)
        except StreamClosedError:
            self._abort()

    def _on_masked_frame_data(self, data):
        # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
        self._on_frame_data(_websocket_mask(self._frame_mask, data))

    def _on_frame_data(self, data):
        self._wire_bytes_in += len(data)
        if self._frame_opcode_is_control:
            # control frames may be interleaved with a series of fragmented
            # data frames, so control frames must not interact with
            # self._fragmented_*
            if not self._final_frame:
                # control frames must not be fragmented
                self._abort()
                return
            opcode = self._frame_opcode
        elif self._frame_opcode == 0:  # continuation frame
            if self._fragmented_message_buffer is None:
                # nothing to continue
                self._abort()
                return
            self._fragmented_message_buffer += data
            if self._final_frame:
                opcode = self._fragmented_message_opcode
                data = self._fragmented_message_buffer
                self._fragmented_message_buffer = None
        else:  # start of new data message
            if self._fragmented_message_buffer is not None:
                # can't start new message until the old one is finished
                self._abort()
                return
            if self._final_frame:
                opcode = self._frame_opcode
            else:
                self._fragmented_message_opcode = self._frame_opcode
                self._fragmented_message_buffer = data

        if self._final_frame:
            self._handle_message(opcode, data)

        if not self.client_terminated:
            self._receive_frame()

    def _handle_message(self, opcode, data):
        """Dispatch one complete message by opcode."""
        if self.client_terminated:
            return

        if self._frame_compressed:
            data = self._decompressor.decompress(data)

        if opcode == 0x1:
            # UTF-8 data
            self._message_bytes_in += len(data)
            try:
                decoded = data.decode("utf-8")
            except UnicodeDecodeError:
                self._abort()
                return
            self._run_callback(self.handler.on_message, decoded)
        elif opcode == 0x2:
            # Binary data
            self._message_bytes_in += len(data)
            self._run_callback(self.handler.on_message, data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            if len(data) >= 2:
                self.handler.close_code = struct.unpack('>H', data[:2])[0]
            if len(data) > 2:
                self.handler.close_reason = to_unicode(data[2:])
            # Echo the received close code, if any (RFC 6455 section 5.5.1).
            self.close(self.handler.close_code)
        elif opcode == 0x9:
            # Ping
            self._write_frame(True, 0xA, data)
        elif opcode == 0xA:
            # Pong
            self._run_callback(self.handler.on_pong, data)
        else:
            self._abort()

    def close(self, code=None, reason=None):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                if code is None and reason is not None:
                    code = 1000  # "normal closure" status code
                if code is None:
                    close_data = b''
                else:
                    close_data = struct.pack('>H', code)
                if reason is not None:
                    close_data += utf8(reason)
                self._write_frame(True, 0x8, close_data)
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection.

    This class should not be instantiated directly; use the
    `websocket_connect` function instead.
    """
    def __init__(self, io_loop, request, on_message_callback=None,
                 compression_options=None):
        self.compression_options = compression_options
        self.connect_future = TracebackFuture()  # resolves to self once the handshake completes
        self.protocol = None
        self.read_future = None  # pending future handed out by read_message, if any
        self.read_queue = collections.deque()  # messages received before read_message was called
        self.key = base64.b64encode(os.urandom(16))  # random Sec-WebSocket-Key nonce
        self._on_message_callback = on_message_callback
        self.close_code = self.close_reason = None
        # Rewrite the ws/wss scheme to http/https so the underlying HTTP
        # client machinery can perform the upgrade request.
        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers['Sec-WebSocket-Extensions'] = (
                'permessage-deflate; client_max_window_bits')
        self.tcp_client = TCPClient(io_loop=io_loop)
        # 104857600 = 100 MB max buffer/body size; 65536 = 64 KB chunk size.
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536, 104857600)

    def close(self, code=None, reason=None):
        """Closes the websocket connection.

        ``code`` and ``reason`` are documented under
        `WebSocketHandler.close`.

        .. versionadded:: 3.2

        .. versionchanged:: 4.0
           Added the ``code`` and ``reason`` arguments.
        """
        if self.protocol is not None:
            self.protocol.close(code, reason)
            self.protocol = None

    def on_connection_close(self):
        # Fail a handshake still in flight, signal EOF to any reader
        # (on_message(None)), and release the client socket.
        if not self.connect_future.done():
            self.connect_future.set_exception(StreamClosedError())
        self.on_message(None)
        self.tcp_client.close()
        super(WebSocketClientConnection, self).on_connection_close()

    def _on_http_response(self, response):
        # Only reached when the handshake failed; a successful upgrade
        # detaches the stream in headers_received before a response exists.
        if not self.connect_future.done():
            if response.error:
                self.connect_future.set_exception(response.error)
            else:
                self.connect_future.set_exception(WebSocketError(
                    "Non-websocket response"))

    def headers_received(self, start_line, headers):
        # Anything other than 101 Switching Protocols is treated as a plain
        # HTTP response and ultimately surfaces as a failed handshake.
        if start_line.code != 101:
            return super(WebSocketClientConnection, self).headers_received(
                start_line, headers)
        self.headers = headers
        self.protocol = self.get_websocket_protocol()
        self.protocol._process_server_headers(self.key, self.headers)
        self.protocol._receive_frame()
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        # Take ownership of the stream away from the HTTP client.
        self.stream = self.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)
        # Once we've taken over the connection, clear the final callback
        # we set on the http request. This deactivates the error handling
        # in simple_httpclient that would otherwise interfere with our
        # ability to see exceptions.
        self.final_callback = None
        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        self.protocol.write_message(message, binary)

    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        If on_message_callback was specified at WebSocket
        initialization, this function will never return messages

        Returns a future whose result is the message, or None
        if the connection is closed. If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None  # only one outstanding read is allowed
        future = TracebackFuture()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future

    def on_message(self, message):
        # Deliver to the callback, satisfy a pending read, or queue for later.
        if self._on_message_callback:
            self._on_message_callback(message)
        elif self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)

    def on_pong(self, data):
        pass

    def get_websocket_protocol(self):
        return WebSocketProtocol13(self, mask_outgoing=True,
                                   compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
                      on_message_callback=None, compression_options=None):
    """Client-side websocket support.

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.

    ``compression_options`` is interpreted in the same way as the
    return value of `.WebSocketHandler.get_compression_options`.

    The connection supports two styles of operation. In the coroutine
    style, the application typically calls
    `~.WebSocketClientConnection.read_message` in a loop::

        conn = yield websocket_connect(url)
        while True:
            msg = yield conn.read_message()
            if msg is None: break
            # Do something with msg

    In the callback style, pass an ``on_message_callback`` to
    ``websocket_connect``. In both styles, a message of ``None``
    indicates that the connection has been closed.

    .. versionchanged:: 3.2
       Also accepts ``HTTPRequest`` objects in place of urls.

    .. versionchanged:: 4.1
       Added ``compression_options`` and ``on_message_callback``.
       The ``io_loop`` argument is deprecated.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    # Fill in defaults (user agent, timeouts, ...) exactly as fetch() would.
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop, request,
                                     on_message_callback=on_message_callback,
                                     compression_options=compression_options)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/tornado/websocket.py
|
Python
|
mit
| 40,622
|
[
"VisIt"
] |
6bfcd3c6efdac39328f7339ce19e9e1c68a9830595f513ad0b5ebaee0f8a9793
|
"""
LISSOM and related sheet classes.
$Id$
"""
__version__='$Revision$'
from numpy import zeros,ones
import copy
import param
import topo
from topo.base.projection import Projection
from topo.base.sheet import activity_type
from topo.base.simulation import EPConnectionEvent
from topo.transferfn.basic import PiecewiseLinear
from topo.sheet import JointNormalizingCFSheet
class LISSOM(JointNormalizingCFSheet):
"""
A Sheet class implementing the LISSOM algorithm
(Sirosh and Miikkulainen, Biological Cybernetics 71:66-78, 1994).
A LISSOM sheet is a JointNormalizingCFSheet slightly modified to
enforce a fixed number of settling steps. Settling is controlled
by the tsettle parameter; once that number of settling steps has
been reached, an external input is required before the sheet will
activate again.
"""
strict_tsettle = param.Parameter(default = None,doc="""
If non-None, delay sending output until activation_count reaches this value.""")
mask_init_time=param.Integer(default=5,bounds=(0,None),doc="""
Determines when a new mask is initialized in each new iteration.
The mask is reset whenever new input comes in. Once the
activation_count (see tsettle) reaches mask_init_time, the mask
is initialized to reflect the current activity profile.""")
tsettle=param.Integer(default=8,bounds=(0,None),doc="""
Number of times to activate the LISSOM sheet for each external input event.
A counter is incremented each time an input is received from any
source, and once the counter reaches tsettle, the last activation
step is skipped so that there will not be any further recurrent
activation. The next external (i.e., afferent or feedback)
event will then start the counter over again.""")
continuous_learning = param.Boolean(default=False, doc="""
Whether to modify the weights after every settling step.
If false, waits until settling is completed before doing learning.""")
output_fns = param.HookList(default=[PiecewiseLinear(lower_bound=0.1,upper_bound=0.65)])
precedence = param.Number(0.6)
post_initialization_weights_output_fns = param.HookList([],doc="""
If not empty, weights output_fns that will replace the
existing ones after an initial normalization step.""")
beginning_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the beginning of each iteration.""")
end_of_iteration = param.HookList(default=[],instantiate=False,doc="""
List of callables to be executed at the end of each iteration.""")
def __init__(self,**params):
super(LISSOM,self).__init__(**params)
self.__counter_stack=[]
self.activation_count = 0
self.new_iteration = True
def start(self):
self._normalize_weights(active_units_mask=False)
if len(self.post_initialization_weights_output_fns)>0:
for proj in self.in_connections:
if not isinstance(proj,Projection):
self.debug("Skipping non-Projection ")
else:
proj.weights_output_fns=self.post_initialization_weights_output_fns
def input_event(self,conn,data):
# On a new afferent input, clear the activity
if self.new_iteration:
for f in self.beginning_of_iteration: f()
self.new_iteration = False
self.activity *= 0.0
for proj in self.in_connections:
proj.activity *= 0.0
self.mask.reset()
super(LISSOM,self).input_event(conn,data)
### JABALERT! There should be some sort of warning when
### tsettle times the input delay is larger than the input period.
### Right now it seems to do strange things in that case (does it
### settle at all after the first iteration?), but of course that
### is arguably an error condition anyway (and should thus be
### flagged).
# CEBALERT: there is at least one bug in here for tsettle==0: see
# CB/JAB email "LISSOM tsettle question", 2010/03/22.
def process_current_time(self):
"""
Pass the accumulated stimulation through self.output_fns and
send it out on the default output port.
"""
if self.new_input:
self.new_input = False
if self.activation_count == self.mask_init_time:
self.mask.calculate()
if self.tsettle == 0:
# Special case: behave just like a CFSheet
self.activate()
self.learn()
elif self.activation_count == self.tsettle:
# Once we have been activated the required number of times
# (determined by tsettle), reset various counters, learn
# if appropriate, and avoid further activation until an
# external event arrives.
for f in self.end_of_iteration: f()
self.activation_count = 0
self.new_iteration = True # used by input_event when it is called
if (self.plastic and not self.continuous_learning):
self.learn()
else:
self.activate()
self.activation_count += 1
if (self.plastic and self.continuous_learning):
self.learn()
# print the weights of a unit
def printwts(self,x,y):
for proj in self.in_connections:
print proj.name, x, y
print proj.cfs[x,y].weights
def state_push(self,**args):
super(LISSOM,self).state_push(**args)
self.__counter_stack.append((self.activation_count,self.new_iteration))
def state_pop(self,**args):
super(LISSOM,self).state_pop(**args)
self.activation_count,self.new_iteration=self.__counter_stack.pop()
def send_output(self,src_port=None,data=None):
"""Send some data out to all connections on the given src_port."""
out_conns_on_src_port = [conn for conn in self.out_connections
if self._port_match(conn.src_port,[src_port])]
for conn in out_conns_on_src_port:
if self.strict_tsettle != None:
if self.activation_count < self.strict_tsettle:
if len(conn.dest_port)>2 and conn.dest_port[2] == 'Afferent':
continue
self.verbose("Sending output on src_port %s via connection %s to %s" %
(str(src_port), conn.name, conn.dest.name))
e=EPConnectionEvent(self.simulation._convert_to_time_type(conn.delay)+self.simulation.time(),conn,data)
self.simulation.enqueue_event(e)
class JointScaling(LISSOM):
    """
    LISSOM sheet extended to allow joint auto-scaling of Afferent input projections.

    An exponentially weighted average is used to calculate the average
    joint activity across all jointly-normalized afferent projections.
    This average is then used to calculate a scaling factor for the
    current afferent activity and for the afferent learning rate.

    The target average activity for the afferent projections depends
    on the statistics of the input; if units are activated more often
    (e.g. the number of Gaussian patterns on the retina during each
    iteration is increased) the target average activity should be
    larger in order to maintain a constant average response to similar
    inputs in V1. The target activity for learning rate scaling does
    not need to change, because the learning rate should be scaled
    regardless of what causes the change in average activity.
    """
    # ALERT: Should probably be extended to jointly scale different
    # groups of projections. Currently only works for the joint
    # scaling of projections named "Afferent", grouped together by
    # JointNormalize in dest_port.

    target = param.Number(default=0.045, doc="""
        Target average activity for jointly scaled projections.""")

    # JABALERT: I cannot parse the docstring; is it an activity or a learning rate?
    target_lr = param.Number(default=0.045, doc="""
        Target average activity for jointly scaled projections.

        Used for calculating a learning rate scaling factor.""")

    smoothing = param.Number(default=0.999, doc="""
        Influence of previous activity, relative to current, for computing the average.""")

    apply_scaling = param.Boolean(default=True, doc="""Whether to apply the scaling factors.""")

    precedence = param.Number(0.65)


    def __init__(self,**params):
        super(JointScaling,self).__init__(**params)
        self.x_avg=None          # smoothed average joint afferent activity (lazily initialized in activate)
        self.sf=None             # activity scaling factor
        self.lr_sf=None          # learning-rate scaling factor
        self.scaled_x_avg=None   # smoothed average of the *scaled* activity, kept for debugging
        self.__current_state_stack=[]  # saved copies of the above for state_push/state_pop

    def calculate_joint_sf(self, joint_total):
        """
        Calculate current scaling factors based on the target and previous average joint activities.

        Keeps track of the scaled average for debugging. Could be
        overridden by a subclass to calculate the factors differently.
        """
        if self.plastic:
            # In-place update so sf/lr_sf keep their array identity.
            self.sf *=0.0
            self.lr_sf *=0.0
            self.sf += self.target/self.x_avg
            self.lr_sf += self.target_lr/self.x_avg
            # Exponentially weighted moving averages (weight `smoothing` on history).
            self.x_avg = (1.0-self.smoothing)*joint_total + self.smoothing*self.x_avg
            self.scaled_x_avg = (1.0-self.smoothing)*joint_total*self.sf + self.smoothing*self.scaled_x_avg

    def do_joint_scaling(self):
        """
        Scale jointly normalized projections together.

        Assumes that the projections to be jointly scaled are those
        that are being jointly normalized. Calculates the joint total
        of the grouped projections, and uses this to calculate the
        scaling factor.
        """
        joint_total = zeros(self.shape, activity_type)

        for key,projlist in self._grouped_in_projections('JointNormalize'):
            if key is not None:
                if key =='Afferent':
                    for proj in projlist:
                        joint_total += proj.activity
                    self.calculate_joint_sf(joint_total)
                    if self.apply_scaling:
                        for proj in projlist:
                            proj.activity *= self.sf
                            if hasattr(proj.learning_fn,'learning_rate_scaling_factor'):
                                proj.learning_fn.update_scaling_factor(self.lr_sf)
                            else:
                                raise ValueError("Projections to be joint scaled must have a learning_fn that supports scaling, such as CFPLF_PluginScaled")
                else:
                    raise ValueError("Only Afferent scaling currently supported")

    def activate(self):
        """
        Compute appropriate scaling factors, apply them, and collect resulting activity.

        Scaling factors are first computed for each set of jointly
        normalized projections, and the resulting activity patterns
        are then scaled. Then the activity is collected from each
        projection, combined to calculate the activity for this sheet,
        and the result is sent out.
        """
        self.activity *= 0.0
        # Lazily initialize the averages and scaling factors on first use.
        if self.x_avg is None:
            self.x_avg=self.target*ones(self.shape, activity_type)
        if self.scaled_x_avg is None:
            self.scaled_x_avg=self.target*ones(self.shape, activity_type)
        if self.sf is None:
            self.sf=ones(self.shape, activity_type)
        if self.lr_sf is None:
            self.lr_sf=ones(self.shape, activity_type)

        #Afferent projections are only activated once at the beginning of each iteration
        #therefore we only scale the projection activity and learning rate once.
        if self.activation_count == 0:
            self.do_joint_scaling()

        for proj in self.in_connections:
            self.activity += proj.activity

        if self.apply_output_fns:
            for of in self.output_fns:
                of(self.activity)

        self.send_output(src_port='Activity',data=self.activity)

    def state_push(self,**args):
        super(JointScaling,self).state_push(**args)
        self.__current_state_stack.append((copy.copy(self.x_avg),copy.copy(self.scaled_x_avg),
                                           copy.copy(self.sf), copy.copy(self.lr_sf)))

    def state_pop(self,**args):
        super(JointScaling,self).state_pop(**args)
        self.x_avg,self.scaled_x_avg, self.sf, self.lr_sf=self.__current_state_stack.pop()
def schedule_events(sheet_str="topo.sim['V1']",st=0.5,aff_name="Afferent",
                    ids=1.0,ars=1.0,increase_inhibition=False):
    """
    Convenience function for scheduling a default set of events
    typically used with a LISSOM sheet. The parameters used
    are the defaults from Miikkulainen, Bednar, Choe, and Sirosh
    (2005), Computational Maps in the Visual Cortex, Springer.

    Note that Miikulainen 2005 specifies only one output_fn for the
    LISSOM sheet; where these scheduled actions operate on an
    output_fn, they do so only on the first output_fn in the sheet's
    list of output_fns.

    Installs afferent learning rate changes for any projection whose
    name contains the keyword specified by aff_name (typically
    "Afferent").

    The st argument determines the timescale relative to a
    20000-iteration simulation, and results in the default
    10000-iteration simulation for the default st=0.5.

    The ids argument specifies the input density scale, i.e. how much
    input there is at each iteration, on average, relative to the
    default. The ars argument specifies how much to scale the
    afferent learning rate, if necessary.

    If increase_inhibition is true, gradually increases the strength
    of the inhibitory connection, typically used for natural image
    simulations.
    """
    # NOTE: the sheet and its projections are addressed as *source strings*
    # that the simulator evaluates later, so the schedule survives snapshot
    # save/reload.

    # Allow sheet.BoundingBox calls (below) after reloading a snapshot
    topo.sim.startup_commands.append("from topo import sheet")

    # Lateral excitatory bounds changes
    # Convenience variable: excitatory projection
    LE=sheet_str+".projections()['LateralExcitatory']"

    # Shrink the lateral excitatory radius over the course of the run.
    topo.sim.schedule_command( 200*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.06250))')
    topo.sim.schedule_command( 500*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.04375))')
    topo.sim.schedule_command( 1000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.03500))')
    topo.sim.schedule_command( 2000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.02800))')
    topo.sim.schedule_command( 3000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.02240))')
    topo.sim.schedule_command( 4000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.01344))')
    topo.sim.schedule_command( 5000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00806))')
    topo.sim.schedule_command( 6500*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00484))')
    topo.sim.schedule_command( 8000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00290))')
    topo.sim.schedule_command(20000*st,LE+'.change_bounds(sheet.BoundingBox(radius=0.00174))')

    # Lateral excitatory learning rate changes
    idss=("" if ids==1 else "/%3.1f"%ids)  # optional division by the input density scale
    estr='%s.learning_rate=%%s%s*%s.n_units'%(LE,idss,LE)

    topo.sim.schedule_command( 200*st,estr%'0.12168')
    topo.sim.schedule_command( 500*st,estr%'0.06084')
    topo.sim.schedule_command( 1000*st,estr%'0.06084')
    topo.sim.schedule_command( 2000*st,estr%'0.06084')
    topo.sim.schedule_command( 3000*st,estr%'0.06084')
    topo.sim.schedule_command( 4000*st,estr%'0.06084')
    topo.sim.schedule_command( 5000*st,estr%'0.06084')
    topo.sim.schedule_command( 6500*st,estr%'0.06084')
    topo.sim.schedule_command( 8000*st,estr%'0.06084')
    topo.sim.schedule_command(20000*st,estr%'0.06084')

    ### Lateral inhibitory learning rate and strength changes
    if increase_inhibition:
        LI=sheet_str+".projections()['LateralInhibitory']"
        istr='%s.learning_rate=%%s%s'%(LI,idss)

        topo.sim.schedule_command( 1000*st,istr%'1.80873/5.0*2.0')
        topo.sim.schedule_command( 2000*st,istr%'1.80873/5.0*3.0')
        topo.sim.schedule_command( 5000*st,istr%'1.80873/5.0*5.0')

        topo.sim.schedule_command( 1000*st,LI+'.strength=-2.2')
        topo.sim.schedule_command( 2000*st,LI+'.strength=-2.6')

    # Afferent learning rate changes (for every Projection named Afferent)
    # NOTE(review): eval of a caller-supplied string; safe only because
    # sheet_str comes from trusted model scripts.
    sheet_=eval(sheet_str)
    projs = [pn for pn in sheet_.projections().keys() if pn.count(aff_name)]
    num_aff=len(projs)
    arss="" if ars==1.0 else "*%3.1f"%ars
    for pn in projs:
        # Divide the rate among the afferent projections when there are several.
        ps="%s.projections()['%s'].learning_rate=%%s%s%s" % \
            (sheet_str,pn,idss if num_aff==1 else "%s/%d"%(idss,num_aff),arss)
        topo.sim.schedule_command( 500*st,ps%('0.6850'))
        topo.sim.schedule_command( 2000*st,ps%('0.5480'))
        topo.sim.schedule_command( 4000*st,ps%('0.4110'))
        topo.sim.schedule_command(20000*st,ps%('0.2055'))

    # Activation function threshold changes
    bstr = sheet_str+'.output_fns[0].lower_bound=%5.3f;'+\
           sheet_str+'.output_fns[0].upper_bound=%5.3f'
    lbi=sheet_.output_fns[0].lower_bound
    ubi=sheet_.output_fns[0].upper_bound

    # Gradually raise the piecewise-linear activation bounds.
    topo.sim.schedule_command( 200*st,bstr%(lbi+0.01,ubi+0.01))
    topo.sim.schedule_command( 500*st,bstr%(lbi+0.02,ubi+0.02))
    topo.sim.schedule_command( 1000*st,bstr%(lbi+0.05,ubi+0.03))
    topo.sim.schedule_command( 2000*st,bstr%(lbi+0.08,ubi+0.05))
    topo.sim.schedule_command( 3000*st,bstr%(lbi+0.10,ubi+0.08))
    topo.sim.schedule_command( 4000*st,bstr%(lbi+0.10,ubi+0.11))
    topo.sim.schedule_command( 5000*st,bstr%(lbi+0.11,ubi+0.14))
    topo.sim.schedule_command( 6500*st,bstr%(lbi+0.12,ubi+0.17))
    topo.sim.schedule_command( 8000*st,bstr%(lbi+0.13,ubi+0.20))
    topo.sim.schedule_command(20000*st,bstr%(lbi+0.14,ubi+0.23))

    # Just to get more progress reports
    topo.sim.schedule_command(12000*st,'pass')
    topo.sim.schedule_command(16000*st,'pass')

    # Settling steps changes
    topo.sim.schedule_command( 2000*st,sheet_str+'.tsettle=10')
    topo.sim.schedule_command( 5000*st,sheet_str+'.tsettle=11')
    topo.sim.schedule_command( 6500*st,sheet_str+'.tsettle=12')
    topo.sim.schedule_command( 8000*st,sheet_str+'.tsettle=13')
|
jesuscript/topo-mpi
|
topo/sheet/lissom.py
|
Python
|
bsd-3-clause
| 18,697
|
[
"Gaussian"
] |
e6f4b37fe03040d8e162a04b52ec55426b95140f9a35d287f9377e39f228a23a
|
# coding: utf-8
# ## FFT solver for 1D Gross-Pitaevski equation
# We look for the complex function $\psi(x)$ satisfying the GP equation
#
# $ i\partial_t \psi = \frac{1}{2}(-i\partial_x - \Omega)^2\psi+ V(x)\psi + g|\psi|^2\psi $,
#
# with periodic boundary conditions.
#
# Integration: pseudospectral method with split (time evolution) operator;
# that is evolving in real (R) or momentum (K) space according to the operators
# in the Hamiltonian, i.e.
# first we evaluate
#
# $\hat{\psi}(x,\frac{t}{2})=\mathcal{F}^{-1}\left[\exp\left(-i \frac{\hbar^2 k^2}{2} \frac{t}{2}\right)\,\psi(k,0)\right] $
#
# and later
#
# $\psi(k,t) = \exp(-i \frac{\hbar^2 k^2}{2} \frac{t}{2})\,
# \mathcal{F}\left[\exp\left(-i (V(x)+\lvert \hat{\psi}(x,\frac{t}{2})\rvert ^2)\, t \right)\,\hat{\psi}(x,\frac{t}{2}) \,
# \right]$
#
# where $\cal{F}$ is the Fourier transform.
# _______________________________________________________________________________
#
#
#
# _______________________________________________________________________________
# Import libraries and general definitions
# -------------------------------------------------------------------------------
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft
from scipy.integrate import odeint
#import numpy.linalg as lin
from gpe_fft_utilities import * # local folder utilities
from wave_functions import * # local folder initial states
# comment next line to export as a python shell
#get_ipython().magic('matplotlib inline')
pi=np.pi
import matplotlib.animation as animation
# Data block
# --------------------------------------------------------------------------------
# In[2]:
# Physical and numerical input data for the 1D GP split-step solver.
Zmax = 2.0**7 # Grid half length
Npoint = 2**10 # Number of grid points - better if power of 2
Nparticle = 20 # Number of particles
a_s = -0.01 # scattering length (negative: attractive, supports bright solitons)
x0 = -30.0 #soliton initial position
v0 = 0.0 #initial velocity
v = 10.0 #inital push given before the real-time evolution
Omega = 0* pi/(2*Zmax) # reference frame velocity (zero here)
Ntime_fin = 20000 # total number of time steps
Ntime_out = 100 # number of time steps for intermediate outputs
Dtr = 1.0e-3*1.0 # real time step
Dti = 1.0e-3*100 # imaginary time step
#
# print evolution data:
#
print("Initial data:")
print(" Number of particles = %g"%(Nparticle))
print(" Domain half length = %g"%(Zmax))
print(" Number of grid points = %g"%(Npoint))
print(" Scattering length = %g"%(a_s))
print(" Total time of evolution = %g"%(Ntime_fin*Dtr))
print(" Real time step = %g"%(Dtr))
print(" Imaginary time = %g"%(Dti))
print(" Intermediate solutions = %g"%(Ntime_fin/Ntime_out-1))

# Derived quantities
# -------------------------------------------------------------------------------------

# In[3]:

NormWF = 1.0/(2*Zmax) # Wave function (WF) norm
gn = 2*a_s*Nparticle # characteristic interaction energy
gint = gn*NormWF # interaction (nonlinear-term) strength
xi = 1.0/(0.5*abs(gn)**2)**0.5 # healing length
Dz = 2*Zmax/Npoint # length step size
Dk = pi/Zmax # momentum step size
Kmax = Dk*(Npoint//2) # maximum momentum
Dt = Dtr-1j*Dti # complex time (the loops below pass Dtr or -1j*Dti explicitly)
Ninter = Ntime_fin//Ntime_out # Number of outputs with the intermediate states
print(" Characteristic interaction energy = %g"%(gn))

# Potential parameters
ww = 0.75*Zmax # walls width at a %1 of the grid
wh = 0.0 #walls height
bx = 0.0*Zmax #barrier position a %1 of the grid
bw = 1*xi #barrier width a %1 of the healing length
bl = bx-bw*0.5 #barrier left wall
br = bx+bw*0.5 #barrier right wall
bh = 0.0 #initial barrier height (raised later, before the real-time run)
a = 0.5 #final barrier factor a*Energy
who = 0.0 # harmonic oscillator angular frequency

# Grid definitions: physical and momentum space
# ---------------------------------------------------------------------------------------

# In[4]:

z = np.arange(-Zmax+Dz,Zmax+Dz,Dz) # physical (R-space) grid points in ascending order
# zp=[(0:Dz:Zmax) (-(Zmax-Dz):Dz:-Dz)];
zp = changeFFTposition(z,Npoint,1) # (R-space) grid points with FFT order
#print("grid points (K-order): "); print(zp)
#print(" R-order: "); print(z)
#
# kp=[(0:Dk:Kmax) (-(Kmax-Dk):Dk:-Dk)]; # grid points (K-space), FFT order
kp = np.arange(-Kmax+Dk,Kmax+Dk,Dk)
kp = changeFFTposition(kp,Npoint,1)
#print("momentum values: "); print(kp)

# Define operators
# ---------------------------------------------------------------------------------------

# In[5]:

Ekin_K = 0.5*(kp-Omega)**2 # Kinetic energy in K space
def T_K(t):
    """Half-step kinetic propagator in K space: exp(-i*t*Ekin_K/2).

    t may be complex; passing -1j*Dti turns this into imaginary-time decay.
    """
    phase = -0.5j*t
    return np.exp(phase*Ekin_K)
# print("Ekin: "); print(Ekin_K)
#
# Potential energy in R space:
# Harmonic oscillator with angular frequency whoz:
# Vpot_R = 0.5*whoz**2*zp**2;
# print("Vpot: "); print(Vpot_R)
#
def potencial(bh):
    """Build the external potential on the R-space grid z.

    The potential is a sum of a box (walls of height wh at +-ww), a central
    barrier of height bh between bl and br, and a harmonic trap of angular
    frequency who.  Sharp edges are approximated with steep logistic
    (smoothed Heaviside) steps: f(x) = h/(1+exp(m*(x-x0))).
    """
    steep = 1000  # logistic steepness; large value ~ Heaviside step
    walls = wh-wh/(1+np.exp(steep*(z-ww)))+wh/(1+np.exp(steep*(z+ww)))
    barrier = bh/(1+np.exp(steep*(z-br)))-bh/(1+np.exp(steep*(z-bl)))
    trap = 0.5*who**2*z**2
    return walls+barrier+trap
# print("Vpot: "); print(Vpot_R)
# Main functions
# ---------------------------------------------------------------------------------------
# In[6]:
def Energy(c):
    """Return the energies per particle of state c (K space, FFT order).

    Returns (Emed, mu, Ekin, Epot, Eint): mean energy, chemical potential,
    and the kinetic, potential and mean-field interaction contributions.
    """
    global gint, Vpot_R, Ekin_K, Npoint
    kin = sum(Ekin_K*abs(c)**2)               # kinetic energy, evaluated in K space
    psi = ifft(c)*Npoint                      # back to R space for the local terms
    pot = sum(Vpot_R*abs(psi)**2)/Npoint      # potential energy
    inter = 0.5*gint*sum(abs(psi)**4)/Npoint  # interaction energy
    mean = kin+pot+inter                      # average energy per particle
    mu = mean+inter                           # chemical potential
    return mean, mu, kin, pot, inter
#
def T_R_psi(t,Dt,psi):
    """Apply the R-space half of the split-step operator to psi.

    Computes exp(-i*Dt*(Vpot_R + gint*|psi|^2)) * psi, i.e. the external
    potential plus nonlinear interaction acting for a (possibly complex)
    step Dt.  The argument t is unused because the Hamiltonian is time
    independent.
    """
    local_energy = Vpot_R + gint*(abs(psi)**2)
    return np.exp(-1j*Dt*local_energy)*psi
def Integral(x):
    """Integrate x over the three regions delimited by the barrier.

    Returns (left, inside, right): the integrals over [-Zmax, bl],
    [bl, br] and [br, Zmax] respectively.
    """
    regions = ((-Zmax, bl), (bl, br), (br, Zmax))
    return tuple(integral(x, Dz, z, lo, hi) for lo, hi in regions)
# Choose initial state (wave function)
# ---------------------------------------------------------------------------------------
# In[7]:
# initial wf: gaussian(x,n,x0,s0,w,v)
# Gaussian centered at x0=0, width=w=1, velocity=v= 0, without nodes (s0=0)
#c0=normaliza(gaussian(zp,Npoint,3,0,1,0)); # wf at t=0
#
# initial wf: thomas_fermi(x,n,x0,s0,gN,Ve)
# Thomas Fermi for harmonic oscillator without nodes (s0=0)
#c0=normaliza(thomas_fermi(zp,Npoint,0,0,gint/NormWF,Vpot_R)); # wf at t=0
#
# initial wf: bright_soliton(x,n,x0,gn,v0): # 1/cosh(x/\xi)
# Bright Soliton
c0=normaliza(bright_soliton(zp,Npoint,x0,gn,v0)) # wf at t=0

# Evolve in time the initial state
# ---------------------------------------------------------------------------------------

# In[8]:

# parameters for the imaginary time evolution (relax towards the ground state)
Vpot_R = changeFFTposition(potencial(bh),Npoint,1) # potential in R space, FFT order (bh=0 here)
t0=0.0
tevol=np.empty([Ninter+1]) # time vector
energy_cicle=np.empty([Ninter+1,5]) # put the energies in a matrix
energy_cicle[0,:] = Energy(c0) # Energies at t=0
print("Energies: Emed mu Ekin Epot Eint")
print(" initial = %g %g %g %g %g"%(Energy(c0)))
# print("$\psi(t=0)$: "); print(ct) # check
c=c0 # initialize
tevol[0]=t0
j=0
t=0
#f3 = plt.figure()
cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
plt.plot(z, abs(psi)**2, 'r-',label='$|\psi|^2$') # plot initial density
plt.plot(z, potencial(bh), 'g-') # plot the box
for i in range(1, Ntime_fin+1): # time evolution cicle
    # NOTE(review): t advances by the *real* part of Dt although this loop
    # propagates with the imaginary step Dti -- confirm the intended time axis.
    t += Dt.real
    # Split step with imaginary time -1j*Dti: half kinetic (K space),
    # full potential+interaction (R space), half kinetic (K space).
    psi=ifft(T_K(-1j*Dti)*c)*Npoint
    c=T_K(-1j*Dti)*fft(T_R_psi(t0,-1j*Dti,psi))/Npoint
    c = normaliza(c); # check norm in the wf
    if(not(i%Ntime_out)):
        j+=1
        tevol[j] = t
        energy_cicle[j,:] = Energy(c)
        if(not(j%10)):
            #prepare to plot
            cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
            psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
            # plot features
            # BUGFIX: the original applied '%'-formatting to a title string
            # with no conversion specifier ('...'%(tevol[Ninter])), which
            # raises TypeError at runtime; the spurious argument was removed.
            plt.title('Imaginary Time evolution',fontsize=15)
            plt.xlabel('$x/a_{ho}$',fontsize=15)
            #plt.ylabel('$\\psi\,(x)$',fontsize=15)
            plt.axis([-Zmax,Zmax,0, 0.3])
            plt.xticks(np.arange(-Zmax, Zmax+1,Zmax/2))
            plt.locator_params('y',nbins=3)
            #plt.plot(z, psi.real, 'r.',label='real$(\psi)$')
            #plt.plot(z, psi.imag, 'b--',label='imag$(\psi)$')
            plt.plot(z, abs(psi)**2, 'b--',label='$|\psi|^2$') # plot density
            #plt.legend(fontsize=15)
#f3.show()
print(" final = %g %g %g %g %g"%(Energy(c))) # check energies
print("Energy change at last step = %g"%(energy_cicle[Ninter,0]-energy_cicle[Ninter-1,0]))

# Plot convergence during the evolution in the average energy per particle
# ---------------------------------------------------------------------------------------

# In[9]:

plot_convergence(tevol,energy_cicle[:,0],Ninter) # convergence in the average energy per particle
# Plot the final density (or wave function)
# ---------------------------------------------------------------------------------------
# In[10]:
cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
#plot_density(z,psi,Zmax,t)
#plot_phase(z,psi,Zmax,t)
#plot_real_imag(z,psi,Zmax,t)
cc0 = ifft(c0)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
psi0 = changeFFTposition(cc0,Npoint,0)
#plot_density(z,psi0,Zmax,0)
#plot_real_imag(z,psi0,Zmax,0)
##write psi
#fn = open("densidad" , "w")
#format = "%g \t %g \n"
#for i in range(0, Npoint):
# fn.write(format%(z[i], abs(psi[i])**2))
#fn.close()
# Evolve in real time the eigenstate
# ---------------------------------------------------------------------------------------
# In[11]:
# parameters for the real time evolution
psi_v = psi*np.exp(1j*v*z) # giving a push to the eigenstate
c = fft(changeFFTposition(psi_v,Npoint,1))
c = normaliza(c); # check norm in the wf
c0=c
bh = Energy(c)[0]*a # barrier height
Vpot_R = changeFFTposition(potencial(bh),Npoint,1) # prepare for fft # Add the barrier. # Prepare for fft
t0=0.0
tevol=np.empty([Ninter+1]) # time vector
energy_cicle=np.empty([Ninter+1,5]) # put the energies in a matrix
energy_cicle[0,:] = Energy(c) # Energies at t=0
print("Energies: Emed mu Ekin Epot Eint")
print(" initial = %g %g %g %g %g"%(Energy(c0)))
# print("$\psi(t=0)$: "); print(ct) # check
tevol[0]=t0
j=0
t=0
f4 = plt.figure()
cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
wave_function = np.empty([Ninter+1,3]) # put the % of the wf in a matrix
wave_function[0,:] = Integral(abs(psi)**2) # % at t=0
plt.plot(z, abs(psi)**2, 'r-',label='$|\psi|^2$') # plot initial density
plt.plot(z, potencial(bh), 'g-') # plot the box
# --- real-time propagation loop ------------------------------------------
# NOTE(review): the loop runs Ntime_fin steps and samples every Ntime_out
# steps into arrays of length Ninter+1; presumably Ntime_fin == Ninter *
# Ntime_out -- confirm, otherwise the sampling arrays over/under-fill.
for i in range(1, Ntime_fin+1): # time evolution cycle
    t += Dt.real
    # one split-step: kinetic half in k-space, potential step in r-space
    psi=ifft(T_K(Dtr)*c)*Npoint
    c=T_K(Dtr)*fft(T_R_psi(t0,Dtr,psi))/Npoint
    c = normaliza(c); # check norm in the wf
    if(not(i%Ntime_out)): # sample energies/densities every Ntime_out steps
        j+=1
        tevol[j] = t
        energy_cicle[j,:] = Energy(c)
        cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
        psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
        wave_function[j,:] = Integral(abs(psi)**2)
        #print(wave_function[j,:])
        if(not(j%10)): # refresh the snapshot plot every 10 samples
            # plot features
            # BUG fix: the original title string had no %g placeholder, so
            # the %-formatting raised "not all arguments converted during
            # string formatting"; show the current sample time instead.
            plt.title('Real Time evolution, t = %g'%(tevol[j]),fontsize=15)
            plt.xlabel('$x/a_{ho}$',fontsize=15)
            #plt.ylabel('$\\psi\,(x)$',fontsize=15)
            plt.axis([-Zmax,Zmax,0, 0.3])
            plt.xticks(np.arange(-Zmax, Zmax+1,Zmax/2))
            plt.locator_params('y',nbins=3)
            #plt.plot(z, psi.real, 'r.',label='real$(\psi)$')
            #plt.plot(z, psi.imag, 'b--',label='imag$(\psi)$')
            plt.plot(z, abs(psi)**2, 'b--',label='$|\psi|^2$') # plot density
            #plt.legend(fontsize=15)
f4.show()
print(" final = %g %g %g %g %g"%(Energy(c))) # check energies
print("Energy change at last step = %g"%(energy_cicle[Ninter,0]-energy_cicle[Ninter-1,0]))
plot_convergence(tevol,energy_cicle[:,0],Ninter) # convergence in the average energy per particle
plot_wave_function(tevol, wave_function, Ninter) # % in time
# Animation
# ---------------------------------------------------------------------------------------
# In[12]:
#figure window
fig = plt.figure()
ax = plt.axes(xlim=(-Zmax, Zmax), ylim=(0, 0.3))
line, = ax.plot([], [], lw=2) # the single artist updated by animate()
c=c0 # restart the evolution from the pushed initial state saved earlier
cc = ifft(c)*Npoint*NormWF**0.5 # FFT from K3 to R3 and include the wf norm
psi = changeFFTposition(cc,Npoint,0) # psi is the final wave function
#base frame
def init():
    """Draw the static background (initial density and barrier) for the
    animation and return the artists that will be animated."""
    density0 = abs(psi) ** 2
    line.set_data([], [])
    plt.plot(z, density0, 'r-', label='$|\psi_0|^2$')  # static initial density
    plt.plot(z, potencial(bh), 'g-')  # static barrier/box potential
    return (line,)
# animation function. This is called sequentially
def animate(i):
    """Advance the split-step evolution one step and update the density line.

    Mutates the global spectral coefficients ``c`` in place; the frame
    index ``i`` supplied by FuncAnimation is unused.
    """
    global c
    # one split-step: kinetic propagation in k-space, potential step in r-space
    wf_r = ifft(T_K(Dtr) * c) * Npoint
    c = T_K(Dtr) * fft(T_R_psi(t0, Dtr, wf_r)) / Npoint
    c = normaliza(c)  # renormalise the spectral coefficients
    # back to position space, including the wave-function norm
    coeffs = ifft(c) * Npoint * NormWF ** 0.5
    wf = changeFFTposition(coeffs, Npoint, 0)
    line.set_data(z, abs(wf) ** 2)
    return (line,)
# animation object: init() draws the static background, animate() advances the
# evolution one frame at a time; blit=True redraws only the returned artists.
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=100, interval=20, blit=True)
fig.show()
plt.show()
|
brunojulia/ultracoldUB
|
brightsolitons/Alejandro/bs_v1.py
|
Python
|
gpl-3.0
| 14,708
|
[
"Gaussian"
] |
eb29f61d0f77379523db22e360ed1e90d396e1eee671b6f40de9e5bc6510cf83
|
#!/usr/bin/python2
import optparse
import subprocess
import sys
import shutil
from builds import GporcaBuild, GpcodegenBuild, GporcacodegenBuild
def install_gpdb(dependency_name):
    """Extract the GPDB binary tarball into /usr/local/gpdb.

    :param dependency_name: directory containing the ``*.tar.gz`` artifact.
    :returns: the first non-zero shell exit status, or 0 on success.
    """
    # List form needs no shell and avoids quoting/injection issues.
    status = subprocess.call(["mkdir", "-p", "/usr/local/gpdb"])
    if status:
        return status
    # shell=True is required here so the glob expands; NOTE(review): this
    # assumes exactly one tarball in the directory -- extra matches would be
    # passed to tar as member names.
    status = subprocess.call(
        "tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb",
        shell=True)
    return status
def create_gpadmin_user():
    """Run the gpadmin-user setup script shipped with the GPDB sources.

    :returns: the script's exit status (0 on success).  The original returned
        None on success; 0 is equally falsy for the caller's truth test but
        consistent with the other step functions.
    """
    return subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
def copy_output():
    """Archive the regression diffs and output into icg_output/ for the CI."""
    src_dir = "gpdb_src/src/test/regress/"
    dst_dir = "icg_output/"
    for name in ("regression.diffs", "regression.out"):
        shutil.copyfile(src_dir + name, dst_dir + name)
def main():
    """Drive the GPDB ICG pipeline: system deps, per-dependency install,
    GPDB install, configure, gpadmin user creation, then the ICG suite.

    :returns: the first non-zero step status (used as the process exit code);
        regression output is archived only when the ICG step itself fails.
    """
    parser = optparse.OptionParser()
    parser.add_option("--build_type", dest="build_type", default="RELEASE")
    parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen'])
    parser.add_option("--compiler", dest="compiler")
    parser.add_option("--cxxflags", dest="cxxflags")
    parser.add_option("--output_dir", dest="output_dir", default="install")
    parser.add_option("--gpdb_name", dest="gpdb_name")
    (options, args) = parser.parse_args()
    if options.mode == 'orca':
        ciCommon = GporcaBuild()
    elif options.mode == 'codegen':
        ciCommon = GpcodegenBuild()
    elif options.mode == 'orca_codegen':
        ciCommon = GporcacodegenBuild()
    else:
        # BUG fix: --mode has no default, so omitting it used to crash later
        # with NameError on ciCommon; fail fast with a usage error instead.
        parser.error("--mode is required (orca, codegen or orca_codegen)")
    status = ciCommon.install_system_deps()
    if status:
        return status
    for dependency in args:
        status = ciCommon.install_dependency(dependency)
        if status:
            return status
    status = install_gpdb(options.gpdb_name)
    if status:
        return status
    status = ciCommon.configure()
    if status:
        return status
    status = create_gpadmin_user()
    if status:
        return status
    status = ciCommon.icg()
    if status:
        copy_output()
    return status
# Script entry point: the exit code propagates the first failing step's
# status so the CI task is marked accordingly.
if __name__ == "__main__":
    sys.exit(main())
|
xuegang/gpdb
|
concourse/scripts/test_gpdb.py
|
Python
|
apache-2.0
| 2,056
|
[
"ORCA"
] |
a3e8bc63f2f8496214f9170539080fbcad5e51257ec19736326966ab84db820f
|
from .base import *
class idmapper(object):
    """
    cytoscape network interface as shown in CyREST's swagger documentation for 'idmapper'.

    :param url: an url of the type 'http://' + host + ':' + str(port) + '/' + version + '/'.
    """

    def __init__(self, url):
        # All idmapper operations are POSTed below this commands endpoint.
        self.__url = url + 'commands/idmapper'

    def map_column(self, only_use_one=None, source_column=None, species=None, target_selection=None, verbose=False, source_selection=None):
        """
        Uses the BridgeDB service to look up analogous identifiers from a wide
        selection of other databases

        :param only_use_one (string, optional): When multiple identifiers can be
            mapped from a single term, this forces a singular result
        :param source_column (string): Specifies the column name where the
            source identifiers are located = ['']
        :param source_selection (string, optional): Specifies the database describing
            the existing identifiers = ['']
        :param species (string, optional): The combined common or latin name of
            the species to which the identifiers apply = ['Human (Homo sapiens)',
            'Mouse (Mus musculus)', 'Rat (Rattus norvegicus)', 'Frog (Xenopus tropicalis)',
            'Zebra fish (Danio rerio)', 'Fruit fly (Drosophila melanogaster)',
            'Mosquito (Anopheles gambiae)', 'Arabidopsis thaliana (Arabidopsis thaliana)',
            'Yeast (Saccharomyces cerevisiae)', 'E. coli (Escherichia coli)',
            'Tuberculosis (Mycobacterium tuberculosis)', 'Worm (Caenorhabditis elegans)']
        :param target_selection (string): Specifies the database identifiers to be looked up = ['']
        :param verbose: print more

        :returns: eg. { "new column": "SGD " }
        """
        # BUG fix: ``source_selection`` was documented above but missing from
        # the signature, so it could never be sent to CyREST.  It is appended
        # as a trailing keyword argument to stay positionally backward
        # compatible with existing callers.
        PARAMS = set_param(
            ["only_use_one", "source_column", "species", "target_selection", "source_selection"],
            [only_use_one, source_column, species, target_selection, source_selection])
        response = api(url=self.__url + "/map column", PARAMS=PARAMS, method="POST", verbose=verbose)
        return response
|
idekerlab/py2cytoscape
|
py2cytoscape/cyrest/idmapper.py
|
Python
|
mit
| 2,018
|
[
"Cytoscape"
] |
9ef15af9bff88612da5c77aa3378ba4f00c7e4ba95880fa2a9e8d43f737c6c34
|
#!/usr/bin/env python
import Tkinter
import math, os, sys
import vtk
import vtk.tk
from vtk.tk.vtkTkRenderWidget import *
# Make a root window
root = Tkinter.Tk()
# Add a vtkTkRenderWidget
pane = vtkTkRenderWidget(root,width=400,height=400)
pane.pack(expand='true',fill='both')
# Get the render window from the widget
renWin = pane.GetRenderWindow()
# Next, do the VTK stuff: renderer -> cone source -> mapper -> actor
ren = vtk.vtkRenderer()
renWin.AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(64)
coneMapper = vtk.vtkPolyDataMapper()
# NOTE(review): SetInput()/GetOutput() is the legacy (pre-VTK 6) pipeline API;
# modern VTK needs SetInputConnection(cone.GetOutputPort()) -- confirm the
# targeted VTK version before touching this.
coneMapper.SetInput(cone.GetOutput())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren.AddActor(coneActor)
# Make a quit button
def quit():
    # NOTE(review): shadows the `quit` builtin; harmless at script scope.
    root.destroy()
button = Tkinter.Button(text="Quit",command=quit)
button.pack(expand='true',fill='x')
# start up the event loop
root.mainloop()
|
andregouws/mrMeshPy
|
legacy/mrMesh/mrMeshPy/mrMeshRender.py
|
Python
|
mit
| 819
|
[
"VTK"
] |
20711a51f9db2b4842a98ec4a32b74ffb4d69be259fc4fe7424f9e4d34e29514
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
# To make a release follow these steps:
# python setup.py sdist
# twine upload dist/argos-0.2.0rc1.tar.gz
# or better
# rm -rf build dist
# python setup.py bdist_wheel
# twine check dist/*
# twine upload dist/argos-x.y.z-py3-none-any.whl
# If you get invalid command 'bdist_wheel', you must install the 'wheel' package first.
# See also https://packaging.python.org/en/latest/distributing.html
# TODO: still can't make a wheel even following the instructions in the link below.
# http://stackoverflow.com/questions/26664102/why-can-i-not-create-a-wheel-in-pyt
import os
import sys
def err(*args, **kwargs):
    """Write an error message to stderr, followed by a trailing newline.

    Arguments are forwarded verbatim to ``sys.stderr.write``.
    """
    stream = sys.stderr
    stream.write(*args, **kwargs)
    stream.write('\n')
# Hard requirements: setuptools must be importable and Python must be >= 3.7.
try:
    from setuptools import setup, find_packages
except ImportError:
    # BUG fix: corrected "intallation" typo in the user-facing error message.
    err("Argos requires setuptools for installation. (https://pythonhosted.org/an_example_pypi_project/setuptools.html)")
    err("You can download and install it simply with: python argos/external/ez_setup.py")
    sys.exit(1)
from argos import info
if sys.version_info < (3,7):
    err("Argos requires Python 3.7")
    sys.exit(1)
# Long description: README followed by the changelog; the ':changelog:' marker
# is stripped so the rendered text starts at the first HISTORY entry.
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
# Don't require PyQt for two reasons. First users may use PySide2 (although at the moment PySide is
# not yet working). Second, users may use Anaconda to install PyQt. Anaconda uses a different
# package name (pyqt) than pip (PyQt5) and the tools can't detect correctly if PyQt has been
# installed. This leads to trouble. See:
# https://www.anaconda.com/using-pip-in-a-conda-environment/
# https://github.com/ContinuumIO/anaconda-issues/issues/1554
#
# In Debian pip will ignored installed system packages (e.g. use --ignore-installed).
# To override this use: export PIP_IGNORE_INSTALLED=0
# See https://github.com/pypa/pip/issues/4222
install_requires = [
    #"PyQt5 >= 5.6.0", # Don't require PyQt. See comment above
    "cmlib >= 1.1.2", # Needed, even if no plugins are installed.
    "numpy >= 1.11",
    # Argos will technically work without pyqtgraph and h5py, but with very limited functionality.
    "pgcolorbar >= 1.1.1",
    "pyqtgraph >= 0.11",
    # "h5py >= 2.6"
]
long_description = readme + '\n\n' + history
# Echoed so the assembled description can be inspected in the build log.
print(long_description)
# Package metadata.  Uses the long_description assembled above instead of
# re-concatenating README and HISTORY inline (keeps the two from drifting).
setup(
    name = info.REPO_NAME,
    version = info.VERSION,
    description = info.SHORT_DESCRIPTION,
    long_description = long_description,
    long_description_content_type = 'text/x-rst',
    author = info.AUTHOR,
    author_email = info.EMAIL,
    license = "GPLv3",
    url=info.PROJECT_URL,
    packages = find_packages(),
    package_data = {'': ['HISTORY.rst'],
                    info.PACKAGE_NAME: ['img/argos.css', 'img/snipicons/*', 'utils/default_logging.json']},
    entry_points={'gui_scripts': ['argosw = argos.main:main'],
                  'console_scripts': ['argos = argos.main:main',
                                      'argos_make_wrappers = argos.argos_make_wrappers:main']},
    install_requires = install_requires,
    zip_safe = False,
    classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: X11 Applications :: Qt',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Software Development',
        'Topic :: Scientific/Engineering',
        'Topic :: Utilities',
    ],
    keywords = 'NetCDF HDF5 plotting graphs',
    #test_suite='tests',
    #tests_require=test_requirements
)
|
titusjan/argos
|
setup.py
|
Python
|
gpl-3.0
| 4,069
|
[
"NetCDF"
] |
080bbb7824eee1b4e7d966c99932597d19373a475a79a249214af4c536a3d08c
|
from __future__ import print_function
import nibabel as nib
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
def load_data(filename):
    """Load a 4-D fMRI scan and drop its first four volumes.

    Parameters
    ----------
    filename : string
        Path to a .nii file readable by nibabel.

    Returns
    -------
    data : numpy array
        The image data with the first 4 time points removed; the resulting
        shape is printed as a side effect.
    """
    image = nib.load(filename)
    volume = image.get_data()
    trimmed = volume[:, :, :, 4:]
    print(trimmed.shape)
    return trimmed
def load_all_data(filename):
    """Load a 4-D fMRI scan without discarding any volumes.

    Parameters
    ----------
    filename : string
        Path to a .nii file readable by nibabel.

    Returns
    -------
    data : numpy array
        The full image data; its shape is printed as a side effect.
    """
    image = nib.load(filename)
    volume = image.get_data()
    print(volume.shape)
    return volume
def vox_by_time(data):
    """Reshape a 4-D scan into a 2-D (voxels, volumes) array.

    Parameters
    ----------
    data : 4d array

    Returns
    -------
    reshaped : 2-D array
        One row per voxel, one column per time point.
    """
    n_volumes = data.shape[-1]
    return data.reshape(-1, n_volumes)
def vol_std(data):
    """ Return standard deviation across voxels for 4D array `data`

    Parameters
    ----------
    data : 4D array
        4D array from FMRI run with last axis indexing volumes. Call the shape
        of this array (M, N, P, T) where T is the number of volumes.

    Returns
    -------
    std_values : array shape (T,)
        One dimensional array where ``std_values[i]`` gives the standard
        deviation of all voxels contained in ``data[..., i]``.
    """
    num_volumes = data.shape[-1]
    # Renamed: the local previously shadowed the module-level vox_by_time
    # helper, which was confusing and made the helper unreachable here.
    voxels_by_time = data.reshape((-1, num_volumes))
    return np.std(voxels_by_time, axis=0)
def iqr_outliers(arr_1d, iqr_scale=1.5):
    """ Return indices of outliers identified by interquartile range

    Parameters
    ----------
    arr_1d : 1D array
        One-dimensional numpy array from which to identify outlier values.
    iqr_scale : float, optional
        Scaling for IQR to set low and high thresholds. Low threshold is the
        25th centile value minus ``iqr_scale * IQR``; high threshold is the
        75th centile value plus ``iqr_scale * IQR``.

    Returns
    -------
    outlier_indices : array
        Sorted indices in `arr_1d` that contain outlier values.
    lo_hi_thresh : tuple
        Tuple of 2 values (low threshold, high threshold) as described above.
    """
    q75, q25 = np.percentile(arr_1d, [75, 25])
    spread = q75 - q25
    lo_thresh = q25 - iqr_scale * spread
    hi_thresh = q75 + iqr_scale * spread
    # A single boolean mask replaces the two where() calls + concatenate +
    # sort of the original; np.where on a mask is already sorted ascending.
    mask = (arr_1d < lo_thresh) | (arr_1d > hi_thresh)
    outlier_indices = np.where(mask)[0]
    return outlier_indices, (lo_thresh, hi_thresh)
def vol_rms_diff(arr_4d):
    """ Return root mean square of differences between sequential volumes

    Parameters
    ----------
    arr_4d : 4D array
        4D array from FMRI run with last axis indexing volumes. Call the shape
        of this array (M, N, P, T) where T is the number of volumes.

    Returns
    -------
    rms_values : array shape (T-1,)
        ``rms_values[i]`` is the square root of the mean (across voxels) of
        the squared difference between volume i and volume i + 1.
    """
    flat = arr_4d.reshape((-1, arr_4d.shape[-1]))
    diffs = np.diff(flat, axis=1)
    return np.sqrt((diffs ** 2).mean(axis=0))
def remove_outliers_iqr(arr, axis, iqr_scale=1.5):
    """ Return indices and thresholds of IQR-identified outliers along an axis.

    (Despite its name, this does not remove anything from ``arr``; it only
    reports outliers, matching the original behaviour.)

    Parameters
    ----------
    arr : array
        Numpy array from which to identify outlier values.
    axis : integer
        Axis along which outliers are identified.
    iqr_scale : float, optional
        Scaling for IQR to set low and high thresholds (see iqr_outliers).

    Returns
    -------
    outlier_indices : 1-D array
        Indices of outliers along the given axis.
    lo_hi_thresh : tuple
        Tuple of 2 values (low threshold, high threshold).
    """
    # BUG fix: previously referenced an undefined global ``data`` instead of
    # the ``arr`` parameter, raising NameError on every call.
    # NOTE(review): get_axis_data is not defined in this module -- confirm
    # where it is meant to come from.
    axis_data = get_axis_data(arr, axis)
    indcs, lo_hi_thresh = iqr_outliers(axis_data, iqr_scale)
    return (indcs, lo_hi_thresh)
def smooth_gauss(data_4d, fwhm, time):
    """
    Smooth one time slice of a 4-D scan with a Gaussian filter.

    Parameters
    ----------
    data_4d : 4d numpy array
        The image data of one subject.
    fwhm : width parameter passed to the Gaussian filter (sigma).
    time : time slice index (4th dimension).

    Returns
    -------
    smooth_results : 3-D array, the smoothed data for the selected slice.
    """
    volume = data_4d[..., time]
    return gaussian_filter(volume, fwhm)
|
yuchengdong/project-beta-1
|
code/utils/data_loading.py
|
Python
|
bsd-3-clause
| 5,511
|
[
"Gaussian"
] |
5a70db389ae00b51bf55f6a092130cfc2db02e1bb731e7a131aa5201c6ad6bcd
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Manages external processes and jobfolders.
This sub-module provides an abstraction between resources which
purports to launch external programs, say a
:py:class:`~pylada.vasp.functional.Vasp` instance, and the actual
program. There are two main issues the module attempts to resolve:
- an interface which hides the details of launching mpi jobs on one
or another super-computer
- an interface to launch different calculations in parallel but from
a single actual system process, e.g. asynchronous management of
different mpi-processes
The first point above is easy to understand: some machines use openmpi_
as is, others provide different flavors of mpich, and Cray use their
own crap pseudo-proprietary software. It's best to keep all those
details in one place. The second point is to make it easy to launch
different calculations simultaneously. It provides an additional layer
for parallelization, in addition to the one provided below at the
application level by mpich and friends, and above it by the queuing
system of a particular super-computer.
The module is organized around :py:class:`~process.Process` and its
derived classes. Instances of these classes are meant to be used as
follows:
.. code-block:: python
process = SomeProcess()
process.start(comm)
# do something else
try:
if process.poll():
# process did finish.
except Fail as e:
# an error occurred
The first two lines initialize and start a process of some kind, which
could be as simple as launching an external :py:class:`program
<program.ProgramProcess>` or as complicated as launching different jobs
from a :py:class:`~pylada.jobfolder.jobfolder.JobFolder` instance in
:py:class:`parallel <jobfolder.JobFolderProcess>`. The rest of the
snippet checks whether the process is finished. If it finished
correctly, then :py:meth:`~process.Process.poll` (rather, the overloaded
functions in the derived class) returns True. Otherwise, it throws
:py:class:`Fail`.
It is expected that processes will be returned (or yielded) by
functionals, and then further managed by the script of interest. As such,
the initialization of a process will depend upon the actual functional,
and what it purports to do. However, it should be possible from then on
to manage the process in a standard way. This standard interface is
described by the abstract base class :py:class:`~process.Process`.
.. _openmpi: http://www.open-mpi.org/
"""
__docformat__ = "restructuredtext en"
__all__ = ['Process', 'ProgramProcess', 'CallProcess', 'IteratorProcess',
'JobFolderProcess', 'PoolProcess', 'Fail', 'which', 'DummyProcess']
from pylada import logger
logger = logger.getChild("process")
from ..error import root
from .pool import PoolProcess
from .process import Process
from .call import CallProcess
from .program import ProgramProcess
from .iterator import IteratorProcess
from .jobfolder import JobFolderProcess
from .dummy import DummyProcess
class ProcessError(root):
    """ Root of special exceptions issued by process module.

        Catch this class to handle any process-related failure generically.
    """
class Fail(ProcessError):
    """ Process failed to run successfully.

        Raised by :py:meth:`~process.Process.poll` (and overloads) when the
        underlying program or job finished with an error.
    """
    pass
class AlreadyStarted(ProcessError):
    """ Process already started.

        Thrown when :py:meth:`~process.Process.start` or its overloaded friend is
        called for a second time.
    """
class NotStarted(ProcessError):
    """ Process was never started.

        Thrown when :py:meth:`~process.Process.poll` or its overloaded friend is
        called before the process is started.
    """
def which(program):
    """ Gets location of program by mimicking bash which command.

    :param program: executable name or path; ``~`` and environment variables
        are expanded before searching.
    :returns: resolved path of the executable (via RelativePath).
    :raises IOError: when no matching executable is found.
    """
    from os import environ, getcwd, pathsep
    from os.path import split, expanduser, expandvars, join
    from itertools import chain
    from ..misc import RelativePath
    from ..error import IOError

    def is_exe(path):
        from os import access, X_OK
        from os.path import isfile
        return isfile(path) and access(path, X_OK)

    exprog = expanduser(expandvars(program))
    fpath, fname = split(exprog)
    if fpath:
        if is_exe(exprog):
            return RelativePath(exprog).path
    else:
        # BUG fix: use os.pathsep (':' on POSIX, ';' on Windows) instead of a
        # hard-coded ':', tolerate an unset PATH (environ.get), and skip
        # empty PATH entries rather than probing the bare program name.
        for directory in chain([getcwd()], environ.get("PATH", "").split(pathsep)):
            if directory and is_exe(join(directory, exprog)):
                return RelativePath(join(directory, exprog)).path
    raise IOError('Could not find executable {0}.'.format(program))
|
pylada/pylada-light
|
src/pylada/process/__init__.py
|
Python
|
gpl-3.0
| 5,719
|
[
"CRYSTAL",
"VASP"
] |
837aae7d87e9e5e5d540de411d08c128dcfa7058591479fb74207b90265dea0e
|
########################################################################
# File : JobWrapper.py
# Author : Stuart Paterson
########################################################################
""" The Job Wrapper Class is instantiated with arguments tailored for running
a particular job. The JobWrapper starts a thread for execution of the job
and a Watchdog Agent that can monitor progress.
"""
# Legacy CVS/SVN keyword-substitution revision id (DIRAC convention).
__RCSID__ = "$Id: $"
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FailoverTransfer import FailoverTransfer
from DIRAC.Resources.Catalog.PoolXMLFile import getGUID
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.WorkloadManagementSystem.JobWrapper.WatchdogFactory import WatchdogFactory
from DIRAC.AccountingSystem.Client.Types.Job import Job as AccountingJob
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemSection
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Utilities.Subprocess import Subprocess
from DIRAC.Core.Utilities.File import getGlobbedTotalSize, getGlobbedFiles
from DIRAC.Core.Utilities.Version import getCurrentVersion
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Utilities import Time
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
import DIRAC
import os
import stat
import re
import sys
import time
import shutil
import threading
import tarfile
import glob
import urllib
import json
# Module-level store shared with the payload-execution machinery; presumably
# filled with thread/PID/exit information at run time -- confirm in execute().
EXECUTION_RESULT = {}
class JobWrapper( object ):
#############################################################################
def __init__( self, jobID = None, jobReport = None ):
    """ Standard constructor

        :param jobID: WMS job identifier; falsy values mean the wrapper runs
            outside the WMS (jobID forced to 0, current directory used).
        :param jobReport: optional pre-built JobReport; when absent one is
            created for this jobID at the local site.
    """
    self.initialTiming = os.times()
    self.section = os.path.join( getSystemSection( 'WorkloadManagement/JobWrapper' ), 'JobWrapper' )
    self.log = gLogger
    # Create the accounting report
    self.accountingReport = AccountingJob()
    # Initialize for accounting
    self.wmsMajorStatus = "unknown"
    self.wmsMinorStatus = "unknown"
    # Set now as start time
    self.accountingReport.setStartTime()
    if not jobID:
        self.jobID = 0
    else:
        self.jobID = jobID
    self.siteName = gConfig.getValue( '/LocalSite/Site', 'Unknown' )
    if jobReport:
        self.jobReport = jobReport
    else:
        self.jobReport = JobReport( self.jobID, 'JobWrapper@%s' % self.siteName )
    self.failoverTransfer = FailoverTransfer()
    # self.root is the path the Wrapper is running at
    self.root = os.getcwd()
    # self.localSiteRoot is the path where the local DIRAC installation used to run the payload
    # is taken from
    self.localSiteRoot = gConfig.getValue( '/LocalSite/Root', DIRAC.rootPath )
    # FIXME: Why do we need to load any .cfg file here????
    self.__loadLocalCFGFiles( self.localSiteRoot )
    result = getCurrentVersion()
    if result['OK']:
        self.diracVersion = result['Value']
    else:
        self.diracVersion = 'DIRAC version %s' % DIRAC.buildVersion
    # Tunable limits and defaults, all overridable from the JobWrapper CS section
    self.maxPeekLines = gConfig.getValue( self.section + '/MaxJobPeekLines', 20 )
    if self.maxPeekLines < 0:
        self.maxPeekLines = 0
    self.defaultCPUTime = gConfig.getValue( self.section + '/DefaultCPUTime', 600 )
    self.defaultOutputFile = gConfig.getValue( self.section + '/DefaultOutputFile', 'std.out' )
    self.defaultErrorFile = gConfig.getValue( self.section + '/DefaultErrorFile', 'std.err' )
    self.diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER'] )
    self.tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
    self.sandboxSizeLimit = gConfig.getValue( self.section + '/OutputSandboxLimit', 1024 * 1024 * 10 )
    self.cleanUpFlag = gConfig.getValue( self.section + '/CleanUpFlag', True )
    self.pilotRef = gConfig.getValue( '/LocalSite/PilotReference', 'Unknown' )
    self.cpuNormalizationFactor = gConfig.getValue ( "/LocalSite/CPUNormalizationFactor", 0.0 )
    self.bufferLimit = gConfig.getValue( self.section + '/BufferLimit', 10485760 )
    self.defaultOutputSE = gConfig.getValue( '/Resources/StorageElementGroups/SE-USER', [] )
    self.defaultCatalog = gConfig.getValue( self.section + '/DefaultCatalog', [] )
    self.masterCatalogOnlyFlag = gConfig.getValue( self.section + '/MasterCatalogOnlyFlag', True )
    self.defaultFailoverSE = gConfig.getValue( '/Resources/StorageElementGroups/Tier1-Failover', [] )
    self.defaultOutputPath = ''
    self.dm = DataManager()
    self.fc = FileCatalog()
    # Startup banner: version, PID and process-group information for the logs
    self.log.verbose( '===========================================================================' )
    self.log.verbose( 'Version %s' % ( __RCSID__ ) )
    self.log.verbose( self.diracVersion )
    self.currentPID = os.getpid()
    self.log.verbose( 'Job Wrapper started under PID: %s' % self.currentPID )
    # Define a new process group for the job wrapper
    self.parentPGID = os.getpgid( self.currentPID )
    self.log.verbose( 'Job Wrapper parent process group ID: %s' % self.parentPGID )
    os.setpgid( self.currentPID, self.currentPID )
    self.currentPGID = os.getpgid( self.currentPID )
    self.log.verbose( 'Job Wrapper process group ID: %s' % self.currentPGID )
    self.log.verbose( '==========================================================================' )
    self.log.verbose( 'sys.path is: \n%s' % '\n'.join( sys.path ) )
    self.log.verbose( '==========================================================================' )
    if 'PYTHONPATH' not in os.environ:
        self.log.verbose( 'PYTHONPATH is: null' )
    else:
        pypath = os.environ['PYTHONPATH']
        self.log.verbose( 'PYTHONPATH is: \n%s' % '\n'.join( pypath.split( ':' ) ) )
    self.log.verbose( '==========================================================================' )
    # Restore any LD_LIBRARY_PATH entries that were saved away before startup
    if 'LD_LIBRARY_PATH_SAVE' in os.environ:
        if 'LD_LIBRARY_PATH' in os.environ:
            os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['LD_LIBRARY_PATH_SAVE']
        else:
            os.environ['LD_LIBRARY_PATH'] = os.environ['LD_LIBRARY_PATH_SAVE']
    if 'LD_LIBRARY_PATH' not in os.environ:
        self.log.verbose( 'LD_LIBRARY_PATH is: null' )
    else:
        ldpath = os.environ['LD_LIBRARY_PATH']
        self.log.verbose( 'LD_LIBRARY_PATH is: \n%s' % '\n'.join( ldpath.split( ':' ) ) )
    self.log.verbose( '==========================================================================' )
    if not self.cleanUpFlag:
        self.log.verbose( 'CleanUp Flag is disabled by configuration' )
    # Failure flag
    self.failedFlag = True
    # Set defaults for some global parameters to be defined for the accounting report
    self.owner = 'unknown'
    self.jobGroup = 'unknown'
    self.jobType = 'unknown'
    self.processingType = 'unknown'
    self.userGroup = 'unknown'
    self.jobClass = 'unknown'
    self.inputDataFiles = 0
    self.outputDataFiles = 0
    self.inputDataSize = 0
    self.inputSandboxSize = 0
    self.outputSandboxSize = 0
    self.outputDataSize = 0
    self.processedEvents = 0
    self.jobAccountingSent = False
    self.jobArgs = {}
    self.optArgs = {}
    self.ceArgs = {}
#############################################################################
def initialize( self, arguments ):
    """ Initializes parameters and environment for job.

    :param arguments: dict with 'Job' and 'CE' sub-dicts (and an optional
        'Optimizer' one) describing the payload and the resource; also
        prepares the per-job working directory and writes job.info.
    """
    self.__report( 'Running', 'Job Initialization' )
    self.log.info( 'Starting Job Wrapper Initialization for Job %s' % ( self.jobID ) )
    self.jobArgs = arguments['Job']
    self.log.verbose( self.jobArgs )
    self.ceArgs = arguments ['CE']
    self.log.verbose( self.ceArgs )
    self.__setInitialJobParameters()
    self.optArgs = arguments.get( 'Optimizer', {} )
    # Fill some parameters for the accounting report
    self.owner = self.jobArgs.get( 'Owner', self.owner )
    self.jobGroup = self.jobArgs.get( 'JobGroup', self.jobGroup )
    self.jobType = self.jobArgs.get( 'JobType', self.jobType )
    dataParam = self.jobArgs.get( 'InputData', [] )
    if dataParam and not isinstance( dataParam, list ):
        dataParam = [dataParam]
    self.inputDataFiles = len( dataParam )
    dataParam = self.jobArgs.get( 'OutputData', [] )
    if dataParam and not isinstance( dataParam, list ):
        dataParam = [dataParam]
    self.outputDataFiles = len( dataParam )
    self.processingType = self.jobArgs.get( 'ProcessingType', self.processingType )
    self.userGroup = self.jobArgs.get( 'OwnerGroup', self.userGroup )
    self.jobClass = self.jobArgs.get( 'JobSplitType', self.jobClass )
    # Prepare the working directory, cd to there, and copying eventual extra arguments in it
    if self.jobID:
        if os.path.exists( str( self.jobID ) ):
            shutil.rmtree( str( self.jobID ) )
        os.mkdir( str( self.jobID ) )
        os.chdir( str( self.jobID ) )
        extraOpts = self.jobArgs.get( 'ExtraOptions', '' )
        if extraOpts:
            if os.path.exists( '%s/%s' % ( self.root, extraOpts ) ):
                shutil.copyfile( '%s/%s' % ( self.root, extraOpts ), extraOpts )
            self.__loadLocalCFGFiles( self.localSiteRoot )
    else:
        self.log.info( 'JobID is not defined, running in current directory' )
    # BUG fix: the file handle was previously left to the garbage collector;
    # a context manager guarantees it is closed even if write() raises.
    with open( 'job.info', 'w' ) as infoFile:
        infoFile.write( self.__dictAsInfoString( self.jobArgs, '/Job' ) )
#############################################################################
def __setInitialJobParameters( self ):
    """Collect CE-supplied settings plus wrapper identity into the job
    parameter list and push them via __setJobParamList."""
    ce = self.ceArgs
    parameters = []
    if 'LocalSE' in ce:
        parameters.append( ( 'AgentLocalSE', ','.join( ce['LocalSE'] ) ) )
    if 'PilotReference' in ce:
        parameters.append( ( 'Pilot_Reference', ce['PilotReference'] ) )
    # These two are forwarded under their own names when the CE provides them.
    for key in ( 'CPUScalingFactor', 'CPUNormalizationFactor' ):
        if key in ce:
            parameters.append( ( key, ce[key] ) )
    parameters.append( ( 'PilotAgent', self.diracVersion ) )
    parameters.append( ( 'JobWrapperPID', self.currentPID ) )
    return self.__setJobParamList( parameters )
#############################################################################
def __loadLocalCFGFiles( self, localRoot ):
    """Loads any extra CFG files residing in the local DIRAC site root.

    :param localRoot: directory scanned (non-recursively); every ``*.cfg``
        file found is merged into the global configuration.
    """
    files = os.listdir( localRoot )
    self.log.debug( 'Checking directory %s for *.cfg files' % localRoot )
    for localFile in files:
        # BUG fix: re.search( '.cfg$', ... ) left the dot unescaped, so any
        # character before 'cfg' matched (e.g. 'sitexcfg'); an explicit
        # suffix test is both correct and clearer.
        if localFile.endswith( '.cfg' ):
            gConfig.loadFile( '%s/%s' % ( localRoot, localFile ) )
            self.log.verbose( "Found local .cfg file '%s'" % localFile )
#############################################################################
def __dictAsInfoString( self, dData, infoString = '', currentBase = "" ):
for key in dData:
value = dData[ key ]
if isinstance( value, dict ):
infoString = self.__dictAsInfoString( value, infoString, "%s/%s" % ( currentBase, key ) )
elif isinstance( value, ( list, tuple ) ):
if len( value ) and value[0] == '[':
infoString += "%s/%s = %s\n" % ( currentBase, key, " ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, ", ".join( value ) )
else:
infoString += "%s/%s = %s\n" % ( currentBase, key, str( value ) )
return infoString
#############################################################################
def execute( self, arguments ):
"""The main execution method of the Job Wrapper
"""
self.log.info( 'Job Wrapper is starting execution phase for job %s' % ( self.jobID ) )
os.environ['DIRACJOBID'] = str( self.jobID )
os.environ['DIRACROOT'] = self.localSiteRoot
self.log.verbose( 'DIRACROOT = %s' % ( self.localSiteRoot ) )
os.environ['DIRACPYTHON'] = sys.executable
self.log.verbose( 'DIRACPYTHON = %s' % ( sys.executable ) )
os.environ['DIRACSITE'] = DIRAC.siteName()
self.log.verbose( 'DIRACSITE = %s' % ( DIRAC.siteName() ) )
errorFile = self.jobArgs.get( 'StdError', self.defaultErrorFile )
outputFile = self.jobArgs.get( 'StdOutput', self.defaultOutputFile )
if 'CPUTime' in self.jobArgs:
jobCPUTime = int( self.jobArgs['CPUTime'] )
else:
self.log.info( 'Job %s has no CPU time limit specified, '
'applying default of %s' % ( self.jobID, self.defaultCPUTime ) )
jobCPUTime = self.defaultCPUTime
jobMemory = 0.
if "Memory" in self.jobArgs:
# Job specifies memory in GB, internally use KB
jobMemory = int( self.jobArgs['Memory'] )*1024.*1024.
if 'Executable' in self.jobArgs:
executable = self.jobArgs['Executable'].strip()
else:
msg = 'Job %s has no specified executable' % ( self.jobID )
self.log.warn( msg )
return S_ERROR( msg )
jobArguments = self.jobArgs.get( 'Arguments', '' )
executable = os.path.expandvars( executable )
exeThread = None
spObject = None
if re.search( 'DIRACROOT', executable ):
executable = executable.replace( '$DIRACROOT', self.localSiteRoot )
self.log.verbose( 'Replaced $DIRACROOT for executable as %s' % ( self.localSiteRoot ) )
# Make the full path since . is not always in the PATH
executable = os.path.abspath( executable )
if not os.access( executable, os.X_OK ):
try:
os.chmod( executable, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH )
except Exception:
self.log.warn( 'Failed to change mode to 775 for the executable', executable )
exeEnv = dict( os.environ )
if 'ExecutionEnvironment' in self.jobArgs:
self.log.verbose( 'Adding variables to execution environment' )
variableList = self.jobArgs['ExecutionEnvironment']
if isinstance( variableList, basestring ):
variableList = [variableList]
for var in variableList:
nameEnv = var.split( '=' )[0]
valEnv = urllib.unquote( var.split( '=' )[1] )
exeEnv[nameEnv] = valEnv
self.log.verbose( '%s = %s' % ( nameEnv, valEnv ) )
if os.path.exists( executable ):
self.__report( 'Running', 'Application', sendFlag = True )
spObject = Subprocess( timeout = False, bufferLimit = int( self.bufferLimit ) )
command = executable
if jobArguments:
command += ' ' + jobArguments
self.log.verbose( 'Execution command: %s' % ( command ) )
maxPeekLines = self.maxPeekLines
exeThread = ExecutionThread( spObject, command, maxPeekLines, outputFile, errorFile, exeEnv )
exeThread.start()
time.sleep( 10 )
payloadPID = spObject.getChildPID()
if not payloadPID:
return S_ERROR( 'Payload process could not start after 10 seconds' )
else:
self.__report( 'Failed', 'Application not found', sendFlag = True )
return S_ERROR( 'Path to executable %s not found' % ( executable ) )
self.__setJobParam( 'PayloadPID', payloadPID )
watchdogInstance = WatchdogFactory().getWatchdog( self.currentPID, exeThread, spObject, jobCPUTime, jobMemory )
if not watchdogInstance['OK']:
self.log.error( 'Could not create Watchdog instance', watchdogInstance['Message'] )
return S_ERROR( 'Could not create Watchdog instance' )
self.log.verbose( 'WatchdogInstance %s' % ( watchdogInstance ) )
watchdog = watchdogInstance['Value']
self.log.verbose( 'Initializing Watchdog instance' )
watchdog.initialize()
self.log.verbose( 'Calibrating Watchdog instance' )
watchdog.calibrate()
# do not kill Test jobs by CPU time
if self.jobArgs.get( 'JobType', '' ) == 'Test':
watchdog.testCPUConsumed = False
if 'DisableCPUCheck' in self.jobArgs:
watchdog.testCPUConsumed = False
if exeThread.isAlive():
self.log.info( 'Application thread is started in Job Wrapper' )
watchdog.run()
else:
self.log.warn( 'Application thread stopped very quickly...' )
if exeThread.isAlive():
self.log.warn( 'Watchdog exited before completion of execution thread' )
while exeThread.isAlive():
time.sleep( 5 )
outputs = None
if 'Thread' in EXECUTION_RESULT:
threadResult = EXECUTION_RESULT['Thread']
if not threadResult['OK']:
self.log.error( 'Failed to execute the payload', threadResult['Message'] )
self.__report( 'Failed', 'Application thread failed', sendFlag = True )
if 'Value' in threadResult:
outs = threadResult['Value']
if outs:
self.__setJobParam( 'ApplicationError', outs[0], sendFlag = True )
else:
self.__setJobParam( 'ApplicationError', 'None reported', sendFlag = True )
else:
outputs = threadResult['Value']
if 'CPU' in EXECUTION_RESULT:
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in JobWrapper execute', cpuString )
if watchdog.checkError:
# In this case, the Watchdog has killed the Payload and the ExecutionThread can not get the CPU statistics
# os.times only reports for waited children
# Take the CPU from the last value recorded by the Watchdog
self.__report( 'Failed', watchdog.checkError, sendFlag = True )
if 'CPU' in EXECUTION_RESULT:
if 'LastUpdateCPU(s)' in watchdog.currentStats:
EXECUTION_RESULT['CPU'][0] = 0
EXECUTION_RESULT['CPU'][0] = 0
EXECUTION_RESULT['CPU'][0] = 0
EXECUTION_RESULT['CPU'][0] = watchdog.currentStats['LastUpdateCPU(s)']
if watchdog.currentStats:
self.log.info( 'Statistics collected by the Watchdog:\n ',
'\n '.join( ['%s: %s' % items for items in watchdog.currentStats.items() ] ) )
if outputs:
status = threadResult['Value'][0]
# Send final heartbeat of a configurable number of lines here
self.log.verbose( 'Sending final application standard output heartbeat' )
self.__sendFinalStdOut( exeThread )
self.log.verbose( 'Execution thread status = %s' % ( status ) )
if not watchdog.checkError and not status:
self.failedFlag = False
self.__report( 'Completed', 'Application Finished Successfully', sendFlag = True )
elif not watchdog.checkError:
self.__report( 'Completed', 'Application Finished With Errors', sendFlag = True )
else:
return S_ERROR( 'No outputs generated from job execution' )
self.log.info( 'Checking directory contents after execution:' )
res = systemCall( 5, ['ls', '-al'] )
if not res['OK']:
self.log.error( 'Failed to list the current directory', res['Message'] )
elif res['Value'][0]:
self.log.error( 'Failed to list the current directory', res['Value'][2] )
else:
# no timeout and exit code is 0
self.log.info( res['Value'][1] )
return S_OK()
#############################################################################
def __sendFinalStdOut( self, exeThread ):
"""After the Watchdog process has finished, this function sends a final
report to be presented in the StdOut in the web page via the heartbeat
mechanism.
"""
cpuConsumed = self.__getCPU()['Value']
self.log.info( 'Total CPU Consumed is: %s' % cpuConsumed[1] )
self.__setJobParam( 'TotalCPUTime(s)', cpuConsumed[0] )
normCPU = cpuConsumed[0] * self.cpuNormalizationFactor
self.__setJobParam( 'NormCPUTime(s)', normCPU )
if self.cpuNormalizationFactor:
self.log.info( 'Normalized CPU Consumed is:', normCPU )
result = exeThread.getOutput( self.maxPeekLines )
if not result['OK']:
lines = 0
appStdOut = ''
else:
lines = len( result['Value'] )
appStdOut = '\n'.join( result['Value'] )
header = 'Last %s lines of application output from JobWrapper on %s :' % ( lines, Time.toString() )
border = '=' * len( header )
cpuTotal = 'CPU Total: %s (h:m:s)' % cpuConsumed[1]
cpuTotal += " Normalized CPU Total %.1f s @ HEP'06" % normCPU
header = '\n%s\n%s\n%s\n%s\n' % ( border, header, cpuTotal, border )
appStdOut = header + appStdOut
self.log.info( appStdOut )
heartBeatDict = {}
staticParamDict = {'StandardOutput':appStdOut}
if self.jobID:
jobReport = RPCClient( 'WorkloadManagement/JobStateUpdate', timeout = 120 )
result = jobReport.sendHeartBeat( self.jobID, heartBeatDict, staticParamDict )
if not result['OK']:
self.log.error( 'Problem sending final heartbeat from JobWrapper', result['Message'] )
return
#############################################################################
def __getCPU( self ):
"""Uses os.times() to get CPU time and returns HH:MM:SS after conversion.
"""
# TODO: normalize CPU consumed via scale factor
cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
self.log.info( 'EXECUTION_RESULT[CPU] in __getCPU', cpuString )
utime, stime, cutime, cstime, _elapsed = EXECUTION_RESULT['CPU']
cpuTime = utime + stime + cutime + cstime
self.log.verbose( "Total CPU time consumed = %s" % ( cpuTime ) )
result = self.__getCPUHMS( cpuTime )
return result
#############################################################################
def __getCPUHMS( self, cpuTime ):
mins, secs = divmod( cpuTime, 60 )
hours, mins = divmod( mins, 60 )
humanTime = '%02d:%02d:%02d' % ( hours, mins, secs )
self.log.verbose( 'Human readable CPU time is: %s' % humanTime )
return S_OK( ( cpuTime, humanTime ) )
#############################################################################
  def resolveInputData( self ):
    """Input data is resolved here using a VO specific plugin module.

       Builds the LFN list from the job description, determines local SEs and
       the resolution policy module, consults the file catalogue (optionally
       via pre-computed optimizer results) and finally runs the policy module.

       :return: S_OK / S_ERROR
    """
    self.__report( 'Running', 'Input Data Resolution', sendFlag = True )
    # What is this input data? - and exit if there's no input
    inputData = self.jobArgs['InputData']
    if not inputData:
      msg = "Job Wrapper cannot resolve local replicas of input data with null job input data parameter "
      self.log.error( msg )
      return S_ERROR( msg )
    else:
      if isinstance( inputData, basestring ):
        inputData = [inputData]
      # Strip the 'LFN:' prefix to get bare LFNs
      lfns = [ fname.replace( 'LFN:', '' ) for fname in inputData ]
      self.log.verbose( 'Job input data requirement is \n%s' % ',\n'.join( lfns ) )
    # Does this site have local SEs? - not failing if it doesn't
    if 'LocalSE' in self.ceArgs:
      localSEList = self.ceArgs[ 'LocalSE']
    else:
      localSEList = gConfig.getValue( '/LocalSite/LocalSE', [] )
    if not localSEList:
      self.log.warn( "Job has input data requirement but no site LocalSE defined" )
    else:
      if isinstance( localSEList, basestring ):
        localSEList = List.fromChar( localSEList )
      self.log.info( "Site has the following local SEs: %s" % ', '.join( localSEList ) )
    # How to get this data?
    if 'InputDataModule' not in self.jobArgs:
      self.log.warn( "Job has no input data resolution module specified, using the default one" )
      inputDataPolicy = 'DIRAC.WorkloadManagementSystem.Client.InputDataResolution'
    else:
      inputDataPolicy = self.jobArgs['InputDataModule']
    self.log.verbose( "Job input data resolution policy module is %s" % ( inputDataPolicy ) )
    # Now doing the real stuff
    optReplicas = {}
    if self.optArgs:
      # The optimizer may have pre-resolved replicas; use them when decodable
      try:
        optDict, _length = DEncode.decode( self.optArgs['InputData'] )
        optReplicas = optDict['Value']
        self.log.info( 'Found optimizer catalog result' )
        self.log.verbose( optReplicas )
      except Exception, x:
        self.log.warn( str( x ) )
        self.log.warn( 'Optimizer information could not be converted to a dictionary will call catalog directly' )
    result = self.__checkFileCatalog( lfns, optReplicas )
    if not result['OK']:
      self.log.info( 'Could not obtain replica information from Optimizer File Catalog information' )
      self.log.warn( result )
      # Retry bypassing the (possibly stale) optimizer-supplied information
      result = self.__checkFileCatalog( lfns )
      if not result['OK']:
        self.log.warn( 'Could not obtain replica information from File Catalog directly' )
        self.log.warn( result )
        return S_ERROR( result['Message'] )
      else:
        resolvedData = result
    else:
      resolvedData = result
    # add input data size to accounting report (since resolution successful)
    for lfn, mdata in resolvedData['Value']['Successful'].items():
      if 'Size' in mdata:
        lfnSize = mdata['Size']
        if not isinstance( lfnSize, long ):
          try:
            lfnSize = long( lfnSize )
          except Exception, x:
            lfnSize = 0
            self.log.info( 'File size for LFN:%s was not a long integer, setting size to 0' % ( lfn ) )
        self.inputDataSize += lfnSize
    configDict = {'JobID':self.jobID, 'LocalSEList':localSEList, 'DiskSEList':self.diskSE, 'TapeSEList':self.tapeSE}
    self.log.info( configDict )
    argumentsDict = {'FileCatalog':resolvedData, 'Configuration':configDict, 'InputData':lfns, 'Job':self.jobArgs}
    self.log.info( argumentsDict )
    # Delegate the actual data access resolution to the VO-specific module
    moduleFactory = ModuleFactory()
    self.log.verbose( "Now starting execution of input data policy module" )
    moduleInstance = moduleFactory.getModule( inputDataPolicy, argumentsDict )
    if not moduleInstance['OK']:
      return moduleInstance
    module = moduleInstance['Value']
    result = module.execute()
    if not result['OK']:
      self.log.warn( 'Input data resolution failed' )
      return result
    return S_OK()
#############################################################################
def __checkFileCatalog( self, lfns, optReplicaInfo = None ):
"""This function returns dictionaries containing all relevant parameters
to allow data access from the relevant file catalogue. Optionally, optimizer
parameters can be supplied here but if these are not sufficient, the file catalogue
is subsequently consulted.
N.B. this will be considerably simplified when the DMS evolves to have a
generic FC interface and a single call for all available information.
"""
replicas = optReplicaInfo
if not replicas:
replicas = self.__getReplicaMetadata( lfns )
if not replicas['OK']:
return replicas
self.log.verbose( replicas )
failedGUIDs = []
for lfn, reps in replicas['Value']['Successful'].items():
if 'GUID' not in reps:
failedGUIDs.append( lfn )
if failedGUIDs:
self.log.info( 'The following file(s) were found not to have a GUID:\n%s' % ',\n'.join( failedGUIDs ) )
if failedGUIDs:
return S_ERROR( 'File metadata is not available' )
else:
return replicas
#############################################################################
  def __getReplicaMetadata( self, lfns ):
    """ Wrapper function to consult catalog for all necessary file metadata
        and check the result.

        :param list lfns: LFNs to look up
        :return: S_OK with replica info merged into the file metadata records,
                 S_ERROR when any LFN lacks replicas or GUIDs
    """
    start = time.time()
    repsResult = self.dm.getReplicas( lfns )
    timing = time.time() - start
    self.log.info( 'Replica Lookup Time: %.2f seconds ' % ( timing ) )
    if not repsResult['OK']:
      self.log.warn( repsResult['Message'] )
      return repsResult
    # Collect LFNs with no usable replica information (failed lookup or an
    # empty replica dictionary) so the job can be flagged rather than run blind
    badLFNCount = 0
    badLFNs = []
    catalogResult = repsResult['Value']
    for lfn, cause in catalogResult.get( 'Failed', {} ).items():
      badLFNCount += 1
      badLFNs.append( 'LFN:%s Problem: %s' % ( lfn, cause ) )
    for lfn, replicas in catalogResult.get( 'Successful', {} ).items():
      if not replicas:
        badLFNCount += 1
        badLFNs.append( 'LFN:%s Problem: Null replica value' % ( lfn ) )
    if badLFNCount:
      self.log.warn( 'Job Wrapper found %s problematic LFN(s) for job %s' % ( badLFNCount, self.jobID ) )
      param = '\n'.join( badLFNs )
      self.log.info( param )
      self.__setJobParam( 'MissingLFNs', param )
      return S_ERROR( 'Input Data Not Available' )
    # Must retrieve GUIDs from LFC for files
    start = time.time()
    guidDict = self.fc.getFileMetadata( lfns )
    timing = time.time() - start
    self.log.info( 'GUID Lookup Time: %.2f seconds ' % ( timing ) )
    if not guidDict['OK']:
      self.log.warn( 'Failed to retrieve GUIDs from file catalog' )
      self.log.warn( guidDict['Message'] )
      return guidDict
    failed = guidDict['Value']['Failed']
    if failed:
      self.log.warn( 'Could not retrieve GUIDs from catalog for the following files' )
      self.log.warn( failed )
      return S_ERROR( 'Missing GUIDs' )
    # Merge the replica info into the metadata records returned to the caller
    for lfn, reps in repsResult['Value']['Successful'].items():
      guidDict['Value']['Successful'][lfn].update( reps )
    catResult = guidDict
    return catResult
#############################################################################
  def processJobOutputs( self, arguments ):
    """Outputs for a job may be treated here.

       Resolves the OutputSandbox (wildcards, directory tarring), uploads it
       to the sandbox store and, where requested, uploads OutputData files to
       grid storage.

       :param arguments: unused here, kept for interface compatibility
       :return: S_OK / S_ERROR
    """
    # first iteration of this, no checking of wildcards or oversize sandbox files etc.
    outputSandbox = self.jobArgs.get( 'OutputSandbox', [] )
    if isinstance( outputSandbox, basestring ):
      outputSandbox = [ outputSandbox ]
    if outputSandbox:
      self.log.verbose( 'OutputSandbox files are: %s' % ', '.join( outputSandbox ) )
    # OutputData may also come as a single ';'-separated string
    outputData = self.jobArgs.get( 'OutputData', [] )
    if outputData and isinstance( outputData, basestring ):
      outputData = outputData.split( ';' )
    if outputData:
      self.log.verbose( 'OutputData files are: %s' % ', '.join( outputData ) )
    # First resolve any wildcards for output files and work out if any files are missing
    resolvedSandbox = self.__resolveOutputSandboxFiles( outputSandbox )
    if not resolvedSandbox['OK']:
      # NOTE(review): no early return here although 'Value' is accessed below;
      # currently safe since the resolver only returns S_OK - confirm
      self.log.warn( 'Output sandbox file resolution failed:' )
      self.log.warn( resolvedSandbox['Message'] )
      self.__report( 'Failed', 'Resolving Output Sandbox' )
    fileList = resolvedSandbox['Value']['Files']
    missingFiles = resolvedSandbox['Value']['Missing']
    if missingFiles:
      self.jobReport.setJobParameter( 'OutputSandboxMissingFiles', ', '.join( missingFiles ), sendFlag = False )
    if 'Owner' not in self.jobArgs:
      msg = 'Job has no owner specified'
      self.log.warn( msg )
      return S_OK( msg )
    # Do not overwrite in case of Error
    if not self.failedFlag:
      self.__report( 'Completed', 'Uploading Output Sandbox' )
    uploadOutputDataInAnyCase = False
    if fileList and self.jobID:
      self.outputSandboxSize = getGlobbedTotalSize( fileList )
      self.log.info( 'Attempting to upload Sandbox with limit:', self.sandboxSizeLimit )
      sandboxClient = SandboxStoreClient()
      result = sandboxClient.uploadFilesAsSandboxForJob( fileList, self.jobID,
                                                         'Output', self.sandboxSizeLimit ) # 1024*1024*10
      if not result['OK']:
        self.log.error( 'Output sandbox upload failed with message', result['Message'] )
        # Store rejection (e.g. oversize): fall back to shipping the sandbox
        # tarball as output data on grid storage
        outputSandboxData = result.get( 'SandboxFileName' )
        if outputSandboxData:
          self.log.info( 'Attempting to upload %s as output data' % ( outputSandboxData ) )
          if self.failedFlag:
            outputData = [outputSandboxData]
            uploadOutputDataInAnyCase = True
          else:
            outputData.append( outputSandboxData )
          self.jobReport.setJobParameter( 'OutputSandbox', 'Sandbox uploaded to grid storage', sendFlag = False )
          self.jobReport.setJobParameter( 'OutputSandboxLFN',
                                          self.__getLFNfromOutputFile( outputSandboxData )[0], sendFlag = False )
        else:
          self.log.info( 'Could not get SandboxFileName to attempt upload to Grid storage' )
          return S_ERROR( 'Output sandbox upload failed and no file name supplied for failover to Grid storage' )
      else:
        # Do not overwrite in case of Error
        if not self.failedFlag:
          self.__report( 'Completed', 'Output Sandbox Uploaded' )
        self.log.info( 'Sandbox uploaded successfully' )
    if ( outputData and not self.failedFlag ) or uploadOutputDataInAnyCase:
      # Do not upload outputdata if the job has failed.
      # The exception is when the outputData is what was the OutputSandbox, which should be uploaded in any case
      outputSE = self.jobArgs.get( 'OutputSE', self.defaultOutputSE )
      if isinstance( outputSE, basestring ):
        outputSE = [outputSE]
      outputPath = self.jobArgs.get( 'OutputPath', self.defaultOutputPath )
      if not isinstance( outputPath, basestring ):
        outputPath = self.defaultOutputPath
      if not outputSE and not self.defaultFailoverSE:
        return S_ERROR( 'No output SEs defined in VO configuration' )
      result = self.__transferOutputDataFiles( outputData, outputSE, outputPath )
      if not result['OK']:
        return result
    return S_OK( 'Job outputs processed' )
#############################################################################
def __resolveOutputSandboxFiles( self, outputSandbox ):
"""Checks the output sandbox file list and resolves any specified wildcards.
Also tars any specified directories.
"""
missing = []
okFiles = []
for i in outputSandbox:
self.log.verbose( 'Looking at OutputSandbox file/directory/wildcard: %s' % i )
globList = glob.glob( i )
for check in globList:
if os.path.isfile( check ):
self.log.verbose( 'Found locally existing OutputSandbox file: %s' % check )
okFiles.append( check )
if os.path.isdir( check ):
self.log.verbose( 'Found locally existing OutputSandbox directory: %s' % check )
cmd = ['tar', 'cf', '%s.tar' % check, check]
result = systemCall( 60, cmd )
if not result['OK']:
self.log.error( 'Failed to create OutputSandbox tar', result['Message'] )
elif result['Value'][0]:
self.log.error( 'Failed to create OutputSandbox tar', result['Value'][2] )
if os.path.isfile( '%s.tar' % ( check ) ):
self.log.verbose( 'Appending %s.tar to OutputSandbox' % check )
okFiles.append( '%s.tar' % ( check ) )
else:
self.log.warn( 'Could not tar OutputSandbox directory: %s' % check )
missing.append( check )
for i in outputSandbox:
if not i in okFiles:
if not '%s.tar' % i in okFiles:
if not re.search( '\*', i ):
if not i in missing:
missing.append( i )
result = {'Missing':missing, 'Files':okFiles}
return S_OK( result )
#############################################################################
  def __transferOutputDataFiles( self, outputData, outputSE, outputPath ):
    """ Performs the upload and registration in the File Catalog(s)

        :param list outputData: files to upload (local names or 'LFN:...' entries)
        :param list outputSE: candidate destination storage elements
        :param str outputPath: optional path component used to build the LFN
        :return: S_OK / S_ERROR
    """
    self.log.verbose( 'Uploading output data files' )
    self.__report( 'Completed', 'Uploading Output Data' )
    self.log.info( 'Output data files %s to be uploaded to %s SE' % ( ', '.join( outputData ), outputSE ) )
    missing = []
    uploaded = []
    # Separate outputdata in the form of lfns and local files
    lfnList = []
    nonlfnList = []
    for out in outputData:
      if out.lower().find( 'lfn:' ) != -1:
        lfnList.append( out )
      else:
        nonlfnList.append( out )
    # Check whether list of outputData has a globbable pattern
    globbedOutputList = List.uniqueElements( getGlobbedFiles( nonlfnList ) )
    if not globbedOutputList == nonlfnList and globbedOutputList:
      self.log.info( 'Found a pattern in the output data file list, files to upload are:',
                     ', '.join( globbedOutputList ) )
      nonlfnList = globbedOutputList
    outputData = lfnList + nonlfnList
    # POOL GUIDs are optional; only files known to a POOL XML catalogue get one
    pfnGUID = {}
    result = getGUID( outputData )
    if not result['OK']:
      self.log.warn( 'Failed to determine POOL GUID(s) for output file list (OK if not POOL files)',
                     result['Message'] )
    else:
      pfnGUID = result['Value']
    for outputFile in outputData:
      ( lfn, localfile ) = self.__getLFNfromOutputFile( outputFile, outputPath )
      if not os.path.exists( localfile ):
        self.log.error( 'Missing specified output data file:', outputFile )
        continue
      # # file size
      localfileSize = getGlobbedTotalSize( localfile )
      self.outputDataSize += getGlobbedTotalSize( localfile )
      outputFilePath = os.path.join( os.getcwd(), localfile )
      # # file GUID
      fileGUID = pfnGUID[localfile] if localfile in pfnGUID else None
      if fileGUID:
        self.log.verbose( 'Found GUID for file from POOL XML catalogue %s' % localfile )
      # # file checksum
      cksm = fileAdler( outputFilePath )
      fileMetaDict = { "Size": localfileSize,
                       "LFN" : lfn,
                       "ChecksumType" : "Adler32",
                       "Checksum": cksm,
                       "GUID" : fileGUID }
      outputSEList = self.__getSortedSEList( outputSE )
      upload = self.failoverTransfer.transferAndRegisterFile( fileName = localfile,
                                                              localPath = outputFilePath,
                                                              lfn = lfn,
                                                              destinationSEList = outputSEList,
                                                              fileMetaDict = fileMetaDict,
                                                              fileCatalog = self.defaultCatalog,
                                                              masterCatalogOnly = self.masterCatalogOnlyFlag )
      if upload['OK']:
        self.log.info( '"%s" successfully uploaded to "%s" as "LFN:%s"' % ( localfile,
                                                                            upload['Value']['uploadedSE'],
                                                                            lfn ) )
        uploaded.append( lfn )
        continue
      # Primary upload failed - attempt the configured failover SEs, if any
      self.log.error( 'Could not putAndRegister file',
                      '%s with LFN %s to %s with GUID %s trying failover storage' % ( localfile, lfn,
                                                                                      ', '.join( outputSEList ),
                                                                                      fileGUID ) )
      if not self.defaultFailoverSE:
        self.log.info( 'No failover SEs defined for JobWrapper,',
                       'cannot try to upload output file %s anywhere else.' % outputFile )
        missing.append( outputFile )
        continue
      failoverSEs = self.__getSortedSEList( self.defaultFailoverSE )
      targetSE = outputSEList[0]
      result = self.failoverTransfer.transferAndRegisterFileFailover( fileName = localfile,
                                                                      localPath = outputFilePath,
                                                                      lfn = lfn,
                                                                      targetSE = targetSE,
                                                                      failoverSEList = failoverSEs,
                                                                      fileMetaDict = fileMetaDict,
                                                                      fileCatalog = self.defaultCatalog,
                                                                      masterCatalogOnly = self.masterCatalogOnlyFlag )
      if not result['OK']:
        self.log.error( 'Completely failed to upload file to failover SEs', result['Message'] )
        missing.append( outputFile )
      else:
        self.log.info( 'File %s successfully uploaded to failover storage element' % lfn )
        uploaded.append( lfn )
    # For files correctly uploaded must report LFNs to job parameters
    if uploaded:
      report = ', '.join( uploaded )
      # In case the VO payload has also uploaded data using the same parameter
      # name this should be checked prior to setting.
      monitoring = RPCClient( 'WorkloadManagement/JobMonitoring', timeout = 120 )
      result = monitoring.getJobParameter( int( self.jobID ), 'UploadedOutputData' )
      if result['OK']:
        if 'UploadedOutputData' in result['Value']:
          report += ', %s' % result['Value']['UploadedOutputData']
      self.jobReport.setJobParameter( 'UploadedOutputData', report, sendFlag = False )
    # TODO Notify the user of any output data / output sandboxes
    if missing:
      self.__setJobParam( 'OutputData', 'MissingFiles: %s' % ', '.join( missing ) )
      self.__report( 'Failed', 'Uploading Job OutputData' )
      return S_ERROR( 'Failed to upload OutputData' )
    self.__report( 'Completed', 'Output Data Uploaded' )
    return S_OK( 'OutputData uploaded successfully' )
#############################################################################
def __getSortedSEList( self, seList ):
""" Randomize SE, putting first those that are Local/Close to the Site
"""
if not seList:
return seList
localSEs = []
otherSEs = []
siteSEs = []
seMapping = getSEsForSite( DIRAC.siteName() )
if seMapping['OK'] and seMapping['Value']:
siteSEs = seMapping['Value']
for seName in seList:
if seName in siteSEs:
localSEs.append( seName )
else:
otherSEs.append( seName )
return List.randomize( localSEs ) + List.randomize( otherSEs )
#############################################################################
def __getLFNfromOutputFile( self, outputFile, outputPath = '' ):
"""Provides a generic convention for VO output data
files if no path is specified.
"""
if not re.search( '^LFN:', outputFile ):
localfile = outputFile
initial = self.owner[:1]
vo = getVOForGroup( self.userGroup )
if not vo:
vo = 'dirac'
ops = Operations( vo = vo )
user_prefix = ops.getValue( "LFNUserPrefix", 'user' )
basePath = '/' + vo + '/' + user_prefix + '/' + initial + '/' + self.owner
if outputPath:
# If output path is given, append it to the user path and put output files in this directory
if outputPath.startswith( '/' ):
outputPath = outputPath[1:]
else:
# By default the output path is constructed from the job id
subdir = str( self.jobID / 1000 )
outputPath = subdir + '/' + str( self.jobID )
lfn = os.path.join( basePath, outputPath, os.path.basename( localfile ) )
else:
# if LFN is given, take it as it is
localfile = os.path.basename( outputFile.replace( "LFN:", "" ) )
lfn = outputFile.replace( "LFN:", "" )
return ( lfn, localfile )
#############################################################################
def transferInputSandbox( self, inputSandbox ):
"""Downloads the input sandbox for the job
"""
sandboxFiles = []
registeredISB = []
lfns = []
self.__report( 'Running', 'Downloading InputSandbox' )
if not isinstance( inputSandbox, ( list, tuple ) ):
inputSandbox = [ inputSandbox ]
for isb in inputSandbox:
if isb.find( "LFN:" ) == 0 or isb.find( "lfn:" ) == 0:
lfns.append( isb )
else:
if isb.find( "SB:" ) == 0:
registeredISB.append( isb )
else:
sandboxFiles.append( os.path.basename( isb ) )
self.log.info( 'Downloading InputSandbox for job %s: %s' % ( self.jobID, ', '.join( sandboxFiles ) ) )
if os.path.exists( '%s/inputsandbox' % ( self.root ) ):
# This is a debugging tool, get the file from local storage to debug Job Wrapper
sandboxFiles.append( 'jobDescription.xml' )
for inputFile in sandboxFiles:
if os.path.exists( '%s/inputsandbox/%s' % ( self.root, inputFile ) ):
self.log.info( 'Getting InputSandbox file %s from local directory for testing' % ( inputFile ) )
shutil.copy( self.root + '/inputsandbox/' + inputFile, inputFile )
result = S_OK( sandboxFiles )
else:
if registeredISB:
for isb in registeredISB:
self.log.info( "Downloading Input SandBox %s" % isb )
result = SandboxStoreClient().downloadSandbox( isb )
if not result[ 'OK' ]:
self.__report( 'Running', 'Failed Downloading InputSandbox' )
return S_ERROR( "Cannot download Input sandbox %s: %s" % ( isb, result[ 'Message' ] ) )
else:
self.inputSandboxSize += result[ 'Value' ]
if lfns:
self.log.info( "Downloading Input SandBox LFNs, number of files to get", len( lfns ) )
self.__report( 'Running', 'Downloading InputSandbox LFN(s)' )
lfns = [fname.replace( 'LFN:', '' ).replace( 'lfn:', '' ) for fname in lfns]
download = self.dm.getFile( lfns )
if not download['OK']:
self.log.warn( download )
self.__report( 'Running', 'Failed Downloading InputSandbox LFN(s)' )
return S_ERROR( download['Message'] )
failed = download['Value']['Failed']
if failed:
self.log.warn( 'Could not download InputSandbox LFN(s)' )
self.log.warn( failed )
return S_ERROR( str( failed ) )
for lfn in lfns:
if os.path.exists( '%s/%s' % ( self.root, os.path.basename( download['Value']['Successful'][lfn] ) ) ):
sandboxFiles.append( os.path.basename( download['Value']['Successful'][lfn] ) )
userFiles = sandboxFiles + [ os.path.basename( lfn ) for lfn in lfns ]
for possibleTarFile in userFiles:
if not os.path.exists( possibleTarFile ) :
continue
try:
if os.path.isfile( possibleTarFile ) and tarfile.is_tarfile( possibleTarFile ):
self.log.info( 'Unpacking input sandbox file %s' % ( possibleTarFile ) )
tarFile = tarfile.open( possibleTarFile, 'r' )
for member in tarFile.getmembers():
tarFile.extract( member, os.getcwd() )
except Exception, x :
return S_ERROR( 'Could not untar %s with exception %s' % ( possibleTarFile, str( x ) ) )
if userFiles:
self.inputSandboxSize = getGlobbedTotalSize( userFiles )
self.log.info( "Total size of input sandbox:",
"%0.2f MiB (%s bytes)" % ( self.inputSandboxSize / 1048576.0, self.inputSandboxSize ) )
return S_OK( 'InputSandbox downloaded' )
#############################################################################
def finalize( self, arguments ):
"""Perform any final actions to clean up after job execution.
"""
self.log.info( 'Running JobWrapper finalization' )
# find if there are pending failover requests
requests = self.__getRequestFiles()
outputDataRequest = self.failoverTransfer.getRequest()
requestFlag = len( requests ) > 0 or not outputDataRequest.isEmpty()
if self.failedFlag and requestFlag:
self.log.info( 'Application finished with errors and there are pending requests for this job.' )
self.__report( 'Failed', 'Pending Requests' )
elif not self.failedFlag and requestFlag:
self.log.info( 'Application finished successfully with pending requests for this job.' )
self.__report( 'Completed', 'Pending Requests' )
elif self.failedFlag and not requestFlag:
self.log.info( 'Application finished with errors with no pending requests.' )
self.__report( 'Failed' )
elif not self.failedFlag and not requestFlag:
self.log.info( 'Application finished successfully with no pending requests for this job.' )
self.__report( 'Done', 'Execution Complete' )
self.sendFailoverRequest()
self.__cleanUp()
if self.failedFlag:
return 1
else:
return 0
#############################################################################
  def sendJobAccounting( self, status = '', minorStatus = '' ):
    """Send WMS accounting data.

    Builds and commits the accounting record for the job (CPU and wall
    clock time, sandbox and data sizes, final states).  Idempotent: once
    the report has been sent, further calls return immediately.

    :param str status: optional override for the final WMS major status
    :param str minorStatus: optional override for the final WMS minor status
    :return: result structure of accountingReport.commit()
    """
    if self.jobAccountingSent:
      # Accounting was already shipped (possibly via a failover request).
      return S_OK()
    if status:
      self.wmsMajorStatus = status
    if minorStatus:
      self.wmsMinorStatus = minorStatus
    self.accountingReport.setEndTime()
    # CPUTime and ExecTime
    if not 'CPU' in EXECUTION_RESULT:
      # If the payload has not started execution (error with input data, SW, SB,...)
      # Execution result is not filled use self.initialTiming
      self.log.info( 'EXECUTION_RESULT[CPU] missing in sendJobAccounting' )
      finalStat = os.times()
      EXECUTION_RESULT['CPU'] = []
      for i in range( len( finalStat ) ):
        EXECUTION_RESULT['CPU'].append( finalStat[i] - self.initialTiming[i] )
    cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
    self.log.info( 'EXECUTION_RESULT[CPU] in sendJobAccounting', cpuString )
    # os.times() ordering: user, system, children user, children system, elapsed
    utime, stime, cutime, cstime, elapsed = EXECUTION_RESULT['CPU']
    cpuTime = utime + stime + cutime + cstime
    execTime = elapsed
    diskSpaceConsumed = getGlobbedTotalSize( os.path.join( self.root, str( self.jobID ) ) )
    # Fill the data
    acData = {
               'User' : self.owner,
               'UserGroup' : self.userGroup,
               'JobGroup' : self.jobGroup,
               'JobType' : self.jobType,
               'JobClass' : self.jobClass,
               'ProcessingType' : self.processingType,
               'FinalMajorStatus' : self.wmsMajorStatus,
               'FinalMinorStatus' : self.wmsMinorStatus,
               'CPUTime' : cpuTime,
               # Based on the factor to convert raw CPU to Normalized units (based on the CPU Model)
               'NormCPUTime' : cpuTime * self.cpuNormalizationFactor,
               'ExecTime' : execTime,
               'InputDataSize' : self.inputDataSize,
               'OutputDataSize' : self.outputDataSize,
               'InputDataFiles' : self.inputDataFiles,
               'OutputDataFiles' : self.outputDataFiles,
               'DiskSpace' : diskSpaceConsumed,
               'InputSandBoxSize' : self.inputSandboxSize,
               'OutputSandBoxSize' : self.outputSandboxSize,
               'ProcessedEvents' : self.processedEvents
             }
    self.log.verbose( 'Accounting Report is:' )
    self.log.verbose( acData )
    self.accountingReport.setValuesFromDict( acData )
    result = self.accountingReport.commit()
    # Even if it fails a failover request will be created
    self.jobAccountingSent = True
    return result
#############################################################################
  def sendFailoverRequest( self, status = '', minorStatus = '' ):
    """ Create and send a combined job failover request if any

    Aggregates into a single Request object:
      - a forward-DISET operation carrying stored job status updates,
      - a forward-DISET operation for the accounting report, if direct
        submission of accounting failed,
      - operations accumulated by the failover transfer machinery,
      - any '*_request.json' files left in the working directory.
    The combined request is validated and pushed via ReqClient with
    retries; if it ultimately cannot be stored the job is marked Failed.

    :param str status: optional WMS major status forwarded to accounting
    :param str minorStatus: optional WMS minor status forwarded to accounting
    :return: S_OK / S_ERROR structure from the request submission
    """
    request = Request()
    requestName = 'job_%s' % self.jobID
    if 'JobName' in self.jobArgs:
      # To make the request names more appealing for users
      jobName = self.jobArgs['JobName']
      if isinstance( jobName, basestring ) and jobName:
        # Strip characters that are not wanted in request names
        jobName = jobName.replace( ' ', '' ).replace( '(', '' ).replace( ')', '' ).replace( '"', '' )
        jobName = jobName.replace( '.', '' ).replace( '{', '' ).replace( '}', '' ).replace( ':', '' )
        requestName = '%s_%s' % ( jobName, requestName )
    request.RequestName = requestName.replace( '"', '' )
    request.JobID = self.jobID
    request.SourceComponent = "Job_%s" % self.jobID
    # JobReport part first
    result = self.jobReport.generateForwardDISET()
    if result['OK']:
      if isinstance( result["Value"], Operation ):
        self.log.info( 'Adding a job state update DISET operation to the request' )
        request.addOperation( result["Value"] )
    else:
      self.log.warn( 'JobReportFailure', "Could not generate a forwardDISET operation: %s" % result['Message'] )
      self.log.warn( 'JobReportFailure', "The job won't fail, but the jobLogging info might be incomplete" )
    # Accounting part
    if not self.jobID:
      self.log.debug( 'No accounting to be sent since running locally' )
    else:
      result = self.sendJobAccounting( status, minorStatus )
      if not result['OK']:
        self.log.warn( 'JobAccountingFailure', "Could not send job accounting with result: \n%s" % result['Message'] )
        self.log.warn( 'JobAccountingFailure', "Trying to build a failover request" )
        if 'rpcStub' in result:
          self.log.verbose( "Adding accounting report to failover request object" )
          forwardDISETOp = Operation()
          forwardDISETOp.Type = "ForwardDISET"
          forwardDISETOp.Arguments = DEncode.encode( result['rpcStub'] )
          request.addOperation( forwardDISETOp )
          self.log.verbose( "Added accounting report to failover request object" )
        else:
          self.log.warn( 'JobAccountingFailure', "No rpcStub found to construct failover request for job accounting report" )
          self.log.warn( 'JobAccountingFailure', "The job won't fail, but the accounting for this job won't be sent" )
    # Failover transfer requests
    for storedOperation in self.failoverTransfer.request:
      request.addOperation( storedOperation )
    # Any other requests in the current directory
    rfiles = self.__getRequestFiles()
    for rfname in rfiles:
      rFile = open( rfname, 'r' )
      requestStored = Request( json.load( rFile ) )
      rFile.close()
      for storedOperation in requestStored:
        request.addOperation( storedOperation )
    if len( request ):
      # The request is ready, send it now
      isValid = RequestValidator().validate( request )
      if not isValid["OK"]:
        self.log.error( "Failover request is not valid", isValid["Message"] )
      else:
        # We try several times to put the request before failing the job: it's very important that requests go through,
        # or the job will be in an unclear status (workflow ok, but, e.g., the output files won't be registered).
        # It's a poor man solution, but I don't see fancy alternatives
        for counter in range( 10 ):
          requestClient = ReqClient()
          result = requestClient.putRequest( request )
          if result['OK']:
            resDigest = request.getDigest()
            digest = resDigest['Value']
            self.jobReport.setJobParameter( 'PendingRequest', digest )
            break
          else:
            self.log.error( 'Failed to set failover request',
                            '%d: %s. Re-trying...' % ( counter, result['Message'] ) )
            # Recreate the client on the next attempt; back off cubically
            # (first retry sleeps 0 seconds since counter starts at 0).
            del requestClient
            time.sleep( counter ** 3 )
        if not result['OK']:
          self.__report( 'Failed', 'Failover Request Failed' )
        return result
    return S_OK()
#############################################################################
def __getRequestFiles( self ):
"""Simple wrapper to return the list of request files.
"""
return glob.glob( '*_request.json' )
#############################################################################
def __cleanUp( self ):
"""Cleans up after job processing. Can be switched off via environment
variable DO_NOT_DO_JOB_CLEANUP or by JobWrapper configuration option.
"""
# Environment variable is a feature for DIRAC (helps local debugging).
if 'DO_NOT_DO_JOB_CLEANUP' in os.environ or not self.cleanUpFlag:
cleanUp = False
else:
cleanUp = True
os.chdir( self.root )
if cleanUp:
self.log.verbose( 'Cleaning up job working directory' )
if os.path.exists( str( self.jobID ) ):
shutil.rmtree( str( self.jobID ) )
#############################################################################
def __report( self, status = '', minorStatus = '', sendFlag = False ):
"""Wraps around setJobStatus of state update client
"""
if status:
self.wmsMajorStatus = status
if minorStatus:
self.wmsMinorStatus = minorStatus
jobStatus = self.jobReport.setJobStatus( status = status, minor = minorStatus, sendFlag = sendFlag )
if not jobStatus['OK']:
self.log.warn( jobStatus['Message'] )
if self.jobID:
self.log.verbose( 'setJobStatus(%s,%s,%s,%s)' % ( self.jobID, status, minorStatus, 'JobWrapper' ) )
return jobStatus
#############################################################################
def __setJobParam( self, name, value, sendFlag = False ):
"""Wraps around setJobParameter of state update client
"""
jobParam = self.jobReport.setJobParameter( str( name ), str( value ), sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameter(%s,%s,%s)' % ( self.jobID, name, value ) )
return jobParam
#############################################################################
def __setJobParamList( self, value, sendFlag = False ):
"""Wraps around setJobParameters of state update client
"""
jobParam = self.jobReport.setJobParameters( value, sendFlag )
if not jobParam['OK']:
self.log.warn( jobParam['Message'] )
if self.jobID:
self.log.verbose( 'setJobParameters(%s,%s)' % ( self.jobID, value ) )
return jobParam
###############################################################################
###############################################################################
class ExecutionThread( threading.Thread ):
  """Thread that runs the payload command through spObject.systemCall,
  records timing and per-field os.times() deltas in the module-level
  EXECUTION_RESULT dict, and keeps a bounded rolling buffer of output
  lines for peeking by the watchdog.
  """

  #############################################################################
  def __init__( self, spObject, cmd, maxPeekLines, stdoutFile, stderrFile, exeEnv ):
    """
    :param spObject: subprocess helper exposing systemCall()/getChildPID()
    :param cmd: command line to execute
    :param maxPeekLines: cap on the rolling output-line buffer
    :param stdoutFile: file name to append stdout lines to (may be empty)
    :param stderrFile: file name to append stderr lines to (may be empty)
    :param exeEnv: environment passed to the child process
    """
    threading.Thread.__init__( self )
    self.cmd = cmd
    self.spObject = spObject
    self.outputLines = []
    self.maxPeekLines = maxPeekLines
    self.stdout = stdoutFile
    self.stderr = stderrFile
    self.exeEnv = exeEnv

  #############################################################################
  def run( self ):
    # FIXME: why local instances of object variables are created?
    cmd = self.cmd
    spObject = self.spObject
    start = time.time()
    initialStat = os.times()
    # sendOutput is invoked by systemCall for each line of child output.
    output = spObject.systemCall( cmd, env = self.exeEnv, callbackFunction = self.sendOutput, shell = True )
    EXECUTION_RESULT['Thread'] = output
    timing = time.time() - start
    EXECUTION_RESULT['Timing'] = timing
    finalStat = os.times()
    EXECUTION_RESULT['CPU'] = []
    for i in range( len( finalStat ) ):
      EXECUTION_RESULT['CPU'].append( finalStat[i] - initialStat[i] )
    cpuString = ' '.join( ['%.2f' % x for x in EXECUTION_RESULT['CPU'] ] )
    gLogger.info( 'EXECUTION_RESULT[CPU] after Execution of spObject.systemCall', cpuString )
    gLogger.info( 'EXECUTION_RESULT[Thread] after Execution of spObject.systemCall', str( EXECUTION_RESULT['Thread'] ) )

  #############################################################################
  def getCurrentPID( self ):
    # PID of the child process currently being executed by spObject.
    return self.spObject.getChildPID()

  #############################################################################
  def sendOutput( self, stdid, line ):
    # stdid: 0 for stdout, 1 for stderr (callback convention of systemCall).
    if stdid == 0 and self.stdout:
      outputFile = open( self.stdout, 'a+' )
      print >> outputFile, line
      outputFile.close()
    elif stdid == 1 and self.stderr:
      errorFile = open( self.stderr, 'a+' )
      print >> errorFile, line
      errorFile.close()
    self.outputLines.append( line )
    size = len( self.outputLines )
    if size > self.maxPeekLines:
      # reduce max size of output peeking
      self.outputLines.pop( 0 )

  #############################################################################
  def getOutput( self, lines = 0 ):
    """Return up to the last *lines* buffered output lines (all when 0).

    Note: when *lines* is given, the internal buffer is truncated to
    that tail as a side effect.
    """
    if self.outputLines:
      # restrict to smaller number of lines for regular
      # peeking by the watchdog
      # FIXME: this is multithread, thus single line would be better
      if lines:
        size = len( self.outputLines )
        cut = size - lines
        self.outputLines = self.outputLines[cut:]
      return S_OK( self.outputLines )
    return S_ERROR( 'No Job output found' )
def rescheduleFailedJob( jobID, message, jobReport = None ):
  """Record the failure in the job report and ask the JobManager to
  reschedule the job.

  :param jobID: ID of the job to reschedule (anything int()-convertible)
  :param str message: description of the failure triggering the reschedule
  :param jobReport: optional existing JobReport object to reuse
  :return: 'Rescheduled' on success, 'Failed' when rescheduling is not possible
  """
  rescheduleResult = 'Rescheduled'
  try:
    gLogger.warn( 'Failure during %s' % ( message ) )
    # Setting a job parameter does not help since the job will be rescheduled,
    # instead set the status with the cause and then another status showing the
    # reschedule operation.
    if not jobReport:
      gLogger.info( 'Creating a new JobReport Object' )
      jobReport = JobReport( int( jobID ), 'JobWrapper' )
    jobReport.setApplicationStatus( 'Failed %s ' % message, sendFlag = False )
    jobReport.setJobStatus( 'Rescheduled', message, sendFlag = False )
    # We must send Job States and Parameters before it gets reschedule
    jobReport.sendStoredStatusInfo()
    jobReport.sendStoredJobParameters()
    gLogger.info( 'Job will be rescheduled after exception during execution of the JobWrapper' )
    jobManager = RPCClient( 'WorkloadManagement/JobManager' )
    result = jobManager.rescheduleJob( int( jobID ) )
    if not result['OK']:
      gLogger.warn( result['Message'] )
      if 'Maximum number of reschedulings is reached' in result['Message']:
        rescheduleResult = 'Failed'
    return rescheduleResult
  except Exception:
    # Deliberately broad: this is a last-resort recovery path and must
    # never itself raise into the caller.
    gLogger.exception( 'JobWrapperTemplate failed to reschedule Job' )
    return 'Failed'
# EOF
|
vmendez/DIRAC
|
WorkloadManagementSystem/JobWrapper/JobWrapper.py
|
Python
|
gpl-3.0
| 62,593
|
[
"DIRAC"
] |
39e6e3c6c339d81d1ab84219511b5b701a72be053bac1d506b785309abcbd852
|
"""
Universe configuration builder.
"""
import os
import sys
import logging
import logging.config
from optparse import OptionParser
import ConfigParser
from galaxy.util import string_as_bool, listify
log = logging.getLogger( __name__ )
def resolve_path( path, root ):
    """If 'path' is relative make absolute by prepending 'root'"""
    if os.path.isabs( path ):
        return path
    return os.path.join( root, path )
class ConfigurationError( Exception ):
    """Raised when required configuration directories or files are
    missing or cannot be created (see Configuration.check)."""
    pass
class Configuration( object ):
    """Tool-shed application configuration.

    Reads all settings from the keyword arguments (typically the parsed
    ini file), resolving relative paths against the configured root
    directory and coercing string booleans with string_as_bool.
    """

    def __init__( self, **kwargs ):
        """Populate configuration attributes from *kwargs*."""
        self.config_dict = kwargs
        self.root = kwargs.get( 'root_dir', '.' )
        # Resolve paths of other config files
        self.__parse_config_file_options( kwargs )
        # Collect the umask and primary gid from the environment
        self.umask = os.umask( 077 ) # get the current umask
        os.umask( self.umask ) # can't get w/o set, so set it back
        self.gid = os.getgid() # if running under newgrp(1) we'll need to fix the group of data created on the cluster
        # Database related configuration
        self.database = resolve_path( kwargs.get( "database_file", "database/community.sqlite" ), self.root )
        self.database_connection = kwargs.get( "database_connection", False )
        self.database_engine_options = get_database_engine_options( kwargs )
        self.database_create_tables = string_as_bool( kwargs.get( "database_create_tables", "True" ) )
        # Where dataset files are stored
        self.file_path = resolve_path( kwargs.get( "file_path", "database/community_files" ), self.root )
        self.new_file_path = resolve_path( kwargs.get( "new_file_path", "database/tmp" ), self.root )
        self.cookie_path = kwargs.get( "cookie_path", "/" )
        self.enable_quotas = string_as_bool( kwargs.get( 'enable_quotas', False ) )
        self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root )
        self.id_secret = kwargs.get( "id_secret", "USING THE DEFAULT IS NOT SECURE!" )
        # Tool stuff
        self.tool_filters = listify( kwargs.get( "tool_filters", [] ) )
        self.tool_label_filters = listify( kwargs.get( "tool_label_filters", [] ) )
        self.tool_section_filters = listify( kwargs.get( "tool_section_filters", [] ) )
        self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
        self.tool_secret = kwargs.get( "tool_secret", "" )
        # NOTE: tool_data_path is resolved against the current working
        # directory, not self.root, unlike the other path options.
        self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "shed-tool-data" ), os.getcwd() )
        self.integrated_tool_panel_config = resolve_path( kwargs.get( 'integrated_tool_panel_config', 'integrated_tool_panel.xml' ), self.root )
        self.builds_file_path = resolve_path( kwargs.get( "builds_file_path", os.path.join( self.tool_data_path, 'shared', 'ucsc', 'builds.txt') ), self.root )
        self.len_file_path = resolve_path( kwargs.get( "len_file_path", os.path.join( self.tool_data_path, 'shared','ucsc','chrom') ), self.root )
        self.ftp_upload_dir = kwargs.get( 'ftp_upload_dir', None )
        # Install and test framework for testing tools contained in repositories.
        self.num_tool_test_results_saved = kwargs.get( 'num_tool_test_results_saved', 5 )
        # Location for dependencies
        if 'tool_dependency_dir' in kwargs:
            self.tool_dependency_dir = resolve_path( kwargs.get( "tool_dependency_dir" ), self.root )
            self.use_tool_dependencies = True
        else:
            self.tool_dependency_dir = None
            self.use_tool_dependencies = False
        self.update_integrated_tool_panel = False
        # Galaxy flavor Docker Image
        self.enable_galaxy_flavor_docker_image = string_as_bool( kwargs.get( "enable_galaxy_flavor_docker_image", "False" ) )
        # User/authentication configuration
        self.use_remote_user = string_as_bool( kwargs.get( "use_remote_user", "False" ) )
        self.user_activation_on = kwargs.get( 'user_activation_on', None )
        self.activation_grace_period = kwargs.get( 'activation_grace_period', None )
        self.inactivity_box_content = kwargs.get( 'inactivity_box_content', None )
        self.registration_warning_message = kwargs.get( 'registration_warning_message', None )
        self.terms_url = kwargs.get( 'terms_url', None )
        self.blacklist_location = kwargs.get( 'blacklist_file', None )
        self.blacklist_content = None
        self.remote_user_maildomain = kwargs.get( "remote_user_maildomain", None )
        self.remote_user_header = kwargs.get( "remote_user_header", 'HTTP_REMOTE_USER' )
        self.remote_user_logout_href = kwargs.get( "remote_user_logout_href", None )
        self.require_login = string_as_bool( kwargs.get( "require_login", "False" ) )
        self.allow_user_creation = string_as_bool( kwargs.get( "allow_user_creation", "True" ) )
        self.allow_user_deletion = string_as_bool( kwargs.get( "allow_user_deletion", "False" ) )
        self.enable_openid = string_as_bool( kwargs.get( 'enable_openid', False ) )
        # Templates
        self.template_path = resolve_path( kwargs.get( "template_path", "templates" ), self.root )
        self.template_cache = resolve_path( kwargs.get( "template_cache_path", "database/compiled_templates/community" ), self.root )
        # Admin users: comma-separated email list
        self.admin_users = kwargs.get( "admin_users", "" )
        self.admin_users_list = [u.strip() for u in self.admin_users.split(',') if u]
        # Mail configuration
        self.sendmail_path = kwargs.get('sendmail_path',"/usr/sbin/sendmail")
        self.mailing_join_addr = kwargs.get('mailing_join_addr',"galaxy-announce-join@bx.psu.edu")
        self.error_email_to = kwargs.get( 'error_email_to', None )
        self.smtp_server = kwargs.get( 'smtp_server', None )
        self.smtp_username = kwargs.get( 'smtp_username', None )
        self.smtp_password = kwargs.get( 'smtp_password', None )
        self.start_job_runners = kwargs.get( 'start_job_runners', None )
        self.email_from = kwargs.get( 'email_from', None )
        self.nginx_upload_path = kwargs.get( 'nginx_upload_path', False )
        self.log_actions = string_as_bool( kwargs.get( 'log_actions', 'False' ) )
        self.brand = kwargs.get( 'brand', None )
        # Configuration for the message box directly below the masthead.
        self.message_box_visible = kwargs.get( 'message_box_visible', False )
        self.message_box_content = kwargs.get( 'message_box_content', None )
        self.message_box_class = kwargs.get( 'message_box_class', 'info' )
        # External links shown in the UI
        self.support_url = kwargs.get( 'support_url', 'https://wiki.galaxyproject.org/Support' )
        self.wiki_url = kwargs.get( 'wiki_url', 'https://wiki.galaxyproject.org/' )
        self.blog_url = kwargs.get( 'blog_url', None )
        self.biostar_url = kwargs.get( 'biostar_url', None )
        self.screencasts_url = kwargs.get( 'screencasts_url', None )
        # Fixed defaults (not read from the config file)
        self.log_events = False
        self.cloud_controller_instance = False
        self.server_name = ''
        self.job_manager = ''
        self.default_job_handlers = []
        self.default_cluster_job_runner = 'local:///'
        self.job_handlers = []
        self.tool_handlers = []
        self.tool_runners = []
        # Error logging with sentry
        self.sentry_dsn = kwargs.get( 'sentry_dsn', None )
        # Where the tool shed hgweb.config file is stored - the default is the Galaxy installation directory.
        self.hgweb_config_dir = resolve_path( kwargs.get( 'hgweb_config_dir', '' ), self.root )
        # Proxy features
        self.apache_xsendfile = kwargs.get( 'apache_xsendfile', False )
        self.nginx_x_accel_redirect_base = kwargs.get( 'nginx_x_accel_redirect_base', False )
        self.drmaa_external_runjob_script = kwargs.get('drmaa_external_runjob_script', None )
        # Parse global_conf and save the parser
        global_conf = kwargs.get( 'global_conf', None )
        global_conf_parser = ConfigParser.ConfigParser()
        self.global_conf_parser = global_conf_parser
        if global_conf and "__file__" in global_conf:
            global_conf_parser.read(global_conf['__file__'])
        self.running_functional_tests = string_as_bool( kwargs.get( 'running_functional_tests', False ) )
        # Citation cache
        self.citation_cache_type = kwargs.get( "citation_cache_type", "file" )
        self.citation_cache_data_dir = resolve_path( kwargs.get( "citation_cache_data_dir", "database/tool_shed_citations/data" ), self.root )
        self.citation_cache_lock_dir = resolve_path( kwargs.get( "citation_cache_lock_dir", "database/tool_shed_citations/locks" ), self.root )

    def __parse_config_file_options( self, kwargs ):
        """Resolve the paths of auxiliary config files.

        An explicit kwarg wins; otherwise the first existing candidate
        from the defaults list is used, falling back to the last entry.
        """
        defaults = dict(
            datatypes_config_file = [ 'config/datatypes_conf.xml', 'datatypes_conf.xml', 'config/datatypes_conf.xml.sample' ],
            shed_tool_data_table_config = [ 'shed_tool_data_table_conf.xml', 'config/shed_tool_data_table_conf.xml' ],
        )
        listify_defaults = dict(
            tool_data_table_config_path = [ 'config/tool_data_table_conf.xml', 'tool_data_table_conf.xml', 'config/tool_data_table_conf.xml.sample' ],
        )
        for var, defaults in defaults.items():
            if kwargs.get( var, None ) is not None:
                path = kwargs.get( var )
            else:
                # for..else: runs when no candidate file exists on disk
                for default in defaults:
                    if os.path.exists( resolve_path( default, self.root ) ):
                        path = default
                        break
                else:
                    path = defaults[-1]
            setattr( self, var, resolve_path( path, self.root ) )
        for var, defaults in listify_defaults.items():
            paths = []
            if kwargs.get( var, None ) is not None:
                paths = listify( kwargs.get( var ) )
            else:
                # Accept the first default whose every listed path exists.
                for default in defaults:
                    for path in listify( default ):
                        if not os.path.exists( resolve_path( path, self.root ) ):
                            break
                    else:
                        paths = listify( default )
                        break
                else:
                    paths = listify( defaults[-1] )
            setattr( self, var, [ resolve_path( x, self.root ) for x in paths ] )
        # Backwards compatibility for names used in too many places to fix
        self.datatypes_config = self.datatypes_config_file

    def get( self, key, default ):
        """Return the raw config value for *key*, or *default*."""
        return self.config_dict.get( key, default )

    def get_bool( self, key, default ):
        """Return the config value for *key* coerced to bool, or *default*."""
        if key in self.config_dict:
            return string_as_bool( self.config_dict[key] )
        else:
            return default

    def check( self ):
        """Create required directories and verify required files exist.

        :raises ConfigurationError: when a directory cannot be created or
            a required file is missing.
        """
        # Check that required directories exist.
        paths_to_check = [ self.root, self.file_path, self.hgweb_config_dir, self.tool_data_path, self.template_path ]
        for path in paths_to_check:
            if path not in [ None, False ] and not os.path.isdir( path ):
                try:
                    os.makedirs( path )
                except Exception, e:
                    raise ConfigurationError( "Unable to create missing directory: %s\n%s" % ( path, e ) )
        # Create the directories that it makes sense to create.
        for path in self.file_path, \
            self.template_cache, \
            os.path.join( self.tool_data_path, 'shared', 'jars' ):
            if path not in [ None, False ] and not os.path.isdir( path ):
                try:
                    os.makedirs( path )
                except Exception, e:
                    raise ConfigurationError( "Unable to create missing directory: %s\n%s" % ( path, e ) )
        # Check that required files exist.
        if not os.path.isfile( self.datatypes_config ):
            raise ConfigurationError( "File not found: %s" % self.datatypes_config )

    def is_admin_user( self, user ):
        """
        Determine if the provided user is listed in `admin_users`.
        """
        admin_users = self.get( "admin_users", "" ).split( "," )
        return user is not None and user.email in admin_users
def get_database_engine_options( kwargs ):
    """
    Extract SQLAlchemy engine options from *kwargs*: every key starting
    with the prefix "database_engine_option_" is collected (prefix
    stripped), with known option values converted to their proper types.
    """
    conversions = {
        'convert_unicode': string_as_bool,
        'pool_timeout': int,
        'echo': string_as_bool,
        'echo_pool': string_as_bool,
        'pool_recycle': int,
        'pool_size': int,
        'max_overflow': int,
        'pool_threadlocal': string_as_bool,
        'server_side_cursors': string_as_bool
    }
    prefix = "database_engine_option_"
    options = {}
    for key, value in kwargs.iteritems():
        if not key.startswith( prefix ):
            continue
        option_name = key[len( prefix ):]
        converter = conversions.get( option_name )
        if converter is not None:
            value = converter( value )
        options[ option_name ] = value
    return options
def configure_logging( config ):
    """
    Allow some basic logging configuration to be read from the cherrypy
    config.

    Does nothing when the paste config file already contains a
    [loggers] section; otherwise configures the root logger from the
    'log_format', 'log_level' and 'log_destination' options.
    """
    # PasteScript will have already configured the logger if the appropriate
    # sections were found in the config file, so we do nothing if the
    # config has a loggers section, otherwise we do some simple setup
    # using the 'log_*' values from the config.
    if config.global_conf_parser.has_section( "loggers" ):
        return
    format = config.get( "log_format", "%(name)s %(levelname)s %(asctime)s %(message)s" )
    # NOTE(review): _levelNames is a private attribute of the Python 2
    # logging module (removed in Python 3) — confirm before porting.
    level = logging._levelNames[ config.get( "log_level", "DEBUG" ) ]
    destination = config.get( "log_destination", "stdout" )
    log.info( "Logging at '%s' level to '%s'" % ( level, destination ) )
    # Get root logger
    root = logging.getLogger()
    # Set level
    root.setLevel( level )
    # Turn down paste httpserver logging
    if level <= logging.DEBUG:
        logging.getLogger( "paste.httpserver.ThreadPool" ).setLevel( logging.WARN )
    # Remove old handlers
    for h in root.handlers[:]:
        root.removeHandler(h)
    # Create handler
    if destination == "stdout":
        handler = logging.StreamHandler( sys.stdout )
    else:
        handler = logging.FileHandler( destination )
    # Create formatter
    formatter = logging.Formatter( format )
    # Hook everything up
    handler.setFormatter( formatter )
    root.addHandler( handler )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/tool_shed/config.py
|
Python
|
gpl-3.0
| 14,329
|
[
"Galaxy"
] |
b869f6024a0b7334129cc60d215ca5429c554c0dcf8864daa832a7467d9eb759
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the unique domains visited analysis plugin."""
import unittest
from plaso.analysis import unique_domains_visited
from plaso.containers import reports
from plaso.lib import definitions
from tests.analysis import test_lib
class UniqueDomainsPluginTest(test_lib.AnalysisPluginTestCase):
  """Tests for the unique domains analysis plugin."""

  # One event per domain, spread over four different browser-history
  # data types, so the report should count each domain exactly once.
  _TEST_EVENTS = [
      {'data_type': 'chrome:history:file_downloaded',
       'domain': 'firstevent.com',
       'parser': 'sqlite/chrome_history',
       'path': '/1/index.html',
       'timestamp': '2015-01-01 01:00:00',
       'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
       'url': 'https://firstevent.com/1/index.html'},
      {'data_type': 'firefox:places:page_visited',
       'domain': 'secondevent.net',
       'parser': 'sqlite/firefox_history',
       'path': '/2/index.html',
       'timestamp': '2015-02-02 02:00:00',
       'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
       'url': 'https://secondevent.net/2/index.html'},
      {'data_type': 'msiecf:redirected',
       'domain': 'thirdevent.org',
       'parser': 'msiecf',
       'path': '/3/index.html',
       'timestamp': '2015-03-03 03:00:00',
       'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
       'url': 'https://thirdevent.org/3/index.html'},
      {'data_type': 'safari:history:visit',
       'domain': 'fourthevent.co',
       'parser': 'safari_history',
       'path': '/4/index.html',
       'timestamp': '2015-04-04 04:00:00',
       'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
       'url': 'https://fourthevent.co/4/index.html'}]

  def testExamineEventAndCompileReport(self):
    """Tests the ExamineEvent and CompileReport functions."""
    plugin = unique_domains_visited.UniqueDomainsVisitedPlugin()
    storage_writer = self._AnalyzeEvents(self._TEST_EVENTS, plugin)

    # Exactly one analysis report is expected for a single plugin run.
    number_of_reports = storage_writer.GetNumberOfAttributeContainers(
        'analysis_report')
    self.assertEqual(number_of_reports, 1)

    analysis_report = storage_writer.GetAttributeContainerByIndex(
        reports.AnalysisReport.CONTAINER_TYPE, 0)

    # Each domain was visited once, so each counter entry must be 1.
    self.assertEqual(analysis_report.analysis_counter['firstevent.com'], 1)
    self.assertEqual(analysis_report.analysis_counter['secondevent.net'], 1)
    self.assertEqual(analysis_report.analysis_counter['thirdevent.org'], 1)
    self.assertEqual(analysis_report.analysis_counter['fourthevent.co'], 1)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
joachimmetz/plaso
|
tests/analysis/unique_domains_visited.py
|
Python
|
apache-2.0
| 2,525
|
[
"VisIt"
] |
72275f4e0b28c0e5d2ac8898a55df67c8caf54ff09ea38a84c25b4468eab8791
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
# Jeremy Kolb <jkolb@brandeis.edu>
import gl_XML, glX_XML, glX_proto_common, license
import sys, getopt, copy, string
def convertStringForXCB(str):
    """Convert a CamelCase GL token to the xcb-style lowercase form:
    each uppercase letter becomes '_' followed by its lowercase form,
    and special all-caps suffixes (e.g. ARB) are folded as one unit."""
    special = [ "ARB" ]
    converted = ""
    i = 0
    length = len(str)
    while i < length:
        chunk = str[i:i + 3]
        if chunk in special:
            # Consume the whole special token at once.
            converted = '%s_%s' % (converted, chunk.lower())
            i += 3
        elif str[i].isupper():
            converted = '%s_%s' % (converted, str[i].lower())
            i += 1
        else:
            converted = '%s%s' % (converted, str[i])
            i += 1
    return converted
def hash_pixel_function(func):
    """Generate a 'unique' key for a pixel function.  The key is based on
    the parameters written in the command packet.  This includes any
    padding that might be added for the original function and the 'NULL
    image' flag.

    Returns [hash_string, utility_name] where utility_name is the
    dimension-independent '__glx_*' shared function name.
    """
    h = ""
    hash_pre = ""
    hash_suf = ""
    for param in func.parameterIterateGlxSend():
        if param.is_image():
            # Round the dimension count up to an even value; the shared
            # utility covers the (d-1)D and dD variants together.
            [dim, junk, junk, junk, junk] = param.get_dimensions()
            d = (dim + 1) & ~1
            hash_pre = "%uD%uD_" % (d - 1, d)

            if param.img_null_flag:
                hash_suf = "_NF"

        h += "%u" % (param.size())

        # Byte swapping pads the command packet; fold that into the key.
        if func.pad_after(param):
            h += "4"

    # NOTE: dim and d are only bound inside the image branch above, so
    # this function must only be called for functions that actually have
    # an image parameter.
    n = func.name.replace("%uD" % (dim), "")
    n = "__glx_%s_%uD%uD" % (n, d - 1, d)

    h = hash_pre + h + hash_suf
    return [h, n]
class glx_pixel_function_stub(glX_XML.glx_function):
    """Dummy class used to generate pixel "utility" functions that are
    shared by multiple dimension image functions.  For example, these
    objects are used to generate shared functions used to send GLX
    protocol for TexImage1D and TexImage2D, TexSubImage1D and
    TexSubImage2D, etc."""

    def __init__(self, func, name):
        """Clone the parameter list of *func* under the new *name*,
        normalizing image parameters so the stub always covers the
        higher-dimension form."""
        # The parameters to the utility function are the same as the
        # parameters to the real function except for the added "pad"
        # parameters.
        self.name = name
        self.images = []
        self.parameters = []
        self.parameters_by_name = {}
        for _p in func.parameterIterator():
            # Shallow-copy each parameter so the stub can adjust image
            # fields without mutating the original function's objects.
            p = copy.copy(_p)
            self.parameters.append(p)
            self.parameters_by_name[ p.name ] = p

            if p.is_image():
                self.images.append(p)
                p.height = "height"

                if p.img_yoff == None:
                    p.img_yoff = "yoffset"

                if p.depth:
                    if p.extent == None:
                        p.extent = "extent"

                    if p.img_woff == None:
                        p.img_woff = "woffset"

            # Materialize any implicit padding as an explicit parameter.
            pad_name = func.pad_after(p)
            if pad_name:
                pad = copy.copy(p)
                pad.name = pad_name
                self.parameters.append(pad)
                self.parameters_by_name[ pad.name ] = pad

        self.return_type = func.return_type

        # ~0 marks this stub as a render opcode placeholder.
        self.glx_rop = ~0
        self.glx_sop = 0
        self.glx_vendorpriv = 0

        self.glx_doubles_in_order = func.glx_doubles_in_order

        self.vectorequiv = None
        self.output = None
        self.can_be_large = func.can_be_large
        self.reply_always_array = func.reply_always_array
        self.dimensions_in_reply = func.dimensions_in_reply
        self.img_reset = None

        # Stubs are never hand-coded and never ignored.
        self.server_handcode = 0
        self.client_handcode = 0
        self.ignore = 0

        self.count_parameter_list = func.count_parameter_list
        self.counter_list = func.counter_list
        self.offsets_calculated = 0
        return
class PrintGlxProtoStubs(glX_proto_common.glx_print_proto):
def __init__(self):
glX_proto_common.glx_print_proto.__init__(self)
self.name = "glX_proto_send.py (from Mesa)"
self.license = license.bsd_license_template % ( "(C) Copyright IBM Corporation 2004, 2005", "IBM")
self.last_category = ""
self.generic_sizes = [3, 4, 6, 8, 12, 16, 24, 32]
self.pixel_stubs = {}
self.debug = 0
return
    def printRealHeader(self):
        """Print the C file prologue: #includes, helper macros, and the
        shared reply/request helper functions used by every stub."""
        print ''
        print '#include <GL/gl.h>'
        print '#include "indirect.h"'
        print '#include "glxclient.h"'
        print '#include "indirect_size.h"'
        print '#include "glapi.h"'
        print '#include <GL/glxproto.h>'
        print '#include <X11/Xlib-xcb.h>'
        print '#include <xcb/xcb.h>'
        print '#include <xcb/glx.h>'
        print ''
        print '#define __GLX_PAD(n) (((n) + 3) & ~3)'
        print ''
        self.printFastcall()
        self.printNoinline()
        print ''
        print '#ifndef __GNUC__'
        print '# define __builtin_expect(x, y) x'
        print '#endif'
        print ''
        print '/* If the size and opcode values are known at compile-time, this will, on'
        print ' * x86 at least, emit them with a single instruction.'
        print ' */'
        print '#define emit_header(dest, op, size) \\'
        print ' do { union { short s[2]; int i; } temp; \\'
        print ' temp.s[0] = (size); temp.s[1] = (op); \\'
        print ' *((int *)(dest)) = temp.i; } while(0)'
        print ''
        # Shared helper routines emitted verbatim into the generated file.
        print """NOINLINE CARD32
__glXReadReply( Display *dpy, size_t size, void * dest, GLboolean reply_is_always_array )
{
xGLXSingleReply reply;
(void) _XReply(dpy, (xReply *) & reply, 0, False);
if (size != 0) {
if ((reply.length > 0) || reply_is_always_array) {
const GLint bytes = (reply_is_always_array)
? (4 * reply.length) : (reply.size * size);
const GLint extra = 4 - (bytes & 3);
_XRead(dpy, dest, bytes);
if ( extra < 4 ) {
_XEatData(dpy, extra);
}
}
else {
(void) memcpy( dest, &(reply.pad3), size);
}
}
return reply.retval;
}
NOINLINE void
__glXReadPixelReply( Display *dpy, struct glx_context * gc, unsigned max_dim,
GLint width, GLint height, GLint depth, GLenum format, GLenum type,
void * dest, GLboolean dimensions_in_reply )
{
xGLXSingleReply reply;
GLint size;
(void) _XReply(dpy, (xReply *) & reply, 0, False);
if ( dimensions_in_reply ) {
width = reply.pad3;
height = reply.pad4;
depth = reply.pad5;
if ((height == 0) || (max_dim < 2)) { height = 1; }
if ((depth == 0) || (max_dim < 3)) { depth = 1; }
}
size = reply.length * 4;
if (size != 0) {
void * buf = malloc( size );
if ( buf == NULL ) {
_XEatData(dpy, size);
__glXSetError(gc, GL_OUT_OF_MEMORY);
}
else {
const GLint extra = 4 - (size & 3);
_XRead(dpy, buf, size);
if ( extra < 4 ) {
_XEatData(dpy, extra);
}
__glEmptyImage(gc, 3, width, height, depth, format, type,
buf, dest);
free(buf);
}
}
}
#define X_GLXSingle 0
NOINLINE FASTCALL GLubyte *
__glXSetupSingleRequest( struct glx_context * gc, GLint sop, GLint cmdlen )
{
xGLXSingleReq * req;
Display * const dpy = gc->currentDpy;
(void) __glXFlushRenderBuffer(gc, gc->pc);
LockDisplay(dpy);
GetReqExtra(GLXSingle, cmdlen, req);
req->reqType = gc->majorOpcode;
req->contextTag = gc->currentContextTag;
req->glxCode = sop;
return (GLubyte *)(req) + sz_xGLXSingleReq;
}
NOINLINE FASTCALL GLubyte *
__glXSetupVendorRequest( struct glx_context * gc, GLint code, GLint vop, GLint cmdlen )
{
xGLXVendorPrivateReq * req;
Display * const dpy = gc->currentDpy;
(void) __glXFlushRenderBuffer(gc, gc->pc);
LockDisplay(dpy);
GetReqExtra(GLXVendorPrivate, cmdlen, req);
req->reqType = gc->majorOpcode;
req->glxCode = code;
req->vendorCode = vop;
req->contextTag = gc->currentContextTag;
return (GLubyte *)(req) + sz_xGLXVendorPrivateReq;
}
const GLuint __glXDefaultPixelStore[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
#define zero (__glXDefaultPixelStore+0)
#define one (__glXDefaultPixelStore+8)
#define default_pixel_store_1D (__glXDefaultPixelStore+4)
#define default_pixel_store_1D_size 20
#define default_pixel_store_2D (__glXDefaultPixelStore+4)
#define default_pixel_store_2D_size 20
#define default_pixel_store_3D (__glXDefaultPixelStore+0)
#define default_pixel_store_3D_size 36
#define default_pixel_store_4D (__glXDefaultPixelStore+0)
#define default_pixel_store_4D_size 36
"""
        # One shared emitter per fixed payload size.
        for size in self.generic_sizes:
            self.print_generic_function(size)
        return
    def printBody(self, api):
        """Emit one C stub for every GLX-capable function in *api*,
        plus shared pixel stubs and the GetProcAddress table."""
        self.pixel_stubs = {}
        generated_stubs = []
        for func in api.functionIterateGlx():
            # Functions implemented by hand elsewhere are skipped.
            if func.client_handcode: continue
            # If the function is a pixel function with a certain
            # GLX protocol signature, create a fake stub function
            # for it.  For example, create a single stub function
            # that is used to implement both glTexImage1D and
            # glTexImage2D.
            if func.glx_rop != 0:
                do_it = 0
                for image in func.get_images():
                    if image.img_pad_dimensions:
                        do_it = 1
                        break
                if do_it:
                    # Identical protocol signatures share one stub.
                    [h, n] = hash_pixel_function(func)
                    self.pixel_stubs[ func.name ] = n
                    if h not in generated_stubs:
                        generated_stubs.append(h)
                        fake_func = glx_pixel_function_stub( func, n )
                        self.printFunction(fake_func, fake_func.name)
            self.printFunction(func, func.name)
            # Functions reachable both as single and vendor-private get
            # a second stub under the vendor-private name.
            if func.glx_sop and func.glx_vendorpriv:
                self.printFunction(func, func.glx_vendorpriv_names[0])
        self.printGetProcAddress(api)
        return
    def printGetProcAddress(self, api):
        """Emit the GLX_SHARED_GLAPI name table and the bsearch-based
        __indirect_get_proc_address() implementation."""
        procs = {}
        for func in api.functionIterateGlx():
            for n in func.entry_points:
                if func.has_different_protocol(n):
                    procs[n] = func.static_glx_name(n)
        print """
#ifdef GLX_SHARED_GLAPI
static const struct proc_pair
{
const char *name;
_glapi_proc proc;
} proc_pairs[%d] = {""" % len(procs)
        # Sort the names so the generated table can be binary-searched.
        names = procs.keys()
        names.sort()
        for i in xrange(len(names)):
            comma = ',' if i < len(names) - 1 else ''
            print ' { "%s", (_glapi_proc) gl%s }%s' % (names[i], procs[names[i]], comma)
        print """};
static int
__indirect_get_proc_compare(const void *key, const void *memb)
{
const struct proc_pair *pair = (const struct proc_pair *) memb;
return strcmp((const char *) key, pair->name);
}
_glapi_proc
__indirect_get_proc_address(const char *name)
{
const struct proc_pair *pair;
/* skip "gl" */
name += 2;
pair = (const struct proc_pair *) bsearch((const void *) name,
(const void *) proc_pairs, ARRAY_SIZE(proc_pairs), sizeof(proc_pairs[0]),
__indirect_get_proc_compare);
return (pair) ? pair->proc : NULL;
}
#endif /* GLX_SHARED_GLAPI */
"""
        return
    def printFunction(self, func, name):
        """Emit the C definition for one stub: signature, body dispatch
        (pixel / render / single protocol), and closing braces."""
        footer = '}\n'
        # glx_rop == ~0 marks the shared pixel stubs, which take the
        # opcode and dimension count as extra parameters.
        if func.glx_rop == ~0:
            print 'static %s' % (func.return_type)
            print '%s( unsigned opcode, unsigned dim, %s )' % (func.name, func.get_parameter_string())
            print '{'
        else:
            if func.has_different_protocol(name):
                if func.return_type == "void":
                    ret_string = ''
                else:
                    ret_string = "return "
                func_name = func.static_glx_name(name)
                # Direct-rendering contexts bypass the protocol path and
                # call through the dispatch table instead.
                print '#define %s %d' % (func.opcode_vendor_name(name), func.glx_vendorpriv)
                print '%s gl%s(%s)' % (func.return_type, func_name, func.get_parameter_string())
                print '{'
                print ' struct glx_context * const gc = __glXGetCurrentContext();'
                print ''
                print '#if defined(GLX_DIRECT_RENDERING) && !defined(GLX_USE_APPLEGL)'
                print ' if (gc->isDirect) {'
                print ' const _glapi_proc *const disp_table = (_glapi_proc *)GET_DISPATCH();'
                print ' PFNGL%sPROC p =' % (name.upper())
                print ' (PFNGL%sPROC) disp_table[%d];' % (name.upper(), func.offset)
                print ' %sp(%s);' % (ret_string, func.get_called_parameter_string())
                print ' } else'
                print '#endif'
                print ' {'
                footer = '}\n}\n'
            else:
                print '#define %s %d' % (func.opcode_name(), func.opcode_value())
                print '%s __indirect_gl%s(%s)' % (func.return_type, name, func.get_parameter_string())
                print '{'
        # Choose the body emitter by protocol kind.
        if func.glx_rop != 0 or func.vectorequiv != None:
            if len(func.images):
                self.printPixelFunction(func)
            else:
                self.printRenderFunction(func)
        elif func.glx_sop != 0 or func.glx_vendorpriv != 0:
            self.printSingleFunction(func, name)
            pass
        else:
            print "/* Missing GLX protocol for %s. */" % (name)
        print footer
        return
    def print_generic_function(self, n):
        """Emit generic_N_byte(), a shared sender for render commands whose
        payload is a fixed n-byte blob (n rounded up to a multiple of 4)."""
        size = (n + 3) & ~3
        print """static FASTCALL NOINLINE void
generic_%u_byte( GLint rop, const void * ptr )
{
struct glx_context * const gc = __glXGetCurrentContext();
const GLuint cmdlen = %u;
emit_header(gc->pc, rop, cmdlen);
(void) memcpy((void *)(gc->pc + 4), ptr, %u);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }
}
""" % (n, size + 4, size)
        return
    def common_emit_one_arg(self, p, pc, adjust, extra_offset):
        """Emit the memcpy/memset that packs one parameter *p* into the
        protocol buffer *pc*, offset by *adjust* header bytes plus any
        *extra_offset* accumulated from earlier variable-length data."""
        if p.is_array():
            src_ptr = p.name
        else:
            # Scalars are copied by address.
            src_ptr = "&" + p.name
        if p.is_padding:
            print '(void) memset((void *)(%s + %u), 0, %s);' \
                % (pc, p.offset + adjust, p.size_string() )
        elif not extra_offset:
            print '(void) memcpy((void *)(%s + %u), (void *)(%s), %s);' \
                % (pc, p.offset + adjust, src_ptr, p.size_string() )
        else:
            print '(void) memcpy((void *)(%s + %u + %s), (void *)(%s), %s);' \
                % (pc, p.offset + adjust, extra_offset, src_ptr, p.size_string() )
    def common_emit_args(self, f, pc, adjust, skip_vla):
        """Emit packing code for every sendable parameter of *f*; tracks a
        running extra offset past variable-length arguments."""
        extra_offset = None
        for p in f.parameterIterateGlxSend( not skip_vla ):
            # The img_reset parameter is written separately by the caller.
            if p.name != f.img_reset:
                self.common_emit_one_arg(p, pc, adjust, extra_offset)
                if p.is_variable_length():
                    temp = p.size_string()
                    if extra_offset:
                        extra_offset += " + %s" % (temp)
                    else:
                        extra_offset = temp
        return
    def pixel_emit_args(self, f, pc, large):
        """Emit the arguments for a pixel function.  This differs from
        common_emit_args in that pixel functions may require padding
        be inserted (i.e., for the missing width field for
        TexImage1D), and they may also require a 'NULL image' flag
        be inserted before the image data."""
        # Large commands carry an 8-byte header, small ones 4 bytes.
        if large:
            adjust = 8
        else:
            adjust = 4
        for param in f.parameterIterateGlxSend():
            if not param.is_image():
                self.common_emit_one_arg(param, pc, adjust, None)
                if f.pad_after(param):
                    print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset + param.size()) + adjust)
            else:
                [dim, width, height, depth, extent] = param.get_dimensions()
                if f.glx_rop == ~0:
                    # Shared pixel stubs get the dimension at run time.
                    dim_str = "dim"
                else:
                    dim_str = str(dim)
                if param.is_padding:
                    print '(void) memset((void *)(%s + %u), 0, %s);' \
                        % (pc, (param.offset - 4) + adjust, param.size_string() )
                if param.img_null_flag:
                    if large:
                        print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset - 4) + adjust)
                    else:
                        print '(void) memcpy((void *)(%s + %u), (void *)((%s == NULL) ? one : zero), 4);' % (pc, (param.offset - 4) + adjust, param.name)
                pixHeaderPtr = "%s + %u" % (pc, adjust)
                pcPtr = "%s + %u" % (pc, param.offset + adjust)
                if not large:
                    if param.img_send_null:
                        condition = '(compsize > 0) && (%s != NULL)' % (param.name)
                    else:
                        condition = 'compsize > 0'
                    print 'if (%s) {' % (condition)
                    print ' (*gc->fillImage)(gc, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)
                    print '} else {'
                    print ' (void) memcpy( %s, default_pixel_store_%uD, default_pixel_store_%uD_size );' % (pixHeaderPtr, dim, dim)
                    print '}'
                else:
                    print '__glXSendLargeImage(gc, compsize, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)
        return
    def large_emit_begin(self, f, op_name = None):
        """Emit the prologue of a 'large' render command: flush the buffer
        and write the (cmdlen + 4, opcode) header words."""
        if not op_name:
            op_name = f.opcode_real_name()
        print 'const GLint op = %s;' % (op_name)
        print 'const GLuint cmdlenLarge = cmdlen + 4;'
        print 'GLubyte * const pc = __glXFlushRenderBuffer(gc, gc->pc);'
        print '(void) memcpy((void *)(pc + 0), (void *)(&cmdlenLarge), 4);'
        print '(void) memcpy((void *)(pc + 4), (void *)(&op), 4);'
        return
    def common_func_print_just_start(self, f, name):
        """Emit the shared stub prologue (gc/dpy locals, cmdlen, counter
        validation, skip guard).  Returns 1 when an open '{' guard was
        emitted and the caller must close it, 0 otherwise."""
        print ' struct glx_context * const gc = __glXGetCurrentContext();'
        # The only reason that single and vendor private commands need
        # a variable called 'dpy' is because they use the SyncHandle
        # macro.  For whatever brain-dead reason, that macro is hard-
        # coded to use a variable called 'dpy' instead of taking a
        # parameter.
        # FIXME Simplify the logic related to skip_condition and
        # FIXME condition_list in this function.  Basically, remove
        # FIXME skip_condition, and just append the "dpy != NULL" type
        # FIXME condition to condition_list from the start.  The only
        # FIXME reason it's done in this confusing way now is to
        # FIXME minimize the diffs in the generated code.
        if not f.glx_rop:
            for p in f.parameterIterateOutputs():
                if p.is_image() and (p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP"):
                    print ' const __GLXattribute * const state = gc->client_state_private;'
                    break
            print ' Display * const dpy = gc->currentDpy;'
            skip_condition = "dpy != NULL"
        elif f.can_be_large:
            skip_condition = "gc->currentDpy != NULL"
        else:
            skip_condition = None
        if f.return_type != 'void':
            print ' %s retval = (%s) 0;' % (f.return_type, f.return_type)
        # The size calculation is unused on the XCB path, so guard it.
        if name != None and name not in f.glx_vendorpriv_names:
            print '#ifndef USE_XCB'
        self.emit_packet_size_calculation(f, 0)
        if name != None and name not in f.glx_vendorpriv_names:
            print '#endif'
        condition_list = []
        for p in f.parameterIterateCounters():
            condition_list.append( "%s >= 0" % (p.name) )
            # 'counter' parameters cannot be negative
            print " if (%s < 0) {" % p.name
            print " __glXSetError(gc, GL_INVALID_VALUE);"
            if f.return_type != 'void':
                print " return 0;"
            else:
                print " return;"
            print " }"
        if skip_condition:
            condition_list.append( skip_condition )
        if len( condition_list ) > 0:
            if len( condition_list ) > 1:
                skip_condition = "(%s)" % (string.join( condition_list, ") && (" ))
            else:
                skip_condition = "%s" % (condition_list.pop(0))
            print ' if (__builtin_expect(%s, 1)) {' % (skip_condition)
            return 1
        else:
            return 0
    def printSingleFunction(self, f, name):
        """Emit the body of a single/vendor-private request stub: an XCB
        path (when available for *name*) and the classic Xlib path,
        including reply decoding for outputs and return values."""
        self.common_func_print_just_start(f, name)
        if self.debug:
            print ' printf( "Enter %%s...\\n", "gl%s" );' % (f.name)
        if name not in f.glx_vendorpriv_names:
            # XCB specific:
            print '#ifdef USE_XCB'
            if self.debug:
                print ' printf("\\tUsing XCB.\\n");'
            print ' xcb_connection_t *c = XGetXCBConnection(dpy);'
            print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
            xcb_name = 'xcb_glx%s' % convertStringForXCB(name)
            # Collect the input parameters (and any extra flags the XCB
            # request wants) in call order.
            iparams=[]
            extra_iparams = []
            output = None
            for p in f.parameterIterator():
                if p.is_output:
                    output = p
                    if p.is_image():
                        if p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP":
                            extra_iparams.append("state->storePack.swapEndian")
                        else:
                            extra_iparams.append("0")
                        # Hardcode this in.  lsb_first param (apparently always GL_FALSE)
                        # also present in GetPolygonStipple, but taken care of above.
                        if xcb_name == "xcb_glx_read_pixels":
                            extra_iparams.append("0")
                else:
                    iparams.append(p.name)
            xcb_request = '%s(%s)' % (xcb_name, ", ".join(["c", "gc->currentContextTag"] + iparams + extra_iparams))
            if f.needs_reply():
                print ' %s_reply_t *reply = %s_reply(c, %s, NULL);' % (xcb_name, xcb_name, xcb_request)
                if output:
                    if output.is_image():
                        [dim, w, h, d, junk] = output.get_dimensions()
                        if f.dimensions_in_reply:
                            # Image dimensions come back in the reply; clamp
                            # unused axes to 1.
                            w = "reply->width"
                            h = "reply->height"
                            d = "reply->depth"
                            if dim < 2:
                                h = "1"
                            else:
                                print ' if (%s == 0) { %s = 1; }' % (h, h)
                            if dim < 3:
                                d = "1"
                            else:
                                print ' if (%s == 0) { %s = 1; }' % (d, d)
                        print ' __glEmptyImage(gc, 3, %s, %s, %s, %s, %s, %s_data(reply), %s);' % (w, h, d, output.img_format, output.img_type, xcb_name, output.name)
                    else:
                        if f.reply_always_array:
                            print ' (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())
                        else:
                            print ' /* the XXX_data_length() xcb function name is misleading, it returns the number */'
                            print ' /* of elements, not the length of the data part. A single element is embedded. */'
                            print ' if (%s_data_length(reply) == 1)' % (xcb_name)
                            print ' (void)memcpy(%s, &reply->datum, sizeof(reply->datum));' % (output.name)
                            print ' else'
                            print ' (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())
                if f.return_type != 'void':
                    print ' retval = reply->ret_val;'
                print ' free(reply);'
            else:
                print ' ' + xcb_request + ';'
            print '#else'
            # End of XCB specific.
        if f.parameters != []:
            pc_decl = "GLubyte const * pc ="
        else:
            pc_decl = "(void)"
        if name in f.glx_vendorpriv_names:
            print ' %s __glXSetupVendorRequest(gc, %s, %s, cmdlen);' % (pc_decl, f.opcode_real_name(), f.opcode_vendor_name(name))
        else:
            print ' %s __glXSetupSingleRequest(gc, %s, cmdlen);' % (pc_decl, f.opcode_name())
        self.common_emit_args(f, "pc", 0, 0)
        images = f.get_images()
        for img in images:
            if img.is_output:
                # Trailing 4 bytes of the fixed part carry the swap-endian
                # and reset flags for image replies.
                o = f.command_fixed_length() - 4
                print ' *(int32_t *)(pc + %u) = 0;' % (o)
                if img.img_format != "GL_COLOR_INDEX" or img.img_type != "GL_BITMAP":
                    print ' * (int8_t *)(pc + %u) = state->storePack.swapEndian;' % (o)
                if f.img_reset:
                    print ' * (int8_t *)(pc + %u) = %s;' % (o + 1, f.img_reset)
        return_name = ''
        if f.needs_reply():
            if f.return_type != 'void':
                return_name = " retval"
                return_str = " retval = (%s)" % (f.return_type)
            else:
                return_str = " (void)"
            got_reply = 0
            for p in f.parameterIterateOutputs():
                if p.is_image():
                    [dim, w, h, d, junk] = p.get_dimensions()
                    if f.dimensions_in_reply:
                        print " __glXReadPixelReply(dpy, gc, %u, 0, 0, 0, %s, %s, %s, GL_TRUE);" % (dim, p.img_format, p.img_type, p.name)
                    else:
                        print " __glXReadPixelReply(dpy, gc, %u, %s, %s, %s, %s, %s, %s, GL_FALSE);" % (dim, w, h, d, p.img_format, p.img_type, p.name)
                    got_reply = 1
                else:
                    if f.reply_always_array:
                        aa = "GL_TRUE"
                    else:
                        aa = "GL_FALSE"
                    # gl_parameter.size() returns the size
                    # of the entire data item.  If the
                    # item is a fixed-size array, this is
                    # the size of the whole array.  This
                    # is not what __glXReadReply wants.  It
                    # wants the size of a single data
                    # element in the reply packet.
                    # Dividing by the array size (1 for
                    # non-arrays) gives us this.
                    # NOTE(review): Python 2 integer division here.
                    s = p.size() / p.get_element_count()
                    print " %s __glXReadReply(dpy, %s, %s, %s);" % (return_str, s, p.name, aa)
                    got_reply = 1
            # If a reply wasn't read to fill an output parameter,
            # read a NULL reply to get the return value.
            if not got_reply:
                print " %s __glXReadReply(dpy, 0, NULL, GL_FALSE);" % (return_str)
        elif self.debug:
            # Only emit the extra glFinish call for functions
            # that don't already require a reply from the server.
            print ' __indirect_glFinish();'
        if self.debug:
            print ' printf( "Exit %%s.\\n", "gl%s" );' % (name)
        print ' UnlockDisplay(dpy); SyncHandle();'
        if name not in f.glx_vendorpriv_names:
            print '#endif /* USE_XCB */'
        print ' }'
        print ' return%s;' % (return_name)
        return
    def printPixelFunction(self, f):
        """Emit the body of a pixel-data render command, either by calling
        a shared pixel stub or by emitting the full small/large paths."""
        if self.pixel_stubs.has_key( f.name ):
            # Normally gl_function::get_parameter_string could be
            # used.  However, this call needs to have the missing
            # dimensions (e.g., a fake height value for
            # glTexImage1D) added in.
            p_string = ""
            for param in f.parameterIterateGlxSend():
                if param.is_padding:
                    continue
                p_string += ", " + param.name
                if param.is_image():
                    [dim, junk, junk, junk, junk] = param.get_dimensions()
                if f.pad_after(param):
                    p_string += ", 1"
            print ' %s(%s, %u%s );' % (self.pixel_stubs[f.name] , f.opcode_name(), dim, p_string)
            return
        if self.common_func_print_just_start(f, None):
            trailer = " }"
        else:
            trailer = None
        if f.can_be_large:
            # Small-command path: fits in the render buffer.
            print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
            print ' if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
            print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
            print ' }'
        if f.glx_rop == ~0:
            opcode = "opcode"
        else:
            opcode = f.opcode_real_name()
        print 'emit_header(gc->pc, %s, cmdlen);' % (opcode)
        self.pixel_emit_args( f, "gc->pc", 0 )
        print 'gc->pc += cmdlen;'
        print 'if (gc->pc > gc->limit) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'
        if f.can_be_large:
            # Large-command path: streamed via __glXSendLargeImage.
            print '}'
            print 'else {'
            self.large_emit_begin(f, opcode)
            self.pixel_emit_args(f, "pc", 1)
            print '}'
        if trailer: print trailer
        return
    def printRenderFunction(self, f):
        # There is a class of GL functions that take a single pointer
        # as a parameter.  This pointer points to a fixed-size chunk
        # of data, and the protocol for this functions is very
        # regular.  Since they are so regular and there are so many
        # of them, special case them with generic functions.  On
        # x86, this saves about 26KB in the libGL.so binary.
        if f.variable_length_parameter() == None and len(f.parameters) == 1:
            p = f.parameters[0]
            if p.is_pointer():
                cmdlen = f.command_fixed_length()
                if cmdlen in self.generic_sizes:
                    print ' generic_%u_byte( %s, %s );' % (cmdlen, f.opcode_real_name(), p.name)
                    return
        if self.common_func_print_just_start(f, None):
            trailer = " }"
        else:
            trailer = None
        if self.debug:
            print 'printf( "Enter %%s...\\n", "gl%s" );' % (f.name)
        if f.can_be_large:
            # Small-command path: fits in the render buffer.
            print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
            print ' if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
            print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
            print ' }'
        print 'emit_header(gc->pc, %s, cmdlen);' % (f.opcode_real_name())
        self.common_emit_args(f, "gc->pc", 4, 0)
        print 'gc->pc += cmdlen;'
        print 'if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'
        if f.can_be_large:
            # Large-command path: fixed part emitted inline, the
            # variable-length tail streamed with __glXSendLargeCommand.
            print '}'
            print 'else {'
            self.large_emit_begin(f)
            self.common_emit_args(f, "pc", 8, 1)
            p = f.variable_length_parameter()
            print ' __glXSendLargeCommand(gc, pc, %u, %s, %s);' % (p.offset + 8, p.name, p.size_string())
            print '}'
        if self.debug:
            print '__indirect_glFinish();'
            print 'printf( "Exit %%s.\\n", "gl%s" );' % (f.name)
        if trailer: print trailer
        return
class PrintGlxProtoInit_c(gl_XML.gl_print_base):
    """Printer that emits indirect_init.c, which builds the indirect
    rendering dispatch table."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)
        self.name = "glX_proto_send.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
        return

    def printRealHeader(self):
        """Emit the file header plus the __glXNewIndirectAPI() prologue,
        which allocates the table and fills it with NoOp entries."""
        print """/**
* \\file indirect_init.c
* Initialize indirect rendering dispatch table.
*
* \\author Kevin E. Martin <kevin@precisioninsight.com>
* \\author Brian Paul <brian@precisioninsight.com>
* \\author Ian Romanick <idr@us.ibm.com>
*/
#include "indirect_init.h"
#include "indirect.h"
#include "glapi.h"
#include <assert.h>
#ifndef GLX_USE_APPLEGL
/**
* No-op function used to initialize functions that have no GLX protocol
* support.
*/
static int NoOp(void)
{
return 0;
}
/**
* Create and initialize a new GL dispatch table.  The table is initialized
* with GLX indirect rendering protocol functions.
*/
struct _glapi_table * __glXNewIndirectAPI( void )
{
_glapi_proc *table;
unsigned entries;
unsigned i;
int o;
entries = _glapi_get_dispatch_table_size();
table = malloc(entries * sizeof(_glapi_proc));
if (table == NULL)
return NULL;
/* first, set all entries to point to no-op functions */
for (i = 0; i < entries; i++) {
table[i] = (_glapi_proc) NoOp;
}
/* now, initialize the entries we understand */"""

    def printRealFooter(self):
        """Emit the closing of __glXNewIndirectAPI()."""
        print """
return (struct _glapi_table *) table;
}
#endif
"""
        return

    def printBody(self, api):
        """Emit one table assignment per indirect-capable function,
        grouped under a per-category comment."""
        for [name, number] in api.categoryIterate():
            if number != None:
                preamble = '\n /* %3u. %s */\n' % (int(number), name)
            else:
                preamble = '\n /* %s */\n' % (name)
            for func in api.functionIterateByCategory(name):
                if func.client_supported_for_indirect():
                    # Emit the category comment only once, and only when
                    # the category actually contributes an entry.
                    if preamble:
                        print preamble
                        preamble = None
                    if func.is_abi():
                        print ' table[{offset}] = (_glapi_proc) __indirect_gl{name};'.format(name = func.name, offset = func.offset)
                    else:
                        # Non-ABI functions have their offset resolved at
                        # run time.
                        print ' o = _glapi_get_proc_offset("gl{0}");'.format(func.name)
                        print ' assert(o > 0);'
                        print ' table[o] = (_glapi_proc) __indirect_gl{0};'.format(func.name)
        return
class PrintGlxProtoInit_h(gl_XML.gl_print_base):
    """Printer that emits indirect.h, the prototypes for the indirect
    rendering stub functions."""

    def __init__(self):
        gl_XML.gl_print_base.__init__(self)
        self.name = "glX_proto_send.py (from Mesa)"
        self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
        # Include-guard macro for the generated header.
        self.header_tag = "_INDIRECT_H_"
        self.last_category = ""
        return

    def printRealHeader(self):
        """Emit the header comment and prototypes of the shared helper
        routines defined in the generated C file."""
        print """/**
* \\file
* Prototypes for indirect rendering functions.
*
* \\author Kevin E. Martin <kevin@precisioninsight.com>
* \\author Ian Romanick <idr@us.ibm.com>
*/
"""
        self.printFastcall()
        self.printNoinline()
        print """
#include <X11/Xfuncproto.h>
#include "glxclient.h"
extern _X_HIDDEN NOINLINE CARD32 __glXReadReply( Display *dpy, size_t size,
void * dest, GLboolean reply_is_always_array );
extern _X_HIDDEN NOINLINE void __glXReadPixelReply( Display *dpy,
struct glx_context * gc, unsigned max_dim, GLint width, GLint height,
GLint depth, GLenum format, GLenum type, void * dest,
GLboolean dimensions_in_reply );
extern _X_HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupSingleRequest(
struct glx_context * gc, GLint sop, GLint cmdlen );
extern _X_HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupVendorRequest(
struct glx_context * gc, GLint code, GLint vop, GLint cmdlen );
"""

    def printBody(self, api):
        """Emit one __indirect_gl prototype per GLX function, plus
        prototypes for alternate-protocol entry points."""
        for func in api.functionIterateGlx():
            params = func.get_parameter_string()
            print 'extern _X_HIDDEN %s __indirect_gl%s(%s);' % (func.return_type, func.name, params)
            for n in func.entry_points:
                if func.has_different_protocol(n):
                    asdf = func.static_glx_name(n)
                    if asdf not in func.static_entry_points:
                        print 'extern _X_HIDDEN %s gl%s(%s);' % (func.return_type, asdf, params)
                        # give it a easy-to-remember name
                        if func.client_handcode:
                            print '#define gl_dispatch_stub_%s gl%s' % (n, asdf)
                    else:
                        print 'GLAPI %s GLAPIENTRY gl%s(%s);' % (func.return_type, asdf, params)
                    break
        print ''
        print '#ifdef GLX_SHARED_GLAPI'
        print 'extern _X_HIDDEN void (*__indirect_get_proc_address(const char *name))(void);'
        print '#endif'
def show_usage():
    """Print command-line usage to stdout and exit with status 1."""
    print "Usage: %s [-f input_file_name] [-m output_mode] [-d]" % sys.argv[0]
    print " -m output_mode Output mode can be one of 'proto', 'init_c' or 'init_h'."
    print " -d Enable extra debug information in the generated code."
    sys.exit(1)
if __name__ == '__main__':
    # Default protocol description file; overridable with -f.
    file_name = "gl_API.xml"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:m:d")
    except Exception,e:
        # Any option parsing failure prints usage and exits(1).
        show_usage()

    debug = 0
    mode = "proto"

    for (arg,val) in args:
        if arg == "-f":
            file_name = val
        elif arg == "-m":
            mode = val
        elif arg == "-d":
            debug = 1

    # Select the printer matching the requested output mode.
    if mode == "proto":
        printer = PrintGlxProtoStubs()
    elif mode == "init_c":
        printer = PrintGlxProtoInit_c()
    elif mode == "init_h":
        printer = PrintGlxProtoInit_h()
    else:
        show_usage()

    printer.debug = debug
    api = gl_XML.parse_GL_API( file_name, glX_XML.glx_item_factory() )

    printer.Print( api )
|
ArcticaProject/vcxsrv
|
mesalib/src/mapi/glapi/gen/glX_proto_send.py
|
Python
|
gpl-3.0
| 39,193
|
[
"Brian"
] |
c7c6e262a249792abeb57a01c8781c438e2c7e402e49a56b1314ccf0b3d8d689
|
"""Contains the DataManager base class."""
# data_manager.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import copy
import logging
import os
import errno
import thread
import time
from mpf.system.config import Config
from mpf.system.file_manager import FileManager
class DataManager(object):

    """Persists one named chunk of machine data to a file on disk.

    The file path is looked up in the machine config under
    mpf:paths:<name>.  The file is read synchronously at construction
    time; writes are handed off to a background thread and may be
    delayed by a number of seconds.
    """

    def __init__(self, machine, name):
        """
        Args:
            machine: The main MachineController instance.
            name: String identifying what this DataManager instance is
                for.  Used to look up the file path in the machine config
                at mpf:paths:<name>.
        """
        self.machine = machine
        self.name = name
        self.log = logging.getLogger('DataInterface')
        self.data = dict()

        path_fragment = self.machine.config['mpf']['paths'][name]
        self.filename = os.path.join(self.machine.machine_path,
                                     path_fragment)

        self._setup_file()

    def _setup_file(self):
        # Make sure the parent folder exists before the first read/write.
        self._make_sure_path_exists(os.path.dirname(self.filename))
        self._load()

    def _make_sure_path_exists(self, path):
        # Create the directory tree; an already-existing path is fine.
        try:
            os.makedirs(path)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

    def _load(self):
        self.log.debug("Loading %s from %s", self.name, self.filename)

        if not os.path.isfile(self.filename):
            self.log.debug("Didn't find the %s file. No prob. We'll create "
                           "it when we save.", self.name)
            return

        self.data = FileManager.load(self.filename, halt_on_error=False)

    def get_data(self, section=None):
        """Returns a shallow copy of this DataManager's data.

        Args:
            section: Optional dictionary key naming the section to return.
                Default of None returns the entire dictionary.
        """
        source = self.data if not section else self.data[section]
        return copy.copy(source)

    def save_all(self, data=None, delay_secs=0):
        """Writes this DataManager's data to disk on a background thread.

        Args:
            data: Optional dict to replace the current data before the
                write happens.
            delay_secs: Optional seconds to wait before the disk write
                occurs, so writes can be deferred while MPF is busy.
                Default is 0.
        """
        self.log.debug("Will write %s to disk in %s sec(s)", self.name,
                       delay_secs)

        if data:
            self.data = data

        thread.start_new_thread(self._writing_thread, (delay_secs, ))

    def save_key(self, key, value, delay_secs=0):
        """Sets a single key and then persists the whole dictionary.

        Args:
            key: String name of the key to add/update.
            value: Value of the key.
            delay_secs: Optional seconds to wait before writing to disk.
                Default is 0.
        """
        self.data[key] = value
        self.save_all(delay_secs=delay_secs)

    def remove_key(self, key):
        # Deleting a key that isn't present is a silent no-op.
        try:
            del self.data[key]
        except KeyError:
            return
        self.save_all()

    def _writing_thread(self, delay_secs=0):
        if delay_secs:
            time.sleep(delay_secs)

        self.log.debug("Writing %s to: %s", self.name, self.filename)
        FileManager.save(self.filename, self.data)
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/system/data_manager.py
|
Python
|
mit
| 5,227
|
[
"Brian"
] |
d138a8307adc96ae7fa24663cbd66feab983bc0bca768a910ab838abb18a03bf
|
#!/usr/bin/env python
"""
Converts a step5_assembly.{psf,pdb} to a mae file appropriate
for membrane input to dabble
"""
from __future__ import print_function
import os
from vmd import atomsel, molecule
thedir = os.path.abspath(input("Which directory contains step5_assembly.{psf,crd}? > "))
if not os.path.isdir(thedir):
raise ValueError("%s not a valid directory" % thedir)
crd = os.path.join(thedir, "step5_assembly.crd")
psf = os.path.join(thedir, "step5_assembly.psf")
if not os.path.isfile(crd):
raise ValueError("No pdb file in directory!")
if not os.path.isfile(psf):
raise ValueError("No psf file in directory!")
molid = molecule.load('psf', psf, 'cor', crd)
xs = atomsel().get('x')
ys = atomsel().get('y')
zs = atomsel().get('z')
# 0.5A buffer to make it tile nicer
molecule.set_periodic(molid=molid,
a=max(xs)-min(xs)-8.0,
b=max(ys)-min(ys)-8.0,
c=max(zs)-min(zs)-8.0,
alpha=90., beta=90., gamma=90.)
outfile = os.path.join(thedir, "step5_assembly_dabble.mae")
molecule.write(molid=molid, filetype='mae', filename=outfile)
|
Eigenstate/dabble
|
convert_step5_to_dabble.py
|
Python
|
gpl-2.0
| 1,142
|
[
"VMD"
] |
4f7161be86a4f57b773f3569a4250290ee64640a817e8c31f6c1fbc11c525171
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from utils import remap
from tool.bwa_aligner import bwaAlignerTool
from tool.biobambam_filter import biobambam
from tool.macs2 import macs2
# ------------------------------------------------------------------------------
class process_chipseq(Workflow): # pylint: disable=invalid-name,too-few-public-methods
"""
Functions for processing Chip-Seq FastQ files. Files are the aligned,
filtered and analysed for peak calling
"""
    def __init__(self, configuration=None):
        """
        Initialise the class

        Parameters
        ----------
        configuration : dict
            a dictionary containing parameters that define how the operation
            should be carried out, which are specific to each Tool.
        """
        logger.info("Processing ChIP-Seq")
        if configuration is None:
            configuration = {}

        # NOTE(review): no explicit base-class __init__ call here; this
        # relies on self.configuration being provided by the Workflow base
        # class -- confirm that Workflow initialises it.
        self.configuration.update(configuration)
def run(self, input_files, metadata, output_files): # pylint: disable=too-many-branches,too-many-locals,too-many-statements,line-too-long
"""
Main run function for processing ChIP-seq FastQ data. Pipeline aligns
the FASTQ files to the genome using BWA. MACS 2 is then used for peak
calling to identify transcription factor binding sites within the
genome.
Currently this can only handle a single data file and a single
background file.
Parameters
----------
input_files : dict
Location of the initial input files required by the workflow
genome : str
Genome FASTA file
index : str
Location of the BWA archived index files
loc : str
Location of the FASTQ reads files
fastq2 : str
Location of the paired end FASTQ file [OPTIONAL]
bg_loc : str
Location of the background FASTQ reads files [OPTIONAL]
fastq2_bg : str
Location of the paired end background FASTQ reads files [OPTIONAL]
metadata : dict
Input file meta data associated with their roles
genome : str
index : str
bg_loc : str
[OPTIONAL]
output_files : dict
Output file locations
bam [, "bam_bg"] : str
filtered [, "filtered_bg"] : str
narrow_peak : str
summits : str
broad_peak : str
gapped_peak : str
Returns
-------
output_files : dict
Output file locations associated with their roles, for the output
bam [, "bam_bg"] : str
Aligned FASTQ short read file [ and aligned background file]
locations
filtered [, "filtered_bg"] : str
Filtered versions of the respective bam files
narrow_peak : str
Results files in bed4+1 format
summits : str
Results files in bed6+4 format
broad_peak : str
Results files in bed6+3 format
gapped_peak : str
Results files in bed12+3 format
output_metadata : dict
Output metadata for the associated files in output_files
bam [, "bam_bg"] : Metadata
filtered [, "filtered_bg"] : Metadata
narrow_peak : Metadata
summits : Metadata
broad_peak : Metadata
gapped_peak : Metadata
"""
output_files_generated = {}
output_metadata = {}
logger.info("PROCESS CHIPSEQ - DEFINED OUTPUT:", output_files["bam"])
if "genome_public" in input_files:
align_input_files = remap(
input_files, genome="genome_public", loc="loc", index="index_public")
align_input_file_meta = remap(
metadata, genome="genome_public", loc="loc", index="index_public")
else:
align_input_files = remap(input_files, "genome", "loc", "index")
align_input_file_meta = remap(metadata, "genome", "loc", "index")
if "fastq2" in input_files:
align_input_files["fastq2"] = input_files["fastq2"]
align_input_file_meta["fastq2"] = metadata["fastq2"]
logger.progress("BWA Aligner", status="RUNNING")
bwa = bwaAlignerTool(self.configuration)
bwa_files, bwa_meta = bwa.run(
align_input_files,
align_input_file_meta,
{"output": output_files["bam"], "bai": output_files["bai"]}
)
logger.progress("BWA Aligner", status="DONE")
try:
output_files_generated["bam"] = bwa_files["bam"]
output_metadata["bam"] = bwa_meta["bam"]
tool_name = output_metadata['bam'].meta_data['tool']
output_metadata['bam'].meta_data['tool_description'] = tool_name
output_metadata['bam'].meta_data['tool'] = "process_chipseq"
output_files_generated["bai"] = bwa_files["bai"]
output_metadata["bai"] = bwa_meta["bai"]
tool_name = output_metadata['bai'].meta_data['tool']
output_metadata['bai'].meta_data['tool_description'] = tool_name
output_metadata['bai'].meta_data['tool'] = "process_chipseq"
except KeyError:
logger.fatal("BWA aligner failed")
if "bg_loc" in input_files:
# Align background files
if "genome_public" in input_files:
align_input_files_bg = remap(
input_files, genome="genome_public", index="index_public", loc="bg_loc")
align_input_file_meta_bg = remap(
metadata, genome="genome_public", index="index_public", loc="bg_loc")
else:
align_input_files_bg = remap(input_files, "genome", "index", loc="bg_loc")
align_input_file_meta_bg = remap(metadata, "genome", "index", loc="bg_loc")
if "fastq2" in input_files:
align_input_files_bg["fastq2"] = input_files["fastq2_bg"]
align_input_file_meta_bg["fastq2"] = metadata["fastq2_bg"]
logger.progress("BWA Aligner - Background", status="RUNNING")
bwa_bg_files, bwa_bg_meta = bwa.run(
align_input_files_bg,
align_input_file_meta_bg,
{"output": output_files["bam_bg"], "bai": output_files["bai_bg"]}
)
logger.progress("BWA Aligner - Background", status="DONE")
try:
output_files_generated["bam_bg"] = bwa_bg_files["bam"]
output_metadata["bam_bg"] = bwa_bg_meta["bam"]
tool_name = output_metadata['bam_bg'].meta_data['tool']
output_metadata['bam_bg'].meta_data['tool_description'] = tool_name
output_metadata['bam_bg'].meta_data['tool'] = "process_chipseq"
output_files_generated["bai_bg"] = bwa_bg_files["bai"]
output_metadata["bai_bg"] = bwa_bg_meta["bai"]
tool_name = output_metadata['bai_bg'].meta_data['tool']
output_metadata['bai_bg'].meta_data['tool_description'] = tool_name
output_metadata['bai_bg'].meta_data['tool'] = "process_chipseq"
except KeyError:
logger.fatal("Background BWA aligner failed")
# Filter the bams
b3f = biobambam(self.configuration)
logger.progress("BioBamBam", status="RUNNING")
b3f_files, b3f_meta = b3f.run(
{"input": bwa_files['bam']},
{"input": bwa_meta['bam']},
{"output": output_files["filtered"], "bai": output_files["filtered_bai"]}
)
logger.progress("BioBamBam", status="DONE")
try:
output_files_generated["filtered"] = b3f_files["bam"]
output_metadata["filtered"] = b3f_meta["bam"]
tool_name = output_metadata['filtered'].meta_data['tool']
output_metadata['filtered'].meta_data['tool_description'] = tool_name
output_metadata['filtered'].meta_data['tool'] = "process_chipseq"
output_files_generated["filtered_bai"] = b3f_files["bai"]
output_metadata["filtered_bai"] = b3f_meta["bai"]
tool_name = output_metadata['filtered_bai'].meta_data['tool']
output_metadata['filtered_bai'].meta_data['tool_description'] = tool_name
output_metadata['filtered_bai'].meta_data['tool'] = "process_chipseq"
except KeyError:
logger.fatal("BioBamBam filtering failed")
if "bg_loc" in input_files:
# Filter background aligned files
logger.progress("BioBamBam Background", status="RUNNING")
b3f_bg_files, b3f_bg_meta = b3f.run(
{"input": bwa_bg_files['bam']},
{"input": bwa_bg_meta['bam']},
{"output": output_files["filtered_bg"], "bai": output_files["filtered_bai_bg"]}
)
logger.progress("BioBamBam Background", status="DONE")
try:
output_files_generated["filtered_bg"] = b3f_bg_files["bam"]
output_metadata["filtered_bg"] = b3f_bg_meta["bam"]
tool_name = output_metadata['filtered_bg'].meta_data['tool']
output_metadata['filtered_bg'].meta_data['tool_description'] = tool_name
output_metadata['filtered_bg'].meta_data['tool'] = "process_chipseq"
output_files_generated["filtered_bai_bg"] = b3f_bg_files["bai"]
output_metadata["filtered_bai_bg"] = b3f_bg_meta["bai"]
tool_name = output_metadata['filtered_bai_bg'].meta_data['tool']
output_metadata['filtered_bai_bg'].meta_data['tool_description'] = tool_name
output_metadata['filtered_bai_bg'].meta_data['tool'] = "process_chipseq"
except KeyError:
logger.fatal("Background BioBamBam filtering failed")
# MACS2 to call peaks
# Duplicates have already been filtered so MACS2 does not need to due
# any further filtering
self.configuration["macs_keep-dup_param"] = "all"
macs_caller = macs2(self.configuration)
macs_inputs = {"bam": output_files_generated["filtered"]}
macs_metadt = {"bam": output_metadata['filtered']}
if "bg_loc" in input_files:
macs_inputs["bam_bg"] = output_files_generated["filtered_bg"]
macs_metadt["bam_bg"] = output_metadata['filtered_bg']
logger.progress("MACS2", status="RUNNING")
m_results_files, m_results_meta = macs_caller.run(
macs_inputs, macs_metadt,
# Outputs of the final step may match workflow outputs;
# Extra entries in output_files will be disregarded.
remap(
output_files,
'narrow_peak', 'summits', 'broad_peak', 'gapped_peak')
)
logger.progress("MACS2", status="DONE")
if not m_results_meta:
logger.fatal("MACS2 peak calling failed")
if 'narrow_peak' in m_results_meta:
output_files_generated['narrow_peak'] = m_results_files['narrow_peak']
output_metadata['narrow_peak'] = m_results_meta['narrow_peak']
tool_name = output_metadata['narrow_peak'].meta_data['tool']
output_metadata['narrow_peak'].meta_data['tool_description'] = tool_name
output_metadata['narrow_peak'].meta_data['tool'] = "process_chipseq"
if 'summits' in m_results_meta:
output_files_generated['summits'] = m_results_files['summits']
output_metadata['summits'] = m_results_meta['summits']
tool_name = output_metadata['summits'].meta_data['tool']
output_metadata['summits'].meta_data['tool_description'] = tool_name
output_metadata['summits'].meta_data['tool'] = "process_chipseq"
if 'broad_peak' in m_results_meta:
output_files_generated['broad_peak'] = m_results_files['broad_peak']
output_metadata['broad_peak'] = m_results_meta['broad_peak']
tool_name = output_metadata['broad_peak'].meta_data['tool']
output_metadata['broad_peak'].meta_data['tool_description'] = tool_name
output_metadata['broad_peak'].meta_data['tool'] = "process_chipseq"
if 'gapped_peak' in m_results_meta:
output_files_generated['gapped_peak'] = m_results_files['gapped_peak']
output_metadata['gapped_peak'] = m_results_meta['gapped_peak']
tool_name = output_metadata['gapped_peak'].meta_data['tool']
output_metadata['gapped_peak'].meta_data['tool_description'] = tool_name
output_metadata['gapped_peak'].meta_data['tool'] = "process_chipseq"
if 'control_lambda' in m_results_meta:
output_files_generated['control_lambda'] = m_results_files['control_lambda']
output_metadata['control_lambda'] = m_results_meta['control_lambda']
tool_name = output_metadata['control_lambda'].meta_data['tool']
output_metadata['control_lambda'].meta_data['tool_description'] = tool_name
output_metadata['control_lambda'].meta_data['tool'] = "process_chipseq"
if 'treat_pileup' in m_results_meta:
output_files_generated['treat_pileup'] = m_results_files['treat_pileup']
output_metadata['treat_pileup'] = m_results_meta['treat_pileup']
tool_name = output_metadata['treat_pileup'].meta_data['tool']
output_metadata['treat_pileup'].meta_data['tool_description'] = tool_name
output_metadata['treat_pileup'].meta_data['tool'] = "process_chipseq"
return output_files_generated, output_metadata
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
    """
    Alternative main function
    -------------
    This function launches the app using configuration written in
    two json files: config.json and input_metadata.json.
    """
    # 1. Instantiate and launch the App
    print("1. Instantiate and launch the App")
    from apps.jsonapp import JSONApp

    launcher = JSONApp()
    outcome = launcher.launch(
        process_chipseq, config, in_metadata, out_metadata)

    # 2. The App has finished
    print("2. Execution finished; see " + out_metadata)
    print(outcome)

    return outcome
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Set up the command line parameters
    PARSER = argparse.ArgumentParser(description="ChIP-seq peak calling")
    PARSER.add_argument(
        "--config", help="Configuration file")
    PARSER.add_argument(
        "--in_metadata", help="Location of input metadata file")
    PARSER.add_argument(
        "--out_metadata", help="Location of output metadata file")
    # --local is a flag: store True when present, default False.
    PARSER.add_argument(
        "--local", action="store_const", const=True, default=False)

    # Get the matching parameters from the command line
    ARGS = PARSER.parse_args()

    CONFIG = ARGS.config
    IN_METADATA = ARGS.in_metadata
    OUT_METADATA = ARGS.out_metadata
    LOCAL = ARGS.local

    if LOCAL:
        import sys
        # NOTE(review): presumably a flag read by the app/tool layer to
        # detect a local command-line run rather than a managed one --
        # confirm against the apps package before changing.
        sys._run_from_cmdl = True  # pylint: disable=protected-access

    # Run the pipeline via the JSON-configured app and echo the result.
    RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)

    print(RESULTS)
|
Multiscale-Genomics/mg-process-fastq
|
process_chipseq.py
|
Python
|
apache-2.0
| 16,373
|
[
"BWA"
] |
3be6bd8b04cf48083bbf6f754fbf4bf0452e7a565ecfb9773b785e10f114d8d5
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
class Hdf(AutotoolsPackage):
    """HDF4 (also known as HDF) is a library and multi-object
    file format for storing and managing data between machines."""

    homepage = "https://portal.hdfgroup.org/display/support"
    url = "https://support.hdfgroup.org/ftp/HDF/releases/HDF4.2.14/src/hdf-4.2.14.tar.gz"
    list_url = "https://support.hdfgroup.org/ftp/HDF/releases/"
    list_depth = 2

    version('4.2.15', sha256='dbeeef525af7c2d01539906c28953f0fdab7dba603d1bc1ec4a5af60d002c459')
    version('4.2.14', sha256='2d383e87c8a0ca6a5352adbd1d5546e6cc43dc21ff7d90f93efa644d85c0b14a')
    version('4.2.13', sha256='be9813c1dc3712c2df977d4960e1f13f20f447dfa8c3ce53331d610c1f470483')
    version('4.2.12', sha256='dd419c55e85d1a0e13f3ea5ed35d00710033ccb16c85df088eb7925d486e040c')
    version('4.2.11', sha256='c3f7753b2fb9b27d09eced4d2164605f111f270c9a60b37a578f7de02de86d24')

    variant('szip', default=False, description="Enable szip support")
    # XDR is bundled on macOS builds; elsewhere an external backend is used.
    variant('external-xdr', default=sys.platform != 'darwin',
            description="Use an external XDR backend")
    variant('netcdf', default=False,
            description='Build NetCDF API (version 2.3.2)')
    variant('fortran', default=False,
            description='Enable Fortran interface')
    variant('java', default=False,
            description='Enable Java JNI interface')
    variant('shared', default=False, description='Enable shared library')
    variant('pic', default=True,
            description='Produce position-independent code')

    depends_on('zlib@1.1.4:')
    depends_on('jpeg')
    depends_on('szip', when='+szip')
    depends_on('rpc', when='+external-xdr')

    depends_on('bison', type='build')
    depends_on('flex', type='build')
    depends_on('java@7:', when='+java', type=('build', 'run'))

    # https://forum.hdfgroup.org/t/cant-build-hdf-4-2-14-with-jdk-11-and-enable-java/5702
    patch('disable_doclint.patch', when='@:4.2.14^java@9:')

    conflicts('^libjpeg@:6a')

    # configure: error: Cannot build shared fortran libraries.
    # Please configure with --disable-fortran flag.
    conflicts('+fortran', when='+shared')

    # configure: error: Java requires shared libraries to be built
    conflicts('+java', when='~shared')

    # configure: WARNING: unrecognized options: --enable-java
    conflicts('+java', when='@:4.2.11')

    # The Java interface library uses netcdf-related macro definitions even
    # when netcdf is disabled and the macros are not defined, e.g.:
    # hdfsdsImp.c:158:30: error: 'MAX_NC_NAME' undeclared
    conflicts('+java', when='@4.2.12:4.2.13~netcdf')

    # TODO: '@:4.2.14 ~external-xdr' and the fact that we compile for 64 bit
    # architecture should be in conflict

    @property
    def libs(self):
        """HDF can be queried for the following parameters:

        - "shared": shared libraries (default if '+shared')
        - "static": static libraries (default if '~shared')
        - "transitive": append transitive dependencies to the list of static
          libraries (the argument is ignored if shared libraries are
          requested)

        :return: list of matching libraries
        """
        libraries = ['libmfhdf', 'libdf']

        # Honour an explicit shared/static query parameter; otherwise
        # follow the package's own 'shared' variant.
        query_parameters = self.spec.last_query.extra_parameters

        if 'shared' in query_parameters:
            shared = True
        elif 'static' in query_parameters:
            shared = False
        else:
            shared = '+shared' in self.spec

        libs = find_libraries(
            libraries, root=self.prefix, shared=shared, recursive=True
        )

        if not libs:
            msg = 'Unable to recursively locate {0} {1} libraries in {2}'
            raise spack.error.NoLibrariesError(
                msg.format('shared' if shared else 'static',
                           self.spec.name,
                           self.spec.prefix))

        # For static linking, callers may request the transitive closure of
        # dependency libraries as well.
        if not shared and 'transitive' in query_parameters:
            libs += self.spec['jpeg:transitive'].libs
            libs += self.spec['zlib:transitive'].libs
            if '+szip' in self.spec:
                libs += self.spec['szip:transitive'].libs
            if ('+external-xdr' in self.spec and
                    self.spec['rpc'].name != 'libc'):
                libs += self.spec['rpc:transitive'].libs

        return libs

    def flag_handler(self, name, flags):
        """Append PIC flags to the C and Fortran compile flags when the
        'pic' variant is enabled.

        The (flags, None, None) return places the modified flags in the
        first slot of Spack's flag-handler triple.
        """
        if '+pic' in self.spec:
            if name == 'cflags':
                flags.append(self.compiler.cc_pic_flag)
            elif name == 'fflags':
                flags.append(self.compiler.f77_pic_flag)

        return flags, None, None

    def configure_args(self):
        """Assemble the ./configure argument list from the spec's variants
        and dependency prefixes."""
        config_args = ['--enable-production',
                       '--enable-static',
                       '--with-zlib=%s' % self.spec['zlib'].prefix,
                       '--with-jpeg=%s' % self.spec['jpeg'].prefix]

        config_args += self.enable_or_disable('shared')
        config_args += self.enable_or_disable('netcdf')
        config_args += self.enable_or_disable('fortran')
        config_args += self.enable_or_disable('java')

        if '+szip' in self.spec:
            config_args.append('--with-szlib=%s' % self.spec['szip'].prefix)
        else:
            config_args.append('--without-szlib')

        if '~external-xdr' in self.spec:
            # Use the XDR implementation bundled with HDF4.
            config_args.append('--enable-hdf4-xdr')
        elif self.spec['rpc'].name != 'libc':
            # We should not specify '--disable-hdf4-xdr' due to a bug in the
            # configure script.
            config_args.append('LIBS=%s' % self.spec['rpc'].libs.link_flags)

        return config_args

    # Otherwise, we randomly get:
    # SDgetfilename:
    #   incorrect file being opened - expected <file755>, retrieved <file754>
    def check(self):
        """Run the test suite serially; parallel runs are flaky (see the
        SDgetfilename note above)."""
        with working_dir(self.build_directory):
            make('check', parallel=False)
|
rspavel/spack
|
var/spack/repos/builtin/packages/hdf/package.py
|
Python
|
lgpl-2.1
| 6,068
|
[
"NetCDF"
] |
89c8febe3379de1c7dc055f9e71ce5fa5fda673b94e67bc29303160a150615bc
|
# -*- coding: utf-8 -*-
def VtkDibujaPropCells(nmbFilter):
    '''Draw the property labels on the cells of the given filter.

    Uses the module-level `renderer` and a `vtkLabeledShStrMapper` to put a
    2D label at the centroid of each visible cell.

    :param nmbFilter: VTK dataset/filter output whose cells are labelled.
    '''
    cc = vtk.vtkCellCenters()
    cc.SetInput(nmbFilter)  # Cell centroids.
    # Only label cells whose centroids are actually visible in the renderer.
    visCells = vtk.vtkSelectVisiblePoints()
    visCells.SetInput(cc)
    visCells.SetRenderer(renderer)
    visCells.SelectionWindowOff()

    # Create the mapper to display the cell ids. Specify the format to
    # use for the labels. Also create the associated actor.
    cellMapper = vtk.vtkLabeledShStrMapper()
    cellMapper.SetInput(visCells)
    ltp = cellMapper.LabelTextProperty()
    ltp.SetColor(0, 0, 0.9)
    ltp.BoldOff()
    ltp.ShadowOff()
    ltp.ItalicOff()
    # BUG FIX: the original called SetJutificationToCentered() (missing 's'),
    # which is not a vtkTextProperty method and raised AttributeError.
    ltp.SetJustificationToCentered()
    ltp.SetFontSize(10)

    cellLabels = vtk.vtkActor2D()
    cellLabels.SetMapper(cellMapper)
    renderer.AddActor2D(cellLabels)
|
lcpt/xc
|
python_modules/postprocess/xcVtk/vtk_dibuja_prop_cells.py
|
Python
|
gpl-3.0
| 806
|
[
"VTK"
] |
7fc80f05a91982deac1b1b9c8b5abd6277888f0641d709f3e57207a7d8347b2c
|
from __future__ import print_function
import os
import datetime
import boto
from boto.s3.key import Key
from jinja2 import Environment
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
# NOTE: these lookups raise KeyError at import time if the variables
# are not set in the environment.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']

# Target S3 bucket and the local directory tree that gets uploaded.
BUCKET_NAME = 'mdtraj-deps-wheelhouse'
ROOT = 'wheelhouse'
def main():
    """Upload every file under ROOT to the S3 wheelhouse bucket, writing an
    Apache-style index.html alongside each directory's contents.

    Credentials come from the AWS_* module constants (read from the
    environment at import time).
    """
    # BUG FIX: the original computed an unused local
    # `bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME` and then
    # created BUCKET_NAME anyway; the dead variable has been removed.
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
                           AWS_SECRET_ACCESS_KEY)
    bucket = conn.create_bucket(BUCKET_NAME)

    for dirpath, dirnames, filenames in os.walk(ROOT):
        for filename in filenames:
            fn = os.path.join(dirpath, filename)
            print('Uploading', fn, '...')
            k = Key(bucket)
            # Key names are relative to ROOT so the bucket mirrors the tree.
            k.key = os.path.relpath(fn, ROOT)
            k.set_contents_from_filename(fn)

        # Put up an index page for this directory
        k = Key(bucket)
        k.key = os.path.relpath(os.path.join(dirpath, 'index.html'), ROOT)
        k.metadata = {'Content-Type': 'text/html'}
        k.set_contents_from_string(index_page(dirpath, filenames))
def index_page(dirpath, filenames):
    """Render an Apache-style HTML directory index for *dirpath* listing
    each entry of *filenames*."""
    # Standard index page, copied from apache
    template = '''
    <html>
    <head>
    <title>Index of {{ dirpath }} </title>
    </head>
    <body>
    <h1>Index of {{ dirpath }} </h1>
    <table><tr><th><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5"><hr></th></tr>
    {% for fn in filenames %}
    <tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="{{ fn }}">{{ fn }} </a></td><td align="right"> {{ ts }} </td><td align=" right"> </td><td> </td></tr>
    {% endfor %}
    <tr><th colspan="5"><hr></th></tr>
    </table>
    </body></html>'''

    timestamp = str(datetime.datetime.now())
    renderer = Environment().from_string(template)
    return renderer.render(dirpath=dirpath, filenames=filenames, ts=timestamp)
# Script entry point: perform the upload when run directly.
if __name__ == '__main__':
    main()
|
marscher/mdtraj
|
tools/ci/speedpack/push-wheels-to-s3.py
|
Python
|
lgpl-2.1
| 2,173
|
[
"MDTraj"
] |
7aa42bdb5831723926358f16a6e6059040412084366cc4025ef4ba1e2760592b
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
# Registry mapping "place.name" keys to their docstrings.
docdict = {}


def get(name):
    """Return the docstring registered under *name*, or None if absent."""
    return docdict.get(name, None)


def add_newdoc(place, name, doc):
    """Register *doc* in the dictionary under the key "<place>.<name>"."""
    key = '.'.join((place, name))
    docdict[key] = doc
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtria(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighed integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_v(\nu, 2 a x) \, dx
where :math:`J_v` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(x)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
.. versionadded:: 0.14.0
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Examples
--------
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
.. versionadded:: 0.14.0
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Examples
--------
>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
gamma(a+b)/(gamma(a)*gamma(b)))*integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtri(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
::
integral(sqrt(1-m*sin(t)**2),t=0..pi/2)
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi,m)
Incomplete elliptic integral of the second kind
::
integral(sqrt(1-m*sin(t)**2),t=0..phi)
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value ``ph`` is such that if ``u = ellik(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
The complete elliptic integral of the first kind around m=1.
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : array_like
Value of the elliptic integral.
See Also
--------
ellipk
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
::
integral(1/sqrt(1-m*sin(t)**2),t=0..phi)
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
# Docstring for the expit (logistic sigmoid) ufunc. Fixed a copy/paste
# error in the Notes section: it said "As a ufunc logit takes", but this
# entry documents expit.
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
.. versionadded:: 0.10.0
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
known as Snedcor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
# Docstring for the hankel2e ufunc (exponentially scaled Hankel function of
# the second kind). Fixed: the "Defined as" formula previously read
# "hankel1e(v,z) = hankel1(v,z) * exp(1j * z)", which names the first-kind
# function; corrected to hankel2e/hankel2.
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
# Docstring for the i1e ufunc. Fixed: the summary said "order 0", but i1e
# is the exponentially scaled modified Bessel function of order 1 (its own
# defining formula below uses i1).
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
# Docstring for the itairy ufunc (integrals of Airy functions from 0 to x).
# Fixed typo in the summary line: "functios" -> "functions".
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
# Docstring for the j0 ufunc. Fixed grammar in the summary: "Bessel
# function the first kind" -> "Bessel function of the first kind".
add_newdoc("scipy.special", "j0",
"""
j0(x)
Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
# Docstring for the k1 ufunc. Fixed two copy/paste errors: the signature
# line read "i1(x)" instead of "k1(x)", and the summary said "first kind";
# K_1 is the modified Bessel function of the second kind of order 1
# (consistent with the k0 entry above).
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
# Docstring for the kei ufunc. Fixed copy/paste error: the summary said
# "Kelvin function ker", but this entry documents kei (the separate ker
# entry below already covers ker).
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
.. versionadded:: 0.10.0
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
Associated legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
Returns the modified Struve function Lv(x) of order v at x, x must
be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
# NOTE(review): the noncentral-F (ncfdtr*) and noncentral-t (nctdtr*)
# entries below register empty docstrings — presumably placeholders still
# to be written; confirm whether documentation was intended here.
# NOTE(review): "ncfdtrifn" looks like a misspelling of "ncfdtridfn"
# (which also appears below) — verify against the compiled ufunc table
# before renaming, since add_newdoc looks the name up at runtime.
add_newdoc("scipy.special", "ncfdtr",
"""
""")
add_newdoc("scipy.special", "ncfdtri",
"""
""")
add_newdoc("scipy.special", "ncfdtrifn",
"""
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
""")
add_newdoc("scipy.special", "nctdtr",
"""
""")
add_newdoc("scipy.special", "nctdtridf",
"""
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
""")
add_newdoc("scipy.special", "nctdtrit",
"""
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
# Docstring for the obl_ang1 ufunc. Fixed typo: "sheroidal" ->
# "spheroidal" (matching the correct spelling used elsewhere in the same
# docstring).
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate sheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate sheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate sheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate sheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate sheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate sheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate sheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate sheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate sheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate sheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate sheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate sheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate sheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate sheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
# Docstring for the pro_rad2 ufunc. Fixed typos: "secon kind" -> "second
# kind" in the summary, and "sheroidal" -> "spheroidal" in the body.
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and |x|<1.0.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate sheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate sheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t,t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t,t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t,t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t,t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n,e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n,y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n,e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
Computes the struve function Hv(x) of order v at x, x must be
positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
.. versionadded:: 0.13.0
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
.. versionadded:: 0.13.0
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
Hurwitz zeta funtion).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/scipy/special/add_newdocs.py
|
Python
|
agpl-3.0
| 56,683
|
[
"Gaussian"
] |
a300b7d17494fa97ab43a685c6cd1b355c0de6911c96fd2dca283acbab8c3bf4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# oiddiscover - discover valid openid relying party endpoints for this realm
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Discovery of valid OpenID relying party endpoints for this realm.
The OpenID protocol specifies an optional verification mechanism for the
OpenID provider to verify that an allow request came from a valid relying party
endpoint.
http://openid.net/specs/openid-authentication-2_0.html#rp_discovery
We extract the OpenID setting and reply with a suitable YADIS XRDS document
here if OpenID is enabled.
"""
import os
import tempfile
import shared.returnvalues as returnvalues
from shared.functional import validate_input
from shared.init import initialize_main_variables
def signature():
    """Return the interface signature of the main function.

    The signature is a pair of the default output object type ('file')
    and the dictionary of default argument values (none for this page).
    """
    return ['file', {}]
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Replies with a YADIS XRDS document advertising the valid OpenID
    relying party endpoints for this realm, so that OpenID providers can
    verify allow requests against the return_to URLs (RP discovery).
    When OpenID is disabled in the configuration an empty endpoint list
    is advertised instead.

    Returns the usual (output_objects, returnvalue) pair.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_menu=False)
    logger = configuration.logger
    logger.info('oiddiscover: %s' % user_arguments_dict)
    output_objects.append({'object_type': 'header', 'text'
                           : 'OpenID Discovery for %s' % \
                           configuration.short_title})
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(user_arguments_dict,
            defaults, output_objects, allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    # Force to raw file output unless something else is explicitly requested.
    # BUG FIX: use os.environ.get since QUERY_STRING may be completely unset
    # (e.g. plain POST requests); direct indexing raised KeyError there.
    raw_output = False
    if os.environ.get('QUERY_STRING', '').find('output_format') == -1:
        raw_output = True
        user_arguments_dict['output_format'] = ['file']

    # YADIS XRDS skeleton; the single %s slot receives the <URI> list below
    discovery_doc = '''<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS
    xmlns:xrds="xri://$xrds"
    xmlns:openid="http://openid.net/xmlns/1.0"
    xmlns="xri://$xrd*($v*2.0)">
  <XRD>
    <Service priority="1">
      <Type>http://specs.openid.net/auth/2.0/return_to</Type>
      %s
    </Service>
  </XRD>
</xrds:XRDS>
'''
    if configuration.site_enable_openid:
        # TMP! add own openid server realm as well
        sid_url = configuration.migserver_https_sid_url
        oid_url = configuration.migserver_https_oid_url
        helper_urls = {
            'migoid_entry_url': os.path.join(sid_url),
            'migoid_signup_url': os.path.join(sid_url, 'cgi-sid', 'signup.py'),
            'migoid_login_url': os.path.join(sid_url, 'cgi-sid', 'login.py'),
            'migoid_create_url': os.path.join(sid_url, 'wsgi-bin',
                                              'autocreate.py'),
            'migoid_dash_url': os.path.join(sid_url, 'wsgi-bin',
                                            'dashboard.py'),
            'migoid_files_url': os.path.join(sid_url, 'wsgi-bin',
                                            'fileman.py'),
            'kitoid_entry_url': os.path.join(oid_url),
            'kitoid_signup_url': os.path.join(oid_url, 'cgi-sid', 'signup.py'),
            'kitoid_login_url': os.path.join(oid_url, 'cgi-sid', 'login.py'),
            'kitoid_create_url': os.path.join(oid_url, 'cgi-sid',
                                              'autocreate.py'),
            'kitoid_dash_url': os.path.join(oid_url, 'wsgi-bin',
                                            'dashboard.py'),
            'kitoid_files_url': os.path.join(oid_url, 'wsgi-bin',
                                             'fileman.py')}
        discovery_uris = '''<URI>%(kitoid_entry_url)s</URI>
<URI>%(kitoid_signup_url)s</URI>
<URI>%(kitoid_login_url)s</URI>
<URI>%(kitoid_create_url)s</URI>
<URI>%(kitoid_dash_url)s</URI>
<URI>%(kitoid_files_url)s</URI>
<URI>%(migoid_entry_url)s</URI>
<URI>%(migoid_signup_url)s</URI>
<URI>%(migoid_login_url)s</URI>
<URI>%(migoid_create_url)s</URI>
<URI>%(migoid_dash_url)s</URI>
<URI>%(migoid_files_url)s</URI>
''' % helper_urls
    else:
        discovery_uris = ''
    output_objects.append({'object_type': 'text', 'text':
                           'Advertising valid OpenID endpoints:'})
    discovery_doc = discovery_doc % discovery_uris
    if raw_output:
        # serve as a download with explicit XRDS content type and length
        headers = [('Content-Type', 'application/xrds+xml'),
                   ('Content-Disposition', 'attachment; filename=oid.xrds'),
                   ('Content-Length', '%s' % len(discovery_doc))]
        output_objects = [{'object_type': 'start', 'headers': headers}]
        output_objects.append({'object_type': 'binary', 'data': discovery_doc})
        return (output_objects, returnvalues.OK)
    else:
        # output discovery_doc as raw xrds doc in any case
        output_objects.append({'object_type': 'file_output', 'lines':
                               [discovery_doc]})
        return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/oiddiscover.py
|
Python
|
gpl-2.0
| 5,822
|
[
"Brian"
] |
f8e946b966be791abe4805718e6ebbee4939d600facb47881c3eab99053e5eac
|
import os
import time
import yaml
import datetime
import linecache
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pymatgen import MPRester
from pymatgen.io.cif import CifWriter
from diffpy.Structure import loadStructure
from diffpy.srreal.structureadapter import nosymmetry
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
#from pdf_lib.glbl import glbl
#from glbl import glbl
class PdfLibBuild(object):
    ''' a class to look up cif data and calculate pdf automatically

    Parameters
    ----------
    API_key : str
        user-id-like key generated from the Materials Project
    lib_dir : str, optional
        directory (under the user's home) where cif and pdf_data are
        stored; defaults to 'PDF_Lib_<YYYY-mm-dd>'
    '''
    def __init__(self, API_key, lib_dir=None):
        # set up API_key; instantiating MPRester checks it is well formed
        self.API_key = API_key
        test_m = MPRester(API_key)
        print('You are using %s as API key' % API_key)
        # create working dir
        if not lib_dir:
            lib_dir = time.strftime('PDF_Lib_%Y-%m-%d')
        working_dir = os.path.expanduser('~/' + lib_dir)
        self.working_dir = working_dir
        self._makedirs(working_dir)
        print('Lib dir %s has been built' % working_dir)
        # output lib directories; populated later by the *_build methods
        self.data_dir = None
        self.cif_dir = None
        self.output_dir = None
        self.crystal_system = None

    def get_symbol_list(self, crystal_system):
        ''' Get the space-group short symbols of one crystal system.

        Parses the bundled Space_group_ref.txt reference file next to
        this module and returns the symbols as a list of strings.
        '''
        ref = "Space_group_ref.txt"
        ind_list = []
        ref_path = os.path.join(os.path.dirname(__file__), ref)  # relative path
        if os.path.isfile(ref_path):
            print('open {} as reference'.format(ref_path))
        # FIXME - temporary logic. Refine it later when loading Space_group_ref.txt
        with open(ref_path, 'r') as f:
            for ind, line in enumerate(f):
                if crystal_system in line:
                    # the symbol line sits two lines above the match;
                    # enumerate() is 0-based while linecache.getline()
                    # is 1-based, hence the combined offset of -2
                    ind_list.append(ind - 2)
        symb_list = []
        for ind in ind_list:
            read = linecache.getline(ref_path, ind)
            # the symbol is the text between the outermost double quotes
            position_ind = [i for i, x in enumerate(read) if x == '"']
            if position_ind:
                head = min(position_ind)
                tail = max(position_ind)
                symb_list.append(read[head + 1:tail])
        return symb_list

    def _makedirs(self, path_name):
        '''makedirs that tolerates an existing directory (python2 helper)'''
        if not os.path.isdir(path_name):
            os.makedirs(path_name)

    def cif_lib_build(self, crystal_system, size_limit=None):
        ''' function to build a cif library based on space group symbols

        Parameters
        ----------
        crystal_system : str
            name of crystal system, capitalized, like CUBIC. Space-group
            symbols are generated by the get_symbol_list method.
        size_limit : int, optional
            upper limit of data sets pulled out per symbol; all matches
            are pulled when omitted.

        Returns
        -------
        str
            path to the directory the cif files were written to
        '''
        self.crystal_system = crystal_system
        space_group_symbol = self.get_symbol_list(crystal_system)
        if isinstance(space_group_symbol, list):
            space_group_symbol_set = space_group_symbol
        else:
            # BUG FIX: was list(spac_group_symbol) -> NameError; list() on a
            # single symbol string would also split it into characters
            space_group_symbol_set = [space_group_symbol]
        ## changing dir
        data_dir = os.path.join(self.working_dir, crystal_system)
        self.data_dir = data_dir
        self._makedirs(data_dir)
        os.chdir(data_dir)
        if os.getcwd() == data_dir:
            print('Library will be built at %s' % data_dir)
        else:
            e = 'Weird, return'
            raise RuntimeError(e)
        # summary lists
        missed_list = []  # symbols with no match in the database
        m_id_list = []  # material ids fetched in this run
        time_info = time.strftime('%Y-%m-%d')
        # create cif output dir
        cif_dir = os.path.join(data_dir, 'cif_data')
        self._makedirs(cif_dir)
        self.cif_dir = cif_dir
        # looping
        for space_group_symbol in space_group_symbol_set:
            print('Building library with space_group symbol: {}'.format(space_group_symbol))
            ## search query
            m = MPRester(self.API_key)
            search = m.query(criteria={"spacegroup.symbol": space_group_symbol},
                             properties=["material_id"])
            if search:
                # BUG FIX: size_limit used to be ignored (dim was hard-coded
                # to 400); now it really caps the number of data sets
                if size_limit:
                    dim = min(len(search), size_limit)
                else:
                    dim = len(search)
                print('Pull out %s data sets' % dim)
                print('Now, starts to save cif and compute pdf...')
                for i in range(dim):
                    # part 1: grab cif files from the database
                    m_id = search[i]['material_id']
                    m_id_list.append(m_id)
                    m_struc = m.get_structure_by_material_id(m_id)
                    m_formula = m_struc.formula
                    m_name = m_formula.replace(' ', '')  # material name
                    cif_w = CifWriter(m_struc)
                    cif_name = '{}_{}.cif'.format(space_group_symbol, m_name)
                    cif_w_name = os.path.join(cif_dir, cif_name)
                    if os.path.isfile(cif_w_name):
                        # skip files that already exist
                        print('already have {}, skip'.format(cif_name))
                    else:
                        cif_w.write_file(cif_w_name)
                        print('{} has been saved'.format(cif_name))
            else:
                print('Hmm, no result. Something wrong')
                missed_list.append(space_group_symbol)
        m_id_list_name = '{}_{}_material_id.txt'.format(crystal_system, time_info)
        m_id_list_w_name = os.path.join(data_dir, m_id_list_name)
        # BUG FIX: material ids are strings; the default float fmt of
        # np.savetxt raises on them, so write them with fmt='%s'
        np.savetxt(m_id_list_w_name, m_id_list, fmt='%s')
        print('''SUMMARY: for {} crystal system,
Symbols {} can't be found from data base'''.format(crystal_system, missed_list))
        return cif_dir

    def gr_lib_build(self, cif_lib_path):
        ''' method to calculate G(r) based on the path of the cif library.

        Parameters of the G(r) calculation are set via glbl.<attribute>;
        tune them based on the purpose of the library. Files containing
        all G(r) curves, the r grid, space-group symbols and material
        names are saved under <data_dir>/lib_data.

        Parameters
        ----------
        cif_lib_path : str
            path to lib of cif files

        Returns
        -------
        numpy.ndarray
            2D array of the stacked G(r) data
        '''
        # NOTE(review): the glbl import at the top of this module is
        # commented out, so `glbl` is undefined at runtime here -- confirm
        # and re-enable the intended import before using this method.
        el_list = []  # material names, one per cif
        space_group_symbol_list = []  # space-group symbols, one per cif
        # set up calculation environment
        pdf = PDFCalculator()
        pdf.rstep = glbl.rstep
        cfg = {'qmin': glbl.q_min, 'qmax': glbl.q_max, 'rmin': glbl.r_min, 'rmax': glbl.r_max}
        Bisoequiv = glbl.Bisoequiv  # FIXME: current value = 0.5, confirm the most suitable value
        print('====Parameter used in this PDF calculator is: {}===='.format(cfg))
        print('====Bisoequiv used in this PDF calculator is: {}===='.format(Bisoequiv))
        # step 1: list cif dir
        output_dir = os.path.join(self.data_dir, 'lib_data')
        self._makedirs(output_dir)
        self.output_dir = output_dir
        cif_f_list = os.listdir(self.cif_dir)
        # seed the arrays from the first structure, as numpy cannot
        # vstack onto an empty array
        struc = loadStructure(os.path.join(self.cif_dir, cif_f_list[0]))
        struc.Bisoequiv = Bisoequiv
        (r, g) = pdf(nosymmetry(struc), **cfg)
        r_grid = np.array(r)  # data x-axis
        gr_list = np.empty_like(np.array(g))  # data y-axis
        for cif in cif_f_list:
            # part 2: calculate PDF with diffpy
            struc = loadStructure(os.path.join(self.cif_dir, cif))
            struc.Bisoequiv = Bisoequiv
            (r, g) = pdf(struc, **cfg)
            print('Finished calculation of G(r) on {}'.format(cif))
            # file names are '<symbol>_<material>.cif'
            sep = cif.index('_')
            space_group_symbol = cif[:sep]
            m_name = cif[sep + 1:]
            # part 3: collect data
            gr_list = np.vstack((gr_list, g))
            space_group_symbol_list.append(space_group_symbol)
            el_list.append(m_name)
        time_info = time.strftime('%Y-%m-%d')
        gr_list_name = '{}_{}_Gr'.format(self.crystal_system, time_info)
        gr_list_w_name = os.path.join(output_dir, gr_list_name)
        print('Saving {}'.format(gr_list_w_name))
        np.save(gr_list_w_name, gr_list)
        r_grid_name = '{}_{}_rgrid'.format(self.crystal_system, time_info)
        r_grid_w_name = os.path.join(output_dir, r_grid_name)
        np.save(r_grid_w_name, r)
        space_group_symbol_list_name = '{}_{}_SpaceGroupSymbol'.format(self.crystal_system, time_info)
        space_group_symbol_list_w_name = os.path.join(output_dir, space_group_symbol_list_name)
        np.save(space_group_symbol_list_w_name, space_group_symbol_list)
        el_list_name = '{}_{}_Element'.format(self.crystal_system, time_info)
        el_list_w_name = os.path.join(output_dir, el_list_name)
        np.save(el_list_w_name, el_list)
        print('''====SUMMARY====:
for {} crystal system,
Number of cif pulled out and G(r) calculated is {}'''.format(self.crystal_system, np.shape(gr_list)))
        return gr_list
|
chiahaoliu/pdf_lib
|
pdf_lib/pdf_lib.py
|
Python
|
mit
| 10,483
|
[
"CRYSTAL",
"pymatgen"
] |
68964a97dd79f0a7abfbd4bb62dbf2c9c0cc9c9b81c787bddb4f8fb3a5a07ccc
|
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some python string. python string emitted by the
root node is validated for safeeval and executed using python in the given environment.
Enough care is taken to make sure the generated code and the template has line to line match,
so that the error messages can point to exact line number in template. (It doesn't work in some cases still.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
# Public API of the templating module exposed via `from ... import *`.
__all__ = [
    "Template",
    "Render", "render", "frender",
    "ParseError", "SecurityError",
    "test"
]
import tokenize
import os
import glob
import re
from utils import storage, safeunicode, safestr, re_compile
from webapi import config
from net import websafe
def splitline(text):
    r"""
    Splits the given text at the first newline; the newline stays
    attached to the first part.

    >>> splitline('foo\nbar')
    ('foo\n', 'bar')
    >>> splitline('foo')
    ('foo', '')
    >>> splitline('')
    ('', '')
    """
    head, newline, tail = text.partition('\n')
    return head + newline, tail
class Parser:
    """Parser Base.

    Turns template text into a tree of nodes (DefwithNode at the root)
    by reading one section at a time: a section is a block statement, an
    assignment or a plain line of text/expressions.
    """
    def __init__(self, text, name="<template>"):
        self.text = text
        self.name = name
    def parse(self):
        # Read the optional '$def with (...)' header, then the body.
        text = self.text
        defwith, text = self.read_defwith(text)
        suite = self.read_suite(text)
        return DefwithNode(defwith, suite)
    def read_defwith(self, text):
        if text.startswith('$def with'):
            defwith, text = splitline(text)
            defwith = defwith[1:].strip() # strip $ and spaces
            return defwith, text
        else:
            return '', text
    def read_section(self, text):
        r"""Reads one section from the given text.
        section -> block | assignment | line
        >>> read_section = Parser('').read_section
        >>> read_section('foo\nbar\n')
        (<line: [t'foo\n']>, 'bar\n')
        >>> read_section('$ a = b + 1\nfoo\n')
        (<assignment: 'a = b + 1'>, 'foo\n')
        read_section('$for in range(10):\n hello $i\nfoo)
        """
        if text.lstrip(' ').startswith('$'):
            # Dispatch on the first python token after the '$'.
            index = text.index('$')
            begin_indent, text2 = text[:index], text[index+1:]
            ahead = self.python_lookahead(text2)
            if ahead == 'var':
                return self.read_var(text2)
            elif ahead in STATEMENT_NODES:
                return self.read_block_section(text2, begin_indent)
            elif ahead in KEYWORDS:
                return self.read_keyword(text2)
            elif ahead.strip() == '':
                # assignments starts with a space after $
                # ex: $ a = b + 2
                return self.read_assignment(text2)
        # not a special section: parse as a plain text/expression line
        return self.readline(text)
    def read_var(self, text):
        r"""Reads a var statement.
        >>> read_var = Parser('').read_var
        >>> read_var('var x=10\nfoo')
        (<var: x = 10>, 'foo')
        >>> read_var('var x: hello $name\nfoo')
        (<var: x = join_('hello ', escape_(name, True))>, 'foo')
        """
        line, text = splitline(text)
        tokens = self.python_tokens(line)
        if len(tokens) < 4:
            raise SyntaxError('Invalid var statement')
        name = tokens[1]
        sep = tokens[2]
        value = line.split(sep, 1)[1].strip()
        if sep == '=':
            pass # no need to process value
        elif sep == ':':
            #@@ Hack for backward-compatability
            if tokens[3] == '\n': # multi-line var statement
                block, text = self.read_indented_block(text, '    ')
                lines = [self.readline(x)[0] for x in block.splitlines()]
                nodes = []
                for x in lines:
                    nodes.extend(x.nodes)
                    nodes.append(TextNode('\n'))
            else: # single-line var statement
                linenode, _ = self.readline(value)
                nodes = linenode.nodes
            parts = [node.emit('') for node in nodes]
            value = "join_(%s)" % ", ".join(parts)
        else:
            raise SyntaxError('Invalid var statement')
        return VarNode(name, value), text
    def read_suite(self, text):
        r"""Reads section by section till end of text.
        >>> read_suite = Parser('').read_suite
        >>> read_suite('hello $name\nfoo\n')
        [<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
        """
        sections = []
        while text:
            section, text = self.read_section(text)
            sections.append(section)
        return SuiteNode(sections)
    def readline(self, text):
        r"""Reads one line from the text. Newline is suppressed if the line ends with \.
        >>> readline = Parser('').readline
        >>> readline('hello $name!\nbye!')
        (<line: [t'hello ', $name, t'!\n']>, 'bye!')
        >>> readline('hello $name!\\\nbye!')
        (<line: [t'hello ', $name, t'!']>, 'bye!')
        >>> readline('$f()\n\n')
        (<line: [$f(), t'\n']>, '\n')
        """
        line, text = splitline(text)
        # suppress new line if line ends with \
        if line.endswith('\\\n'):
            line = line[:-2]
        nodes = []
        while line:
            node, line = self.read_node(line)
            nodes.append(node)
        return LineNode(nodes), text
    def read_node(self, text):
        r"""Reads a node from the given text and returns the node and remaining text.
        >>> read_node = Parser('').read_node
        >>> read_node('hello $name')
        (t'hello ', '$name')
        >>> read_node('$name')
        ($name, '')
        """
        if text.startswith('$$'):
            # '$$' is an escaped literal dollar sign
            return TextNode('$'), text[2:]
        elif text.startswith('$#'): # comment
            line, text = splitline(text)
            return TextNode('\n'), text
        elif text.startswith('$'):
            text = text[1:] # strip $
            if text.startswith(':'):
                escape = False
                text = text[1:] # strip :
            else:
                escape = True
            return self.read_expr(text, escape=escape)
        else:
            return self.read_text(text)
    def read_text(self, text):
        r"""Reads a text node from the given text.
        >>> read_text = Parser('').read_text
        >>> read_text('hello $name')
        (t'hello ', '$name')
        """
        index = text.find('$')
        if index < 0:
            return TextNode(text), ''
        else:
            return TextNode(text[:index]), text[index:]
    def read_keyword(self, text):
        # pass/break/continue/return are copied through as python code
        line, text = splitline(text)
        return CodeNode(None, line.strip() + "\n"), text
    def read_expr(self, text, escape=True):
        """Reads a python expression from the text and returns the expression and remaining text.
        expr -> simple_expr | paren_expr
        simple_expr -> id extended_expr
        extended_expr -> attr_access | paren_expr extended_expr | ''
        attr_access -> dot id extended_expr
        paren_expr -> [ tokens ] | ( tokens ) | { tokens }
        >>> read_expr = Parser('').read_expr
        >>> read_expr("name")
        ($name, '')
        >>> read_expr("a.b and c")
        ($a.b, ' and c')
        >>> read_expr("a. b")
        ($a, '. b')
        >>> read_expr("name</h1>")
        ($name, '</h1>')
        >>> read_expr("(limit)ing")
        ($(limit), 'ing')
        >>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
        ($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
        """
        # Recursive-descent parse over python tokens; each helper below
        # implements one production of the grammar documented above.
        def simple_expr():
            identifier()
            extended_expr()
        def identifier():
            tokens.next()
        def extended_expr():
            lookahead = tokens.lookahead()
            if lookahead is None:
                return
            elif lookahead.value == '.':
                attr_access()
            elif lookahead.value in parens:
                paren_expr()
                extended_expr()
            else:
                return
        def attr_access():
            from token import NAME # python token constants
            dot = tokens.lookahead()
            if tokens.lookahead2().type == NAME:
                tokens.next() # consume dot
                identifier()
                extended_expr()
        def paren_expr():
            # consume a balanced (...) / [...] / {...} group
            begin = tokens.next().value
            end = parens[begin]
            while True:
                if tokens.lookahead().value in parens:
                    paren_expr()
                else:
                    t = tokens.next()
                    if t.value == end:
                        break
            return
        parens = {
            "(": ")",
            "[": "]",
            "{": "}"
        }
        def get_tokens(text):
            """tokenize text using python tokenizer.
            Python tokenizer ignores spaces, but they might be important in some cases.
            This function introduces dummy space tokens when it identifies any ignored space.
            Each token is a storage object containing type, value, begin and end.
            """
            readline = iter([text]).next
            end = None
            for t in tokenize.generate_tokens(readline):
                t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
                if end is not None and end != t.begin:
                    _, x1 = end
                    _, x2 = t.begin
                    yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
                end = t.end
                yield t
        class BetterIter:
            """Iterator-like object with support for 2 look-aheads."""
            def __init__(self, items):
                self.iteritems = iter(items)
                self.items = []
                self.position = 0
                self.current_item = None
            def lookahead(self):
                if len(self.items) <= self.position:
                    self.items.append(self._next())
                return self.items[self.position]
            def _next(self):
                try:
                    return self.iteritems.next()
                except StopIteration:
                    return None
            def lookahead2(self):
                if len(self.items) <= self.position+1:
                    self.items.append(self._next())
                return self.items[self.position+1]
            def next(self):
                self.current_item = self.lookahead()
                self.position += 1
                return self.current_item
        tokens = BetterIter(get_tokens(text))
        if tokens.lookahead().value in parens:
            paren_expr()
        else:
            simple_expr()
        # everything up to the end column of the last consumed token is
        # the expression; the rest of the line is returned untouched
        row, col = tokens.current_item.end
        return ExpressionNode(text[:col], escape=escape), text[col:]
    def read_assignment(self, text):
        r"""Reads assignment statement from text.
        >>> read_assignment = Parser('').read_assignment
        >>> read_assignment('a = b + 1\nfoo')
        (<assignment: 'a = b + 1'>, 'foo')
        """
        line, text = splitline(text)
        return AssignmentNode(line.strip()), text
    def python_lookahead(self, text):
        """Returns the first python token from the given text.
        >>> python_lookahead = Parser('').python_lookahead
        >>> python_lookahead('for i in range(10):')
        'for'
        >>> python_lookahead('else:')
        'else'
        >>> python_lookahead(' x = 1')
        ' '
        """
        readline = iter([text]).next
        tokens = tokenize.generate_tokens(readline)
        return tokens.next()[1]
    def python_tokens(self, text):
        # all token values of a single line, in order
        readline = iter([text]).next
        tokens = tokenize.generate_tokens(readline)
        return [t[1] for t in tokens]
    def read_indented_block(self, text, indent):
        r"""Read a block of text. A block is what typically follows a for or it statement.
        It can be in the same line as that of the statement or an indented block.
        >>> read_indented_block = Parser('').read_indented_block
        >>> read_indented_block('  a\n  b\nc', '  ')
        ('a\nb\n', 'c')
        >>> read_indented_block('  a\n    b\n  c\nd', '  ')
        ('a\n  b\nc\n', 'd')
        >>> read_indented_block('  a\n\n    b\nc', '  ')
        ('a\n\n    b\n', 'c')
        """
        if indent == '':
            return '', text
        block = ""
        while text:
            line, text2 = splitline(text)
            if line.strip() == "":
                # blank lines belong to the block regardless of indent
                block += '\n'
            elif line.startswith(indent):
                block += line[len(indent):]
            else:
                break
            text = text2
        return block, text
    def read_statement(self, text):
        r"""Reads a python statement.
        >>> read_statement = Parser('').read_statement
        >>> read_statement('for i in range(10): hello $name')
        ('for i in range(10):', ' hello $name')
        """
        tok = PythonTokenizer(text)
        tok.consume_till(':')
        return text[:tok.index], text[tok.index:]
    def read_block_section(self, text, begin_indent=''):
        r"""
        >>> read_block_section = Parser('').read_block_section
        >>> read_block_section('for i in range(10): hello $i\nfoo')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
        >>> read_block_section('for i in range(10):\n        hello $i\n    foo', begin_indent='    ')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, '    foo')
        >>> read_block_section('for i in range(10):\n  hello $i\nfoo')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
        """
        line, text = splitline(text)
        stmt, line = self.read_statement(line)
        keyword = self.python_lookahead(stmt)
        # if there is some thing left in the line
        if line.strip():
            block = line.lstrip()
        else:
            def find_indent(text):
                rx = re_compile('  +')
                match = rx.match(text)
                first_indent = match and match.group(0)
                return first_indent or ""
            # find the indentation of the block by looking at the first line
            first_indent = find_indent(text)[len(begin_indent):]
            indent = begin_indent + min(first_indent, INDENT)
            block, text = self.read_indented_block(text, indent)
        return self.create_block_node(keyword, stmt, block, begin_indent), text
    def create_block_node(self, keyword, stmt, block, begin_indent):
        if keyword in STATEMENT_NODES:
            return STATEMENT_NODES[keyword](stmt, block, begin_indent)
        else:
            raise ParseError, 'Unknown statement: %s' % repr(keyword)
class PythonTokenizer:
    """Utility wrapper over python tokenizer.

    Tracks the column (self.index) reached in the wrapped text so callers
    can split the text at token boundaries.
    """
    def __init__(self, text):
        self.text = text
        readline = iter([text]).next
        self.tokens = tokenize.generate_tokens(readline)
        self.index = 0
    def consume_till(self, delim):
        """Consumes tokens till colon.
        >>> tok = PythonTokenizer('for i in range(10): hello $i')
        >>> tok.consume_till(':')
        >>> tok.text[:tok.index]
        'for i in range(10):'
        >>> tok.text[tok.index:]
        ' hello $i'
        """
        try:
            while True:
                t = self.next()
                if t.value == delim:
                    break
                elif t.value == '(':
                    # skip over balanced bracket groups so delimiters
                    # inside them are not mistaken for *our* delimiter
                    self.consume_till(')')
                elif t.value == '[':
                    self.consume_till(']')
                elif t.value == '{':
                    self.consume_till('}')
                # if end of line is found, it is an exception.
                # Since there is no easy way to report the line number,
                # leave the error reporting to the python parser later
                #@@ This should be fixed.
                if t.value == '\n':
                    break
        except:
            #raise ParseError, "Expected %s, found end of line." % repr(delim)
            # raising ParseError doesn't show the line number.
            # if this error is ignored, then it will be caught when compiling the python code.
            return
    def next(self):
        # advance one token, recording the column where it ends
        type, t, begin, end, line = self.tokens.next()
        row, col = end
        self.index = col
        return storage(type=type, value=t, begin=begin, end=end)
class DefwithNode:
    """Root node: the `$def with (...)` header plus the template body.

    The header is rewritten into a python function definition named
    __template__; without a header an argument-less definition is used.
    """
    def __init__(self, defwith, suite):
        if defwith:
            # 'def with (a, b)' -> 'def __template__ (a, b):'
            self.defwith = defwith.replace('with', '__template__') + ':'
        else:
            self.defwith = 'def __template__():'
        self.suite = suite
    def emit(self, indent):
        return self.defwith + self.suite.emit(indent + INDENT)
    def __repr__(self):
        # BUG FIX: previously formatted self.nodes, which never exists
        # on this class (AttributeError); the suite is what it holds.
        return "<defwith: %s, %s>" % (self.defwith, self.suite)
class TextNode:
    """A literal text fragment of the template."""

    def __init__(self, value):
        self.value = value

    def emit(self, indent):
        # indent is irrelevant for an inline literal; emit its repr so the
        # generated python code recreates the exact string
        return repr(self.value)

    def __repr__(self):
        return 't%r' % (self.value,)
class ExpressionNode:
    """A `$expr` node; its value is rendered through escape_()."""

    def __init__(self, value, escape=True):
        stripped = value.strip()
        # convert ${...} to $(...).  NOTE: the brace test intentionally
        # runs on the *unstripped* value, matching the original grammar.
        if value.startswith('{') and value.endswith('}'):
            stripped = '(' + stripped[1:-1] + ')'
        self.value = stripped
        self.escape = escape

    def emit(self, indent):
        return 'escape_(%s, %s)' % (self.value, bool(self.escape))

    def __repr__(self):
        prefix = '' if self.escape else ':'
        return "$%s%s" % (prefix, self.value)
class AssignmentNode:
    """A `$ name = expr` assignment line, copied into the output verbatim."""

    def __init__(self, code):
        self.code = code

    def emit(self, indent, begin_indent=''):
        return "%s%s\n" % (indent, self.code)

    def __repr__(self):
        return "<assignment: %r>" % (self.code,)
class LineNode:
    """One template line: a sequence of text/expression nodes that is
    yielded as a single joined string."""

    def __init__(self, nodes):
        self.nodes = nodes

    def emit(self, indent, text_indent='', name=''):
        parts = []
        if text_indent:
            # the pending block indentation is emitted as a literal prefix
            parts.append(repr(text_indent))
        for node in self.nodes:
            parts.append(node.emit(''))
        return indent + 'yield %s, join_(%s)\n' % (repr(name), ', '.join(parts))

    def __repr__(self):
        return "<line: %r>" % (self.nodes,)
INDENT = ' ' # 4 spaces
class BlockNode:
    """Generic compound statement (`while`/`if`/`elif`/`else`/`def`): a
    python statement line plus the parsed suite of its indented body."""

    def __init__(self, stmt, block, begin_indent=''):
        self.stmt = stmt
        self.suite = Parser('').read_suite(block)
        self.begin_indent = begin_indent

    def emit(self, indent, text_indent=''):
        text_indent = self.begin_indent + text_indent
        out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
        return out

    def text(self):
        # NOTE(review): broken dead code -- neither self.nodes nor indent
        # exists here, so calling text() raises. Kept untouched for
        # interface compatibility; confirm before removing.
        return '${' + self.stmt + '}' + "".join([node.text(indent) for node in self.nodes])

    def __repr__(self):
        # BUG FIX: previously formatted self.nodelist, which is never set
        # (AttributeError); the parsed suite is what this node holds.
        return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
    """``$for`` block.  The iterable expression is wrapped in
    ``loop.setup(...)`` so the template-level ``loop`` helpers
    (loop.index, loop.parity, ...) work inside the body."""

    def __init__(self, stmt, block, begin_indent=''):
        self.original_stmt = stmt
        tok = PythonTokenizer(stmt)
        tok.consume_till('in')
        header = stmt[:tok.index]      # e.g. "for i in"
        iterable = stmt[tok.index:-1]  # iterable expression, without ":"
        wrapped = '%s loop.setup(%s):' % (header, iterable.strip())
        BlockNode.__init__(self, wrapped, block, begin_indent)

    def __repr__(self):
        # Show the statement as the template author wrote it.
        return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
    """``$code:`` block: the body is emitted verbatim as Python code."""

    def __init__(self, stmt, block, begin_indent=''):
        # Only the raw code body matters; the statement header and
        # begin_indent are ignored for this node type.
        self.code = block

    def emit(self, indent, text_indent=''):
        import re
        # Re-indent every line of the stored code to the requested level,
        # then drop trailing spaces left on the final (blank) line.
        reindented = re.compile('^', re.M).sub(indent, self.code)
        return reindented.rstrip(' ')

    def __repr__(self):
        return "<code: %r>" % (self.code,)
class IfNode(BlockNode):
    # ``$if`` block; all behaviour inherited from BlockNode.
    pass

class ElseNode(BlockNode):
    # ``$else`` block; all behaviour inherited from BlockNode.
    pass

class ElifNode(BlockNode):
    # ``$elif`` block; all behaviour inherited from BlockNode.
    pass

class DefNode(BlockNode):
    # ``$def`` block: a template-local function definition.
    pass
class VarNode:
    """``$var name: value`` node.

    Emits a *named* (key, value) yield; named yields become attributes
    on the TemplateResult rather than part of the main body.
    """

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def emit(self, indent, text_indent):
        return '%syield %s, %s\n' % (indent, repr(self.name), self.value)

    def __repr__(self):
        return "<var: %s = %s>" % (self.name, self.value)
class SuiteNode:
    """Suite is a list of sections (nodes making up one indented body)."""

    def __init__(self, sections):
        self.sections = sections

    def emit(self, indent, text_indent=''):
        emitted = [section.emit(indent, text_indent) for section in self.sections]
        # Leading newline terminates the statement header this suite follows.
        return "\n" + "".join(emitted)

    def __repr__(self):
        return repr(self.sections)
# Map of template statement keywords to the parse-tree node class that
# implements them.  'while' needs no behaviour beyond plain BlockNode.
STATEMENT_NODES = {
    'for': ForNode,
    'while': BlockNode,
    'if': IfNode,
    'elif': ElifNode,
    'else': ElseNode,
    'def': DefNode,
    'code': CodeNode
}

# Bare Python keywords that may appear alone on a $-line.
KEYWORDS = [
    "pass",
    "break",
    "continue",
    "return"
]

# Builtins exposed to templates by default: a deliberately small,
# side-effect-free subset of __builtin__.
TEMPLATE_BUILTIN_NAMES = [
    "dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
    "set", "slice", "tuple", "xrange",
    "abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
    "id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
    "True", "False",
    "None",
    "__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]

import __builtin__
# Only names that actually exist in this interpreter's __builtin__ are kept.
TEMPLATE_BUILTINS = dict([(name, getattr(__builtin__, name)) for name in TEMPLATE_BUILTIN_NAMES if name in __builtin__.__dict__])
class ForLoop:
    """
    Wrapper for expression in for statement to support loop.xxx helpers.

    >>> loop = ForLoop()
    >>> for x in loop.setup(['a', 'b', 'c']):
    ...     print loop.index, loop.revindex, loop.parity, x
    ...
    1 3 odd a
    2 2 even b
    3 1 odd c
    >>> loop.index
    Traceback (most recent call last):
        ...
    AttributeError: index
    """
    def __init__(self):
        # No loop is active yet; attribute access fails until setup() runs.
        self._ctx = None

    def __getattr__(self, name):
        # Delegate loop.index, loop.parity, ... to the innermost active
        # loop context.
        if self._ctx is None:
            raise AttributeError, name
        else:
            return getattr(self._ctx, name)

    def setup(self, seq):
        # Entering a (possibly nested) for loop: push a fresh context.
        self._push()
        return self._ctx.setup(seq)

    def _push(self):
        # The previous context becomes the new one's parent (nesting).
        self._ctx = ForLoopContext(self, self._ctx)

    def _pop(self):
        # Leaving the loop: restore the enclosing loop's context (if any).
        self._ctx = self._ctx.parent
class ForLoopContext:
    """Stackable context for ForLoop to support nested for loops.
    """
    def __init__(self, forloop, parent):
        self._forloop = forloop
        self.parent = parent  # enclosing loop's context, or None

    def setup(self, seq):
        # Length is only known for sized containers; for plain iterators
        # n stays 0 and length/revindex/revindex0 are never set.
        if hasattr(seq, '__len__'):
            n = len(seq)
        else:
            n = 0

        self.index = 0
        seq = iter(seq)

        # Pre python-2.5 does not support yield in try-except.
        # This is a work-around to overcome that limitation.
        def next(seq):
            try:
                return seq.next()
            except:
                # StopIteration (or any error) ends the loop: pop this
                # context so the enclosing loop's helpers reappear.
                self._forloop._pop()
                raise

        while True:
            # Update the helper attributes *before* yielding so the loop
            # body sees values describing the element being produced.
            self._next(self.index + 1, n)
            yield next(seq)

    def _next(self, i, n):
        # i is the 1-based position; n is the total length (0 if unknown,
        # in which case ``last`` can never become True).
        self.index = i
        self.index0 = i - 1
        self.first = (i == 1)
        self.last = (i == n)
        self.odd = (i % 2 == 1)
        self.even = (i % 2 == 0)
        self.parity = ['odd', 'even'][self.even]
        if n:
            self.length = n
            self.revindex0 = n - i
            self.revindex = self.revindex0 + 1
class BaseTemplate:
    """Compiled-template runtime.

    Wraps the generated ``__template__`` function and assembles the
    (name, value) pairs it yields into a TemplateResult.
    """
    def __init__(self, code, filename, filter, globals, builtins):
        self.filename = filename
        self.filter = filter  # output filter, e.g. websafe for .html
        self._globals = globals
        self._builtins = builtins
        if code:
            self.t = self._compile(code)
        else:
            # No code (e.g. CompiledTemplate) -- placeholder callable.
            self.t = lambda: ''

    def _compile(self, code):
        # Execute the generated module code in a controlled namespace and
        # pull out the __template__ function it defines.
        env = self.make_env(self._globals or {}, self._builtins)
        exec(code, env)
        return env['__template__']

    def __call__(self, *a, **kw):
        out = self.t(*a, **kw)
        return self._join_output(out)

    def _join_output(self, out):
        # ``out`` is an iterable of (name, value) pairs: named values
        # (from $var) become TemplateResult attributes, unnamed values
        # are concatenated into the main body.
        d = TemplateResult()
        data = []

        for name, value in out:
            if name:
                d[name] = value
            else:
                data.append(value)

        d.__body__ = u"".join(data)
        return d

    def make_env(self, globals, builtins):
        # Namespace visible to the generated code: caller globals plus
        # the template runtime helpers (loop, escape_, join_).
        return dict(globals,
            __builtins__=builtins,
            loop=ForLoop(),
            escape_=self._escape,
            join_=self._join
        )

    def _join(self, *items):
        return u"".join([safeunicode(item) for item in items])

    def _escape(self, value, escape=False):
        import types

        if value is None:
            value = ''
        elif isinstance(value, types.GeneratorType):
            # Output of a nested $def call -- flatten it first.
            value = self._join_output(value)

        value = safeunicode(value)
        if escape and self.filter:
            value = self.filter(value)
        return value
class Template(BaseTemplate):
    """Template compiled from web.py template source text."""

    # Content-Type chosen from the template file's extension.
    CONTENT_TYPES = {
        '.html' : 'text/html; charset=utf-8',
        '.xhtml' : 'application/xhtml+xml; charset=utf-8',
        '.txt' : 'text/plain',
    }
    # Default output filter per extension (HTML-escaping for markup types).
    FILTERS = {
        '.html': websafe,
        '.xhtml': websafe,
        '.xml': websafe
    }
    # Default globals shared by all templates (class-level, mutable).
    globals = {}

    def __init__(self, text, filename='<template>', filter=None, globals=None, builtins=None):
        text = Template.normalize_text(text)
        code = self.compile_template(text, filename)

        _, ext = os.path.splitext(filename)
        filter = filter or self.FILTERS.get(ext, None)
        self.content_type = self.CONTENT_TYPES.get(ext, None)

        if globals is None:
            globals = self.globals
        if builtins is None:
            builtins = TEMPLATE_BUILTINS

        BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)

    def normalize_text(text):
        """Normalizes template text by correcting \r\n, tabs and BOM chars."""
        text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
        if not text.endswith('\n'):
            text += '\n'

        # ignore BOM chars at the beginning of template
        BOM = '\xef\xbb\xbf'
        if isinstance(text, str) and text.startswith(BOM):
            text = text[len(BOM):]

        # support for \$ for backward-compatibility
        text = text.replace(r'\$', '$$')
        return text
    normalize_text = staticmethod(normalize_text)

    def __call__(self, *a, **kw):
        # Local import avoids a circular dependency at module load time.
        import webapi as web
        if 'headers' in web.ctx and self.content_type:
            web.header('Content-Type', self.content_type, unique=True)

        return BaseTemplate.__call__(self, *a, **kw)

    def generate_code(text, filename):
        # parse the text
        rootnode = Parser(text, filename).parse()

        # generate python code from the parse tree
        code = rootnode.emit(indent="").strip()
        return safestr(code)
    generate_code = staticmethod(generate_code)

    def compile_template(self, template_string, filename):
        code = Template.generate_code(template_string, filename)

        def get_source_line(filename, lineno):
            # Best-effort lookup of the offending template line.
            try:
                lines = open(filename).read().splitlines()
                return lines[lineno]
            except:
                return None

        try:
            # compile the code first to report the errors, if any, with the filename
            compiled_code = compile(code, filename, 'exec')
        except SyntaxError, e:
            # display template line that caused the error along with the traceback.
            try:
                e.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
                    (repr(e.filename), e.lineno, get_source_line(e.filename, e.lineno-1))
            except:
                pass
            raise

        # make sure code is safe: reject templates whose AST contains
        # disallowed nodes or restricted attribute access (see SafeVisitor).
        import compiler
        ast = compiler.parse(code)
        SafeVisitor().walk(ast, filename)

        return compiled_code
class CompiledTemplate(Template):
    """Template whose Python code was generated ahead of time.

    Wraps a ready-made template function ``f`` instead of compiling
    source text, so both compilation hooks are no-ops.
    """

    def __init__(self, f, filename):
        Template.__init__(self, '', filename)
        self.t = f

    def compile_template(self, *a):
        # Nothing to compile -- the callable was supplied directly.
        return None

    def _compile(self, *a):
        # Likewise: there is no generated code to exec.
        return None
class Render:
"""The most preferred way of using templates.
render = web.template.render('templates')
print render.foo()
Optional parameter can be `base` can be used to pass output of
every template through the base template.
render = web.template.render('templates', base='layout')
"""
def __init__(self, loc='templates', cache=None, base=None, **keywords):
self._loc = loc
self._keywords = keywords
if cache is None:
cache = not config.get('debug', False)
if cache:
self._cache = {}
else:
self._cache = None
if base and not hasattr(base, '__call__'):
# make base a function, so that it can be passed to sub-renders
self._base = lambda page: self._template(base)(page)
else:
self._base = base
def _lookup(self, name):
path = os.path.join(self._loc, name)
if os.path.isdir(path):
return 'dir', path
else:
path = self._findfile(path)
if path:
return 'file', path
else:
return 'none', None
def _load_template(self, name):
kind, path = self._lookup(name)
if kind == 'dir':
return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
elif kind == 'file':
return Template(open(path).read(), filename=path, **self._keywords)
else:
raise AttributeError, "No template named " + name
def _findfile(self, path_prefix):
p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
return p and p[0]
def _template(self, name):
if self._cache is not None:
if name not in self._cache:
self._cache[name] = self._load_template(name)
return self._cache[name]
else:
return self._load_template(name)
def __getattr__(self, name):
t = self._template(name)
if self._base and isinstance(t, Template):
def template(*a, **kw):
return self._base(t(*a, **kw))
return template
else:
return self._template(name)
class GAE_Render(Render):
    """Render variant for Google App Engine, where templates are shipped
    as precompiled Python modules rather than read from disk."""
    # Render gets over-written. make a copy here.
    super = Render

    def __init__(self, loc, *a, **kw):
        GAE_Render.super.__init__(self, loc, *a, **kw)

        import types
        if isinstance(loc, types.ModuleType):
            # Already a module of precompiled templates.
            self.mod = loc
        else:
            # Turn a path like "templates/admin" into a dotted module name
            # and import the precompiled package (['x'] forces __import__
            # to return the leaf module, not the top-level package).
            name = loc.rstrip('/').replace('/', '.')
            self.mod = __import__(name, None, None, ['x'])

        # Inject the runtime names the compiled template code expects.
        self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
        self.mod.__dict__.update(Template.globals)
        self.mod.__dict__.update(kw.get('globals', {}))

    def _load_template(self, name):
        t = getattr(self.mod, name)

        import types
        if isinstance(t, types.ModuleType):
            # A sub-package of templates becomes a nested renderer.
            return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
        else:
            return t
render = Render
# setup render for Google App Engine.
try:
    # If the GAE SDK is importable, assume templates are precompiled
    # modules (the GAE filesystem is read-only at runtime).
    from google import appengine
    render = Render = GAE_Render
except ImportError:
    pass
def frender(path, **keywords):
    """Creates a template from the given file path.

    Extra keyword arguments are forwarded to Template (filter, globals,
    builtins, ...).
    """
    # BUG FIX: read via a context manager so the file handle is closed
    # promptly -- the original leaked it until garbage collection.
    with open(path) as f:
        text = f.read()
    return Template(text, filename=path, **keywords)
def compile_templates(root):
    """Compiles templates to python code.

    Walks the directory tree under ``root`` and writes an ``__init__.py``
    into each directory that wraps every template file as a
    CompiledTemplate, so the tree can later be imported as a package
    (used for Google App Engine deployments).
    """
    re_start = re_compile('^', re.M)

    for dirpath, dirnames, filenames in os.walk(root):
        # Skip hidden files, editor backups and any existing __init__.py.
        filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]

        for d in dirnames[:]:
            if d.startswith('.'):
                dirnames.remove(d) # don't visit this dir

        # NOTE(review): this handle is only closed at the end of the
        # directory loop and leaks if an exception is raised below.
        out = open(os.path.join(dirpath, '__init__.py'), 'w')
        out.write('from web.template import CompiledTemplate, ForLoop\n\n')
        if dirnames:
            # Sub-directories are themselves compiled packages.
            out.write("import " + ", ".join(dirnames))

        for f in filenames:
            path = os.path.join(dirpath, f)

            # Module name is the filename without its extension(s).
            if '.' in f:
                name, _ = f.split('.', 1)
            else:
                name = f

            text = open(path).read()
            text = Template.normalize_text(text)
            code = Template.generate_code(text, path)
            # NOTE(review): the single-space indent literals here and in
            # _gen below look whitespace-mangled (upstream web.py uses
            # four spaces); still valid Python since they are consistent.
            code = re_start.sub(' ', code)

            _gen = '' + \
            '\ndef %s():' + \
            '\n loop = ForLoop()' + \
            '\n _dummy = CompiledTemplate(lambda: None, "dummy")' + \
            '\n join_ = _dummy._join' + \
            '\n escape_ = _dummy._escape' + \
            '\n' + \
            '\n%s' + \
            '\n return __template__'

            gen_code = _gen % (name, code)
            out.write(gen_code)
            out.write('\n\n')
            out.write('%s = CompiledTemplate(%s(), %s)\n\n' % (name, name, repr(path)))

            # create template to make sure it compiles
            t = Template(open(path).read(), path)
        out.close()
class ParseError(Exception):
    # Raised by the template parser when the source text is malformed.
    pass

class SecurityError(Exception):
    """The template seems to be trying to do something naughty."""
    pass
# Enumerate all the allowed AST nodes
# (names from the Python 2 ``compiler`` module).  Commented-out entries
# are deliberately rejected so templates cannot import, exec, print,
# raise, assert or touch attributes -- see SafeVisitor below.
ALLOWED_AST_NODES = [
    "Add", "And",
#   "AssAttr",
    "AssList", "AssName", "AssTuple",
#   "Assert",
    "Assign", "AugAssign",
#   "Backquote",
    "Bitand", "Bitor", "Bitxor", "Break",
    "CallFunc","Class", "Compare", "Const", "Continue",
    "Decorators", "Dict", "Discard", "Div",
    "Ellipsis", "EmptyNode",
#   "Exec",
    "Expression", "FloorDiv", "For",
#   "From",
    "Function",
    "GenExpr", "GenExprFor", "GenExprIf", "GenExprInner",
    "Getattr",
#   "Global",
    "If", "IfExp",
#   "Import",
    "Invert", "Keyword", "Lambda", "LeftShift",
    "List", "ListComp", "ListCompFor", "ListCompIf", "Mod",
    "Module",
    "Mul", "Name", "Not", "Or", "Pass", "Power",
#   "Print", "Printnl", "Raise",
    "Return", "RightShift", "Slice", "Sliceobj",
    "Stmt", "Sub", "Subscript",
#   "TryExcept", "TryFinally",
    "Tuple", "UnaryAdd", "UnarySub",
    "While", "With", "Yield",
]
class SafeVisitor(object):
    """
    Make sure code is safe by walking through the AST.

    Code considered unsafe if:
        * it has restricted AST nodes
        * it is trying to access restricted attributes

    Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
    """
    def __init__(self):
        "Initialize visitor by generating callbacks for all AST node types."
        # Collected SecurityError instances; reported en masse by walk().
        self.errors = []

    def walk(self, ast, filename):
        "Validate each node in AST and raise SecurityError if the code is not safe."
        self.filename = filename
        self.visit(ast)

        if self.errors:
            raise SecurityError, '\n'.join([str(err) for err in self.errors])

    def visit(self, node, *args):
        "Recursively validate node and all of its children."
        def classname(obj):
            return obj.__class__.__name__
        nodename = classname(node)
        fn = getattr(self, 'visit' + nodename, None)

        if fn:
            # A specific handler (visitName/visitGetattr) decides validity.
            fn(node, *args)
        else:
            # No handler: the node type must be on the allow-list.
            if nodename not in ALLOWED_AST_NODES:
                self.fail(node, *args)

        for child in node.getChildNodes():
            self.visit(child, *args)

    def visitName(self, node, *args):
        "Disallow any attempts to access a restricted attr."
        #self.assert_attr(node.getChildren()[0], node)
        pass

    def visitGetattr(self, node, *args):
        "Disallow any attempts to access a restricted attribute."
        self.assert_attr(node.attrname, node)

    def assert_attr(self, attrname, node):
        if self.is_unallowed_attr(attrname):
            lineno = self.get_node_lineno(node)
            e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
            self.errors.append(e)

    def is_unallowed_attr(self, name):
        # Underscore-prefixed names and function/method internals
        # (func_*, im_*) would allow sandbox escapes.
        return name.startswith('_') \
            or name.startswith('func_') \
            or name.startswith('im_')

    def get_node_lineno(self, node):
        # Some synthetic nodes carry lineno=None; report 0 for those.
        return (node.lineno) and node.lineno or 0

    def fail(self, node, *args):
        "Default callback for unallowed AST nodes."
        lineno = self.get_node_lineno(node)
        nodename = node.__class__.__name__
        e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
        self.errors.append(e)
class TemplateResult(storage):
    """Dictionary like object for storing template output.

    A template can specify key-value pairs in the output using
    `var` statements. Each `var` statement adds a new key to the
    template output and the main output is stored with key
    __body__.

    >>> d = TemplateResult(__body__='hello, world', x='foo')
    >>> d
    <TemplateResult: {'__body__': 'hello, world', 'x': 'foo'}>
    >>> print d
    hello, world
    """
    def __unicode__(self):
        # str()/unicode() of a result is just its main body.
        return safeunicode(self.get('__body__', ''))

    def __str__(self):
        return safestr(self.get('__body__', ''))

    def __repr__(self):
        return "<TemplateResult: %s>" % dict.__repr__(self)
# Doctest suite for the template module; executed by doctest.testmod()
# when this file is run directly (see the __main__ block below).
def test():
    r"""Doctest for testing template module.

    Define a utility function to run template test.

    >>> class TestResult(TemplateResult):
    ...     def __repr__(self): return repr(unicode(self))
    ...
    >>> def t(code, **keywords):
    ...     tmpl = Template(code, **keywords)
    ...     return lambda *a, **kw: TestResult(tmpl(*a, **kw))
    ...

    Simple tests.

    >>> t('1')()
    u'1\n'
    >>> t('$def with ()\n1')()
    u'1\n'
    >>> t('$def with (a)\n$a')(1)
    u'1\n'
    >>> t('$def with (a=0)\n$a')(1)
    u'1\n'
    >>> t('$def with (a=0)\n$a')(a=1)
    u'1\n'

    Test complicated expressions.

    >>> t('$def with (x)\n$x.upper()')('hello')
    u'HELLO\n'
    >>> t('$(2 * 3 + 4 * 5)')()
    u'26\n'
    >>> t('${2 * 3 + 4 * 5}')()
    u'26\n'
    >>> t('$def with (limit)\nkeep $(limit)ing.')('go')
    u'keep going.\n'
    >>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
    u'1\n'

    Test html escaping.

    >>> t('$def with (x)\n$x', filename='a.html')('<html>')
    u'&lt;html&gt;\n'
    >>> t('$def with (x)\n$x', filename='a.txt')('<html>')
    u'<html>\n'

    Test if, for and while.

    >>> t('$if 1: 1')()
    u'1\n'
    >>> t('$if 1:\n    1')()
    u'1\n'
    >>> t('$if 1:\n    1\\')()
    u'1'
    >>> t('$if 0: 0\n$elif 1: 1')()
    u'1\n'
    >>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
    u'1\n'
    >>> t('$if 0 < 1 and 1 < 2: 1')()
    u'1\n'
    >>> t('$for x in [1, 2, 3]: $x')()
    u'1\n2\n3\n'
    >>> t('$def with (d)\n$for k, v in d.iteritems(): $k')({1: 1})
    u'1\n'
    >>> t('$for x in [1, 2, 3]:\n\t$x')()
    u'    1\n    2\n    3\n'
    >>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
    u'1\n1\n1\n'

    The space after : must be ignored.

    >>> t('$if True: foo')()
    u'foo\n'

    Test loop.xxx.

    >>> t("$for i in range(5):$loop.index, $loop.parity")()
    u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
    >>> t("$for i in range(2):\n    $for j in range(2):$loop.parent.parity $loop.parity")()
    u'odd odd\nodd even\neven odd\neven even\n'

    Test assignment.

    >>> t('$ a = 1\n$a')()
    u'1\n'
    >>> t('$ a = [1]\n$a[0]')()
    u'1\n'
    >>> t('$ a = {1: 1}\n$a.keys()[0]')()
    u'1\n'
    >>> t('$ a = []\n$if not a: 1')()
    u'1\n'
    >>> t('$ a = {}\n$if not a: 1')()
    u'1\n'
    >>> t('$ a = -1\n$a')()
    u'-1\n'
    >>> t('$ a = "1"\n$a')()
    u'1\n'

    Test comments.

    >>> t('$# 0')()
    u'\n'
    >>> t('hello$#comment1\nhello$#comment2')()
    u'hello\nhello\n'
    >>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
    u'\nhello\nhello\n'

    Test unicode.

    >>> t('$def with (a)\n$a')(u'\u203d')
    u'\u203d\n'
    >>> t('$def with (a)\n$a')(u'\u203d'.encode('utf-8'))
    u'\u203d\n'
    >>> t(u'$def with (a)\n$a $:a')(u'\u203d')
    u'\u203d \u203d\n'
    >>> t(u'$def with ()\nfoo')()
    u'foo\n'
    >>> def f(x): return x
    ...
    >>> t(u'$def with (f)\n$:f("x")')(f)
    u'x\n'
    >>> t('$def with (f)\n$:f("x")')(f)
    u'x\n'

    Test dollar escaping.

    >>> t("Stop, $$money isn't evaluated.")()
    u"Stop, $money isn't evaluated.\n"
    >>> t("Stop, \$money isn't evaluated.")()
    u"Stop, $money isn't evaluated.\n"

    Test space sensitivity.

    >>> t('$def with (x)\n$x')(1)
    u'1\n'
    >>> t('$def with(x ,y)\n$x')(1, 1)
    u'1\n'
    >>> t('$(1 + 2*3 + 4)')()
    u'11\n'

    Make sure globals are working.

    >>> t('$x')()
    Traceback (most recent call last):
        ...
    NameError: global name 'x' is not defined
    >>> t('$x', globals={'x': 1})()
    u'1\n'

    Can't change globals.

    >>> t('$ x = 2\n$x', globals={'x': 1})()
    u'2\n'
    >>> t('$ x = x + 1\n$x', globals={'x': 1})()
    Traceback (most recent call last):
        ...
    UnboundLocalError: local variable 'x' referenced before assignment

    Make sure builtins are customizable.

    >>> t('$min(1, 2)')()
    u'1\n'
    >>> t('$min(1, 2)', builtins={})()
    Traceback (most recent call last):
        ...
    NameError: global name 'min' is not defined

    Test vars.

    >>> x = t('$var x: 1')()
    >>> x.x
    u'1'
    >>> x = t('$var x = 1')()
    >>> x.x
    1
    >>> x = t('$var x:  \n    foo\n    bar')()
    >>> x.x
    u'foo\nbar\n'

    Test BOM chars.

    >>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
    u'foo\n'

    Test for with weird cases.

    >>> t('$for i in range(10)[1:5]:\n    $i')()
    u'1\n2\n3\n4\n'
    >>> t("$for k, v in {'a': 1, 'b': 2}.items():\n    $k $v")()
    u'a 1\nb 2\n'
    >>> t("$for k, v in ({'a': 1, 'b': 2}.items():\n    $k $v")()
    Traceback (most recent call last):
        ...
    SyntaxError: invalid syntax

    Test datetime.

    >>> import datetime
    >>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
    u'01 2009\n'
    """
    pass
if __name__ == "__main__":
    import sys
    if '--compile' in sys.argv:
        # "python template.py --compile <dir>" precompiles a template tree
        # into importable Python modules (used for GAE deployment).
        compile_templates(sys.argv[2])
    else:
        # Default: run the module's doctest suite (see test() above).
        import doctest
        doctest.testmod()
|
gabelerner/finkin
|
api/web/template.py
|
Python
|
mit
| 45,408
|
[
"VisIt"
] |
a5cc9cf94bd7989d809c099d7c347e07ca7062296a25380da6aa9e7104c6c7c6
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Generate a static image dataset of volume rendering on the ne cooling data
###
### Example usages (assumes you are in directory with this script):
###
### 1) To run on the coarse mesh, times = [0, 3, 6, 9, 12]:
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity-custom.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/tmp/vorticity/tent" --tstart 0 --tstop 15 --tstep 3
###
### 2) To run on the fine mesh, times = [30, 31, 32, 33, 34]:
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity-custom.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vorticity/custom" --tstart 30 --tstop 35 --tstep 1
import sys, os, argparse
from paraview.simple import *
from paraview import data_exploration as wx
#import matplotlib.pyplot as plt
###############################################################################
# Helper function to generate the tent functions needed for scalar opacity
# function
###############################################################################
def createHatFunctions():
    """Build evenly spaced triangular ("hat"/"tent") opacity functions
    covering the normalized [0, 1] scalar range.

    Returns a list of [xPoints, yPoints] pairs, one per hat, suitable
    for building piecewise opacity transfer functions.
    """
    baseWidth = 0.20
    spacing = baseWidth / 2.0
    halfWidth = baseWidth / 2.0
    numberCenters = 1.0 / baseWidth

    hatFunctions = []
    for idx in range(int(numberCenters)):
        # Hat centers at baseWidth/2, 3*baseWidth/2, ... across [0, 1].
        center = (baseWidth / 2.0) + (idx * baseWidth)
        start = center - halfWidth
        # Flat at 0 outside the hat, peak of 1.0 at the center.
        xPoints = [0.0, start, start + spacing, start + (2 * spacing), 1.0]
        yPoints = [0.0, 0.0, 1.0, 0.0, 0.0]
        hatFunctions.append([xPoints, yPoints])

    return hatFunctions
###############################################################################
# This method does all the processing
###############################################################################
def doProcessing(inputDir, inputPattern, outputDir, tstart, tstop, tstep):
    """Volume-render the 'vorticity' field of each requested timestep and
    export a 360-degree image stack per opacity "hat" function.

    inputPattern must contain a %d placeholder for the timestep number;
    timesteps in the half-open range [tstart, tstop), advanced by tstep,
    are processed.  Images land in outputDir/{time}/{volumeIdx}/.
    """
    # -----------------------------------------------------------------------------
    # Path to input/output data/directories
    # -----------------------------------------------------------------------------
    files_pattern = os.path.join(inputDir, inputPattern)
    file_times = range(tstart, tstop, tstep)
    filenames = [ (files_pattern % time) for time in file_times]

    # -----------------------------------------------------------------------------
    # Rendering configuration
    # -----------------------------------------------------------------------------
    resolution = 500
    view_size = [resolution, resolution]
    # Camera orbit granularity in degrees (theta, phi).
    angle_steps = [15, 15]
    #angle_steps = [90, 90]
    distance = 24632.991324377483
    rotation_axis = [0.0, 1.0, 0.0]
    #center_of_rotation = [-1649.1046142578125, -752.328125, 1374.1217346191406]
    center_of_rotation = [0.0, 0.0, 0.0]

    view = GetRenderView()
    view.ViewSize = view_size
    view.Background = [0.0, 0.0, 0.0]
    view.OrientationAxesVisibility = 0
    view.CenterAxesVisibility = 0

    # -----------------------------------------------------------------------------
    # Output configuration
    # -----------------------------------------------------------------------------
    # One JPEG per {time}/{volumeIdx} at each (theta, phi) camera position.
    fng = wx.FileNameGenerator(outputDir, '{time}/{volumeIdx}/{theta}_{phi}.jpg')
    exporter = wx.ThreeSixtyImageStackExporter(fng,
                                               view,
                                               center_of_rotation,
                                               distance,
                                               rotation_axis,
                                               angle_steps)

    # -----------------------------------------------------------------------------
    # Pipeline configuration
    # -----------------------------------------------------------------------------
    # create a new 'Legacy VTK Reader'
    readerProxy = LegacyVTKReader(FileNames=filenames)

    # This translation transform is a workaround for a bug in the camera orbiting
    # calculations made in ThreeSixtyImageStackExporter
    transform1 = Transform(Input=readerProxy)
    transform1.Transform = 'Transform'
    transform1.Transform.Translate = [1649.1046142578125, 752.328125, -1374.1217346191406]

    # create a new 'Cell Data to Point Data'
    cellDatatoPointData1 = CellDatatoPointData(Input=transform1)

    # get color transfer function/color map for 'vorticity'
    vorticityLUT = GetColorTransferFunction('vorticity')
    vorticityLUT.RGBPoints = [0.0, 0.0, 0.0, 1.0, 200.0, 1.0, 0.0, 0.0]
    vorticityLUT.LockScalarRange = 1
    vorticityLUT.ColorSpace = 'HSV'
    vorticityLUT.NanColor = [0.498039, 0.498039, 0.498039]
    vorticityLUT.ScalarRangeInitialized = 1.0

    # get opacity transfer function/opacity map for 'vorticity'
    vorticityPWF = GetOpacityTransferFunction('vorticity')
    vorticityPWF.Points = [0.0, 0.0, 0.5, 0.0, 200.0, 1.0, 0.5, 0.0]
    vorticityPWF.ScalarRangeInitialized = 1

    # show data from fine_results_
    readerDisplay = Show(transform1)
    readerDisplay.ColorArrayName = [None, '']
    readerDisplay.Opacity = 0.15
    readerDisplay.ScalarOpacityUnitDistance = 158.07645437184576

    # show data from cellDatatoPointData1
    cellDatatoPointData1Display = Show(cellDatatoPointData1)
    cellDatatoPointData1Display.Representation = 'Volume'
    cellDatatoPointData1Display.ColorArrayName = ['POINTS', 'vorticity']
    cellDatatoPointData1Display.LookupTable = vorticityLUT
    cellDatatoPointData1Display.ScalarOpacityFunction = vorticityPWF
    cellDatatoPointData1Display.ScalarOpacityUnitDistance = 158.07645437184576

    # -----------------------------------------------------------------------------
    # Batch processing
    # -----------------------------------------------------------------------------
    # Scalar range assumed for 'vorticity' (matches the LUT/PWF above).
    dataRange = [0.0, 200.0]
    curRange = dataRange[1] - dataRange[0]
    pwfPointsArray = []

    # We want the first piecewise function to just be the standard linear one
    pwfPointsArray.append([ dataRange[0], 0.0, 0.5, 0.0,
                            dataRange[1], 1.0, 0.5, 0.0 ])

    # Create the hat functions
    hatFunctions = createHatFunctions()

    # Pre-compute the piecewise function points for all of the hat functions
    # (each point is an (x, y, midpoint, sharpness) quadruple, scaled from
    # the normalized [0, 1] hats into the data range).
    for volumeIdx in range(5):
        xPoints = hatFunctions[volumeIdx][0]
        yPoints = hatFunctions[volumeIdx][1]
        pwfPoints = []

        for i in range(len(xPoints)):
            pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
            pwfPoints.append(yPoints[i])
            pwfPoints.append(0.5)
            pwfPoints.append(0.0)

        pwfPointsArray.append(pwfPoints)

    Render()

    # Now we can iterate through the timesteps very simply
    for t in range(0, len(file_times), 1):
        time = file_times[t]
        GetAnimationScene().TimeKeeper.Time = float(time)
        UpdatePipeline(time)
        print "Moving to timestep ",time
        # Export one full orbit per opacity function (linear + each hat).
        for volumeIdx in range(len(pwfPointsArray)):
            pts = pwfPointsArray[volumeIdx]
            newPwf = CreatePiecewiseFunction( Points=pts )
            cellDatatoPointData1Display.ScalarOpacityFunction = newPwf
            fng.update_active_arguments(volumeIdx=volumeIdx)
            fng.update_label_arguments(volumeIdx="Idx")
            exporter.UpdatePipeline(time)
###############################################################################
# Main script entry point
###############################################################################
if __name__ == "__main__":
    description = "Python script to generate volume rendered NE cooling data"
    parser = argparse.ArgumentParser(description=description)
    # Input/output locations plus the half-open timestep range
    # [tstart, tstop) advanced by tstep; see doProcessing().
    parser.add_argument("--inputdir", type=str, default="", help="Path to directory where input data files exist")
    parser.add_argument("--inputpattern", type=str, default="", help="String pattern containing %d where pattern should be replaced with numbers")
    parser.add_argument("--outputdir", type=str, default="", help="Path to directory where cinema dataset should be written")
    parser.add_argument("--tstart", type=int, default=0, help="Index of first timestep to process")
    parser.add_argument("--tstop", type=int, default=1, help="Index of last timestep to process (all steps up to this one will be processed)")
    parser.add_argument("--tstep", type=int, default=1, help="Timestep increment amount")
    args = parser.parse_args()

    doProcessing(args.inputdir, args.inputpattern, args.outputdir, args.tstart, args.tstop, args.tstep)
|
Kitware/cinema
|
scripts/data_generation/ne-cooling/volume-vorticity-custom.py
|
Python
|
bsd-3-clause
| 8,837
|
[
"ParaView",
"VTK"
] |
0d33d4181238296e22015f99068bde374431ba8f0ab4d3b72ee0fd2cc7f1978c
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import sys
from rdkit import RDConfig
from rdkit.Dbase import DbModule
# Re-export the DB driver's type-code groups so callers (and the helpers
# below) can map cursor.description type codes to coarse categories.
sqlTextTypes = DbModule.sqlTextTypes
sqlIntTypes = DbModule.sqlIntTypes
sqlFloatTypes = DbModule.sqlFloatTypes
sqlBinTypes = DbModule.sqlBinTypes
def GetDbNames(user='sysdba', password='masterkey', dirName='.', dBase='::template1', cn=None):
    """ returns a list of databases that are available

    **Arguments**

      - user: the username for DB access

      - password: the password to be used for DB access

      - dirName: directory searched for database files when the driver
        is file based (e.g. sqlite)

      - dBase: administrative database to connect to when listing
        server-side databases; it is removed from the results

      - cn: optional pre-existing connection to reuse

    **Returns**

      - a list of db names (strings)

    """
    if DbModule.getDbSql:
        # Server-based driver: ask the server for its database list.
        if not cn:
            try:
                cn = DbModule.connect(dBase, user, password)
            except Exception:
                print('Problems opening database: %s' % (dBase))
                return []
        c = cn.cursor()
        c.execute(DbModule.getDbSql)
        if RDConfig.usePgSQL:
            names = ['::' + str(x[0]) for x in c.fetchall()]
        else:
            names = ['::' + str(x[0]) for x in c.fetchall()]
        # NOTE(review): both branches above are identical -- presumably
        # they differed historically.  The administrative db is excluded
        # from the results; placement of this line at this level matches
        # upstream RDKit (source indentation was ambiguous here).
        names.remove(dBase)
    elif DbModule.fileWildcard:
        # File-based driver (e.g. sqlite): databases are files on disk.
        import os.path
        import glob
        names = glob.glob(os.path.join(dirName, DbModule.fileWildcard))
    else:
        names = []
    return names
def GetTableNames(dBase, user='sysdba', password='masterkey', includeViews=0, cn=None):
    """ returns a list of tables available in a database

    **Arguments**

      - dBase: the name of the DB file to be used

      - user: the username for DB access

      - password: the password to be used for DB access

      - includeViews: if this is non-null, the views in the db will
        also be returned

    **Returns**

      - a list of table names (strings)

    """
    if not cn:
        try:
            cn = DbModule.connect(dBase, user, password)
        except Exception:
            print('Problems opening database: %s' % (dBase))
            return []

    comm = DbModule.getTablesAndViewsSql if includeViews else DbModule.getTablesSql
    curs = cn.cursor()
    curs.execute(comm)
    names = [str(row[0]).upper() for row in curs.fetchall()]

    # PostgreSQL exposes an internal helper view that callers never want.
    if RDConfig.usePgSQL and 'PG_LOGDIR_LS' in names:
        names.remove('PG_LOGDIR_LS')
    return names
def GetColumnInfoFromCursor(cursor):
    """ Returns a list of (columnName, typeString) pairs describing the
    columns of an executed cursor.

    typeString is one of 'string', 'integer', 'float' or 'binary'.
    NOTE: for SQLite the type is inferred from the first result row, so
    one row is consumed from the cursor here.
    """
    if cursor is None or cursor.description is None:
        return []
    results = []
    if not RDConfig.useSqlLite:
        # Non-sqlite drivers report a usable type code in description.
        for item in cursor.description:
            cName = item[0]
            cType = item[1]
            if cType in sqlTextTypes:
                typeStr = 'string'
            elif cType in sqlIntTypes:
                typeStr = 'integer'
            elif cType in sqlFloatTypes:
                typeStr = 'float'
            elif cType in sqlBinTypes:
                typeStr = 'binary'
            else:
                # BUG FIX: the original left typeStr unbound here, raising
                # NameError on the first column (or silently reusing the
                # previous column's type).  Warn and fall back to 'string'.
                sys.stderr.write('odd type in col %s: %s\n' % (cName, str(cType)))
                typeStr = 'string'
            results.append((cName, typeStr))
    else:
        # SQLite: no reliable type codes; sniff the first row's values.
        r = cursor.fetchone()
        if not r:
            return results
        for i, v in enumerate(r):
            cName = cursor.description[i][0]
            typ = type(v)
            if isinstance(v, str):
                typeStr = 'string'
            elif typ == int:
                typeStr = 'integer'
            elif typ == float:
                typeStr = 'float'
            elif typ in (memoryview, bytes):
                typeStr = 'binary'
            else:
                # BUG FIX: same unbound-variable problem as above.
                sys.stderr.write('odd type in col %s: %s\n' % (cName, typ))
                typeStr = 'string'
            results.append((cName, typeStr))
    return results
def GetColumnNamesAndTypes(dBase, table, user='sysdba', password='masterkey', join='', what='*',
                           cn=None):
    """ gets a list of columns available in a DB table along with their types

    **Arguments**

      - dBase: the name of the DB file to be used

      - table: the name of the table to query

      - user: the username for DB access

      - password: the password to be used for DB access

      - join: an optional join clause (omit the verb 'join')

      - what: an optional clause indicating what to select

    **Returns**

      - a list of 2-tuples containing:

          1) column name

          2) column type

    """
    if not cn:
        cn = DbModule.connect(dBase, user, password)
    c = cn.cursor()
    # NOTE(review): table/what/join are interpolated straight into the
    # SQL text; callers must not pass untrusted values here.
    cmd = 'select %s from %s' % (what, table)
    if join:
        # BUG FIX / consistency with GetColumnNames: only prepend the verb
        # when the caller did not already supply it; the original always
        # emitted ' join %s', producing 'join join ...' for such input.
        if not join.strip().startswith('join'):
            join = 'join %s' % (join)
        cmd += ' ' + join
    c.execute(cmd)
    return GetColumnInfoFromCursor(c)
def GetColumnNames(dBase, table, user='sysdba', password='masterkey', join='', what='*', cn=None):
    """ gets a list of columns available in a DB table

    **Arguments**

      - dBase: the name of the DB file to be used

      - table: the name of the table to query

      - user: the username for DB access

      - password: the password to be used for DB access

      - join: an optional join clause (omit the verb 'join')

      - what: an optional clause indicating what to select

    **Returns**

      - a list of column names

    """
    if not cn:
        cn = DbModule.connect(dBase, user, password)
    curs = cn.cursor()

    query = 'select %s from %s' % (what, table)
    if join:
        # Prepend the verb only if the caller did not already supply it.
        clause = join if join.strip().startswith('join') else 'join %s' % (join,)
        query = '%s %s' % (query, clause)

    curs.execute(query)
    # Fetch one row so drivers that populate description lazily do so.
    curs.fetchone()
    return [str(col[0]) for col in curs.description]
|
ptosco/rdkit
|
rdkit/Dbase/DbInfo.py
|
Python
|
bsd-3-clause
| 5,758
|
[
"RDKit"
] |
20f6c1f0bbb158c90ed30ed8b48c506800eb16b78df9afaabe31e842a668230a
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts. For conceptual background on ICA, see
:ref:`this scikit-learn tutorial
<sphx_glr_auto_examples_decomposition_plot_ica_blind_source_separation.py>`.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
# %%
import os

import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
                               corrmap)

# Locate (downloading if necessary) the MNE sample dataset and open the
# band-pass-filtered raw recording lazily (preload happens below).
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)

# Here we'll crop to 60 seconds and drop gradiometer channels for speed
raw.crop(tmax=60.).pick_types(meg='mag', eeg=True, stim=True, eog=True)
raw.load_data()
# %%
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), simply
# pass ``n_components``.
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set ``n_components=50``
# during instantiation and pass ``n_pca_components=None`` to
#     `~mne.preprocessing.ICA.apply`, then the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction `~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-300 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` in `~mne.preprocessing.ICA.apply`, it will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120), thus discarding the smallest 180 components.
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality, set ``n_components=n`` during initialization and pass
# ``n_pca_components=n`` to `~mne.preprocessing.ICA.apply`.
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See
# :footcite:`AblinEtAl2018` for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# `~mne.preprocessing.ICA` object, then the `~mne.preprocessing.ICA` object is
# fit to the data using its `~mne.preprocessing.ICA.fit` method. The results of
# the fitting are added to the `~mne.preprocessing.ICA` object as attributes
# that end in an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# `~mne.io.Raw` or `~mne.Epochs` object using the `~mne.preprocessing.ICA`
# object's `~mne.preprocessing.ICA.apply` method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. This is a two-stage process:
#
# 1. To deal with different channel types having different units
# (e.g., Volts for EEG and Tesla for MEG), data must be pre-whitened.
# If ``noise_cov=None`` (default), all data of a given channel type is
# scaled by the standard deviation across all channels. If ``noise_cov`` is
# a `~mne.Covariance`, the channels are pre-whitened using the covariance.
# 2. The pre-whitened data are then decomposed using PCA.
#
# From the resulting principal components (PCs), the first ``n_components`` are
# then passed to the ICA algorithm if ``n_components`` is an integer number.
# It can also be a float between 0 and 1, specifying the **fraction** of
# explained variance that the PCs should capture; the appropriate number of
# PCs (i.e., just as many PCs as are required to explain the given fraction
# of total variance) is then passed to the ICA.
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the `~mne.preprocessing.ICA` object's
# `~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components=None`` resulting in no additional dimensionality
# reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
#
# .. raw:: html
#
# <a href=
# "../../_images/graphviz-7483cb1cf41f06e2a4ef451b17f073dbe584ba30.png">
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# .. raw:: html
#
# </a>
#
# See the Notes section of the `~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
# %%
# We can get a summary of how the ocular artifact manifests across each channel
# type using `~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
# %%
# Now we'll do the same for the heartbeat artifacts, using
# `~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
# %%
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see
# :footcite:`WinklerEtAl2015` for
# more information), so we'll keep a copy of the unfiltered
# `~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy().filter(l_freq=1., h_freq=None)
# %%
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous `~mne.Epochs` or `~mne.Evoked` objects (not
# just continuous `~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# .. note:: `~mne.Epochs` used for fitting ICA should not be
# baseline-corrected. Because cleaning the data via ICA may
# introduce DC offsets, we suggest to baseline correct your data
# **after** cleaning (and not before), should you require
# baseline correction.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether
# it
# worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, max_iter='auto', random_state=97)
ica.fit(filt_raw)
ica
# %%
# Some optional parameters that we could have passed to the
# `~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
#
# Now we can examine the ICs to see what they captured.
# `~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to `~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered `~mne.io.Raw` object:
raw.load_data()
ica.plot_sources(raw, show_scrollbars=False)
# %%
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# `~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA mixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
# %%
# .. note::
#
# `~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of `~mne.io.Raw` or `~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic `~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them anyway just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# `~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
# %%
# We can also plot some diagnostics of each IC using
# `~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
# %%
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# `~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using `mne.preprocessing.ICA.save`
# and `mne.preprocessing.read_ica`.
ica.exclude = [0, 1] # indices chosen based on various plots above
# %%
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the `~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks),
show_scrollbars=False)
del reconst_raw
# %%
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use `~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# `~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
# reset any manual exclusions so the automated detection starts from scratch
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
# mark the automatically-detected matches for exclusion on the ICA object
ica.exclude = eog_indices

# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)

# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)

# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)

# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
# %%
# Note that above we used `~mne.preprocessing.ICA.plot_sources` on both
# the original `~mne.io.Raw` instance and also on an
# `~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that `~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# `~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# you can use as a proxy for EOG. You can use a single channel, or create a
# bipolar reference from frontal EEG sensors and use that as virtual EOG
# channel. This carries a risk however: you must hope that the frontal EEG
# channels only reflect EOG and not brain dynamics in the prefrontal cortex (or
# you must not care about those prefrontal signals).
#
# For ECG, it is easier: `~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# `~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics
# :footcite:`DammersEtAl2008`) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
# %%
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# `~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, max_iter='auto', random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw, show_scrollbars=False)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
# %%
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like `~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, ica, new_ica
# %%
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using `mne.preprocessing.corrmap` :footcite:`CamposViolaEtAl2009`.
# The idea behind `~mne.preprocessing.corrmap` is that the artifact patterns
# are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# `~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset
# :footcite:`SchalkEtAl2004,GoldbergerEtAl2000`. The
# dataset has 109 subjects, we'll just download one run (a left/right hand
# movement task) from each of the first 4 subjects:
# EEGBCI channel labels are padded with trailing periods; map them to
# standard 10-05 names so that `set_montage('standard_1005')` below can
# attach sensor positions
mapping = {
    'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
    'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
    'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
    'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
    'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
    'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
    'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
    'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
    'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
    'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
    'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
    'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
    'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
# one Raw object and one fitted ICA solution per subject
raws = list()
icas = list()

for subj in range(4):
    # EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
    fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
    # resampling to 50 Hz keeps the per-subject ICA fits fast
    raw = mne.io.read_raw_edf(fname).load_data().resample(50)
    # remove trailing `.` from channel names so we can set montage
    raw.rename_channels(mapping)
    raw.set_montage('standard_1005')
    # high-pass filter (fit ICA on filtered data, as in the main example)
    raw_filt = raw.copy().load_data().filter(l_freq=1., h_freq=None)
    # fit ICA, using low max_iter for speed
    ica = ICA(n_components=30, max_iter=100, random_state=97)
    # verbose='error' silences convergence warnings caused by the low max_iter
    ica.fit(raw_filt, verbose='error')
    raws.append(raw)
    icas.append(ica)
# %%
# Now let's run `~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
# %%
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There is one match for each subject, but it's a good idea
# to also double-check the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw, show_scrollbars=False)
fig.subplots_adjust(top=0.9) # make space for title
fig.suptitle('Subject {}'.format(index))
# %%
# Notice that subjects 2 and 3 each seem to have *two* ICs that reflect ocular
# activity (components ``ICA000`` and ``ICA002``), but only one was caught by
# `~mne.preprocessing.corrmap`. Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
# %%
# This time it found 2 ICs for each of subjects 2 and 3 (which is good).
# At this point we'll re-run `~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
# %%
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# `~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by the
# last call to `~mne.preprocessing.corrmap`. Notice also that each subject has
# at least one IC index labelled "blink", and subjects 2 and 3 each have two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of `~mne.preprocessing.ICA` objects can
# also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3], show_scrollbars=False)
# %%
# As a final note, it is possible to extract ICs numerically using the
# `~mne.preprocessing.ICA.get_components` method of
# `~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# `~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
# %%
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using `~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# `~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to `~mne.preprocessing.corrmap`.
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
# %%
# Compute ICA components on Epochs
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ICA is now fit to epoched MEG data instead of the raw data.
# We assume that the non-stationary EOG artifacts have already been removed.
# The sources matching the ECG are automatically found and displayed.
#
# .. note::
# This example is computationally intensive, so it might take a few minutes
# to complete.
#
# After reading the data, preprocessing consists of:
#
# - MEG channel selection
# - 1-30 Hz band-pass filter
# - epoching -0.2 to 0.5 seconds with respect to events
# - rejection based on peak-to-peak amplitude
#
# Note that we don't baseline correct the epochs here – we'll do this after
# cleaning with ICA is completed. Baseline correction before ICA is not
# recommended by the MNE-Python developers, as it doesn't guarantee optimal
# results.
# NOTE(review): `filt_raw` here is still the 1 Hz high-passed sample-dataset
# Raw from the first example (the EEGBCI loop above rebound `raw` but never
# `filt_raw`); it is now narrowed to magnetometers plus stim channels.
filt_raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True).load_data()
# band-pass 1-30 Hz before fitting ICA on the epoched data
filt_raw.filter(1, 30, fir_design='firwin')

# peak-to-peak amplitude rejection parameters
reject = dict(mag=4e-12)
# create longer and more epochs for more artifact exposure
events = mne.find_events(filt_raw, stim_channel='STI 014')
# don't baseline correct epochs (baseline correction before ICA is not
# recommended; see the note earlier in this tutorial)
epochs = mne.Epochs(filt_raw, events, event_id=None, tmin=-0.2, tmax=0.5,
                    reject=reject, baseline=None)

# %%
# Fit ICA model using the FastICA algorithm, detect and plot components
# explaining ECG artifacts.
ica = ICA(n_components=15, method='fastica', max_iter="auto").fit(epochs)

# dedicated ECG epochs give `find_bads_ecg` a clean pattern to match against
ecg_epochs = create_ecg_epochs(filt_raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, threshold='auto')
ica.plot_components(ecg_inds)
# %%
# Plot the properties of the ECG components:
ica.plot_properties(epochs, picks=ecg_inds)
# %%
# Plot the estimated sources of detected ECG related components:
ica.plot_sources(filt_raw, picks=ecg_inds)
# %%
# References
# ^^^^^^^^^^
# .. footbibliography::
|
wmvanvliet/mne-python
|
tutorials/preprocessing/40_artifact_correction_ica.py
|
Python
|
bsd-3-clause
| 29,544
|
[
"Gaussian"
] |
3b395eeb808d9411f02752a615b5db5a50ecefb4b671b336e7b0ce57d4771db2
|
#!/usr/bin/env python
import os
try:
    # When running interactively under IPython, strip any extra command-line
    # arguments so that srwl_bl's option parsing only sees the script name.
    __IPYTHON__
    import sys
    del sys.argv[1:]
except NameError:
    # Not running inside IPython: __IPYTHON__ is undefined; leave argv alone.
    # (Narrowed from a bare `except:` so that KeyboardInterrupt/SystemExit
    # and genuine errors are no longer silently swallowed.)
    pass
import srwl_bl
import srwlib
import srwlpy
import srwl_uti_smp
def set_optics(v=None):
    """Build the SRW optical container describing this beamline.

    Walks the element names in beam order; real optical elements are
    appended to ``el`` and each gets a matching propagation-parameter list
    appended to ``pp``.  Watchpoints are observation planes only, so they
    contribute no optical element.  All geometry/material/propagation values
    are read from the ``op_*`` attributes of *v*.

    **Arguments**

      - v: the srwl_bl variable container holding the ``op_*`` attributes
        for every element (produced from ``varParam``)

    **Returns**

      - an srwlib.SRWLOptC combining the element list and the propagation
        parameters (including the final-drift parameters ``op_fin_pp``)
    """
    el = []
    pp = []
    # element order along the beam; the names select which op_* attributes
    # are read in each branch below
    names = ['CRL', 'CRL_Watchpoint', 'Watchpoint', 'Fiber', 'Fiber_Watchpoint2', 'Watchpoint2']
    for el_name in names:
        if el_name == 'CRL':
            # CRL: crl 36.0m
            el.append(srwlib.srwl_opt_setup_CRL(
                _foc_plane=v.op_CRL_foc_plane,
                _delta=v.op_CRL_delta,
                _atten_len=v.op_CRL_atten_len,
                _shape=v.op_CRL_shape,
                _apert_h=v.op_CRL_apert_h,
                _apert_v=v.op_CRL_apert_v,
                _r_min=v.op_CRL_r_min,
                _n=v.op_CRL_n,
                _wall_thick=v.op_CRL_wall_thick,
                _xc=v.op_CRL_x,
                _yc=v.op_CRL_y,
            ))
            pp.append(v.op_CRL_pp)
        elif el_name == 'CRL_Watchpoint':
            # CRL_Watchpoint: drift 36.0m
            el.append(srwlib.SRWLOptD(
                _L=v.op_CRL_Watchpoint_L,
            ))
            pp.append(v.op_CRL_Watchpoint_pp)
        elif el_name == 'Watchpoint':
            # Watchpoint: watch 70.1915m (observation plane only; no element)
            pass
        elif el_name == 'Fiber':
            # Fiber: fiber 70.1915m
            el.append(srwlib.srwl_opt_setup_cyl_fiber(
                _foc_plane=v.op_Fiber_foc_plane,
                _delta_ext=v.op_Fiber_delta_ext,
                _delta_core=v.op_Fiber_delta_core,
                _atten_len_ext=v.op_Fiber_atten_len_ext,
                _atten_len_core=v.op_Fiber_atten_len_core,
                _diam_ext=v.op_Fiber_externalDiameter,
                _diam_core=v.op_Fiber_diam_core,
                _xc=v.op_Fiber_xc,
                _yc=v.op_Fiber_yc,
            ))
            pp.append(v.op_Fiber_pp)
        elif el_name == 'Fiber_Watchpoint2':
            # Fiber_Watchpoint2: drift 70.1915m
            el.append(srwlib.SRWLOptD(
                _L=v.op_Fiber_Watchpoint2_L,
            ))
            pp.append(v.op_Fiber_Watchpoint2_pp)
        elif el_name == 'Watchpoint2':
            # Watchpoint2: watch 70.85m (observation plane only; no element)
            pass
    # propagation parameters for the final drift to the observation plane
    pp.append(v.op_fin_pp)
    return srwlib.SRWLOptC(el, pp)
varParam = srwl_bl.srwl_uti_ext_options([
['name', 's', 'Boron Fiber CRL with 3 lenses', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', '', 'standard electron beam name'],
['ebm_nms', 's', '', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_e', 'f', 3.0, 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0.0, 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0.0, 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0.0, 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0.0, 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0.0, 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.54, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', 0.00089, 'electron beam relative energy spread'],
['ebm_emx', 'f', 5.5e-10, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', 8e-12, 'electron beam vertical emittance [m]'],
# Definition of the beam through Twiss:
['ebm_betax', 'f', 2.02, 'horizontal beta-function [m]'],
['ebm_betay', 'f', 1.06, 'vertical beta-function [m]'],
['ebm_alphax', 'f', 0.0, 'horizontal alpha-function [rad]'],
['ebm_alphay', 'f', 0.0, 'vertical alpha-function [rad]'],
['ebm_etax', 'f', 0.0, 'horizontal dispersion function [m]'],
['ebm_etay', 'f', 0.0, 'vertical dispersion function [m]'],
['ebm_etaxp', 'f', 0.0, 'horizontal dispersion function derivative [rad]'],
['ebm_etayp', 'f', 0.0, 'vertical dispersion function derivative [rad]'],
#---Undulator
['und_bx', 'f', 0.0, 'undulator horizontal peak magnetic field [T]'],
['und_by', 'f', 0.9316, 'undulator vertical peak magnetic field [T]'],
['und_phx', 'f', 0.0, 'initial phase of the horizontal magnetic field [rad]'],
['und_phy', 'f', 0.0, 'initial phase of the vertical magnetic field [rad]'],
['und_b2e', '', '', 'estimate undulator fundamental photon energy (in [eV]) for the amplitude of sinusoidal magnetic field defined by und_b or und_bx, und_by', 'store_true'],
['und_e2b', '', '', 'estimate undulator field amplitude (in [T]) for the photon energy defined by w_e', 'store_true'],
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3.0, 'undulator length [m]'],
['und_zc', 'f', 1.25, 'undulator center longitudinal position [m]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_g', 'f', 6.72, 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
['und_ph', 'f', 0.0, 'shift of magnet arrays [mm] for which the field should be set up'],
['und_mdir', 's', '', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', '', 'name of magnetic measurements for different gaps summary file'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
['tr_pl', 's', '', 'plot the resulting trajectiry in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
['sm_pl', 's', '', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 8500.63, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.0008, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.0007, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 0.7, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
['rs_type', 's', 'u', 'source type, (u) idealized undulator, (t), tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# CRL: crl
['op_CRL_foc_plane', 'f', 2, 'focalPlane'],
['op_CRL_delta', 'f', 4.716943e-06, 'refractiveIndex'],
['op_CRL_atten_len', 'f', 0.006257, 'attenuationLength'],
['op_CRL_shape', 'f', 1, 'shape'],
['op_CRL_apert_h', 'f', 0.001, 'horizontalApertureSize'],
['op_CRL_apert_v', 'f', 0.001, 'verticalApertureSize'],
['op_CRL_r_min', 'f', 0.0005, 'tipRadius'],
['op_CRL_wall_thick', 'f', 8e-05, 'tipWallThickness'],
['op_CRL_x', 'f', 0.0, 'horizontalOffset'],
['op_CRL_y', 'f', 0.0, 'verticalOffset'],
['op_CRL_n', 'i', 3, 'numberOfLenses'],
# CRL_Watchpoint: drift
['op_CRL_Watchpoint_L', 'f', 34.1915, 'length'],
# Fiber: fiber
['op_Fiber_foc_plane', 'f', 2, 'focalPlane'],
['op_Fiber_delta_ext', 'f', 6.228746e-06, 'externalRefractiveIndex'],
['op_Fiber_delta_core', 'f', 4.129923e-05, 'coreRefractiveIndex'],
['op_Fiber_atten_len_ext', 'f', 0.002412, 'externalAttenuationLength'],
['op_Fiber_atten_len_core', 'f', 3.63751e-06, 'coreAttenuationLength'],
['op_Fiber_externalDiameter', 'f', 0.0001, 'externalDiameter'],
['op_Fiber_diam_core', 'f', 1e-05, 'coreDiameter'],
['op_Fiber_xc', 'f', 0.0, 'horizontalCenterPosition'],
['op_Fiber_yc', 'f', 0.0, 'verticalCenterPosition'],
# Fiber_Watchpoint2: drift
['op_Fiber_Watchpoint2_L', 'f', 0.6585, 'length'],
#---Propagation parameters
['op_CRL_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL'],
['op_CRL_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'CRL_Watchpoint'],
['op_Fiber_pp', 'f', [0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fiber'],
['op_Fiber_Watchpoint2_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'Fiber_Watchpoint2'],
['op_fin_pp', 'f', [0, 0, 1.0, 0, 0, 0.7, 2.0, 0.2, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
])
def main():
    """Set up and run every requested SRW report for this virtual beamline."""
    # Parse command-line options against the parameter list defined above.
    v = srwl_bl.srwl_uti_parse_options(varParam, use_sys_argv=True)
    op = set_optics(v)

    # Switch on every calculation type with its default plot specification:
    # single-/multi-e spectrum, power density, intensity, trajectory, wavefront.
    for report, plot in (('ss', 'e'), ('sm', 'e'), ('pw', 'xy'),
                         ('si', 'xy'), ('tr', 'xz'), ('ws', 'xy')):
        setattr(v, report, True)
        setattr(v, report + '_pl', plot)

    mag = None
    if v.rs_type == 'm':
        # Multipole source: build an explicit magnetic-field container from
        # the mp_* options instead of relying on the idealized undulator.
        mag = srwlib.SRWLMagFldC()
        mag.arXc.append(0)
        mag.arYc.append(0)
        mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
        mag.arZc.append(v.mp_zc)

    srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)


main()
|
mrakitin/sirepo
|
tests/template/srw_generate_data/boron-fiber-crl-with-3-lenses.py
|
Python
|
apache-2.0
| 23,714
|
[
"Gaussian"
] |
00cfbe41fa783676f506cc544d3780c4794e4f53fa54619018e065701a396c66
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.misc.sedfetching Contains the SEDFetcher class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astroquery.vizier import Vizier
from astroquery.vizier.core import TableParseError
from astropy.units import spectral
# Import the relevant PTS classes and modules
from ...core.data.sed import ObservedSED
from ...core.tools import tables
from ...core.tools import filesystem as fs
from ...core.filter.filter import parse_filter
from ...core.basics.log import log
from ...core.basics.errorbar import ErrorBar
from ...dustpedia.data.seds import SEDFetcher as DustPediaSEDFetcher
from ...core.basics.configurable import Configurable
from ..tools import catalogs
from ...modeling.preparation import unitconversion
from ...core.tools import formatting as fmt
from ...core.units.parsing import parse_unit as u
# -----------------------------------------------------------------
# INTERESTING LINK FOR HALPHA IMAGES AND FLUXES for CERTAIN GALAXIES: http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=J/A+AS/137/495&-to=3
# -----------------------------------------------------------------
# Identifiers of the photometry catalogs this fetcher knows how to query;
# entries of config.catalogs are matched against these names in SEDFetcher.get().
catalog_names = ["DustPedia", "GALEX", "2MASS", "SINGS", "LVL", "Spitzer", "Spitzer/IRS", "IRAS", "IRAS-FSC", "S4G",
                 "SINGS Spectroscopy", "Brown", "Planck"]
# -----------------------------------------------------------------
class SEDFetcher(Configurable):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(SEDFetcher, self).__init__(*args, **kwargs)
# -- Attributes --
# Determine the NGC name of the galaxy
self.ngc_name = None
# The Vizier querying object
self.vizier = Vizier(columns=["**"])
self.vizier.ROW_LIMIT = -1
# The observed SED
self.seds = dict()
# The filters
self.filters = dict()
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
"""
# 2. Get the SEDs
self.get()
# List the SEDs
if self.config.list: self.list()
# Writing
if self.config.write: self.write()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:return:
"""
# Call the setup function of the base class
super(SEDFetcher, self).setup(**kwargs)
# Create a dictionary of filters
keys = ["Ha", "FUV", "NUV", "U", "B", "V", "R", "J", "H", "K", "IRAS 12", "IRAS 25", "IRAS 60", "IRAS 100",
"I1", "I2", "I3", "I4", "MIPS 24", "MIPS 70", "MIPS 160", "SDSS u", "SDSS g", "SDSS r", "SDSS i",
"SDSS z"]
for key in keys: self.filters[key] = parse_filter(key)
# Get the NGC name
if "ngc_name" in kwargs:
self.ngc_name = kwargs.pop("ngc_name")
else:
self.ngc_name = catalogs.get_ngc_name(self.config.galaxy_name)
# -----------------------------------------------------------------
def get(self):
"""
This function ...
:return:
"""
# 2. Get the dustpedia SED
if "DustPedia" in self.config.catalogs: self.get_dustpedia()
# 2. If requested, query the GALEX ultraviolet atlas of nearby galaxies catalog (Gil de Paz+, 2007)
if "GALEX" in self.config.catalogs: self.get_galex()
# 3. If requested, query the 2MASS Extended sources catalog (IPAC/UMass, 2003-2006)
if "2MASS" in self.config.catalogs: self.get_2mass()
# 5. If requested, query the Radial distribution in SINGS galaxies (I.) catalogs (Munoz-Mateos+, 2009)
if "SINGS" in self.config.catalogs: self.get_sings()
# If requested, query the LVL global optical photometry (Cook+, 2014) catalog
if "LVL" in self.config.catalogs: self.get_lvl()
# If requested, query the Spitzer Local Volume Legacy: IR photometry (Dale+, 2009)
if "Spitzer" in self.config.catalogs: self.get_spitzer()
# If requested, query the Spitzer/IRS ATLAS project source (Hernan-Caballero+, 2011)
if "Spitzer/IRS" in self.config.catalogs: self.get_spitzer_irs()
# If requested, query the Compendium of ISO far-IR extragalactic data (Brauher+, 2008)
if "IRAS" in self.config.catalogs: self.get_iras()
# If requested, query the Imperial IRAS-FSC redshift catalogue (IIFSCz) (Wang+, 2009)
if "IRAS-FSC" in self.config.catalogs: self.get_iras_fsc()
# If requested, query the S4G catalog for IRAC fluxes
if "S4G" in self.config.catalogs: self.get_s4g()
# If requested, query the Spectroscopy and abundances of SINGS galaxies (Moustakas+, 2010) catalog
if "SINGS Spectroscopy" in self.config.catalogs: self.get_emission_lines()
# If requested, query the Atlas of UV-to-MIR galaxy SEDs (Brown+, 2014)
if "Brown" in self.config.catalogs: self.get_brown()
# If requested, query the Planck Catalog of Compact Sources Release 1 (Planck, 2013)
if "Planck" in self.config.catalogs: self.get_planck()
# Other interesting catalogs:
# http://vizier.cfa.harvard.edu/viz-bin/VizieR-3?-source=J/ApJS/199/22
# http://vizier.cfa.harvard.edu/viz-bin/VizieR-3?-source=J/ApJS/212/18/sample&-c=NGC%203031&-c.u=arcmin&-c.r=2&-c.eq=J2000&-c.geom=r&-out.max=50&-out.form=HTML%20Table&-oc.form=sexa
# http://vizier.cfa.harvard.edu/viz-bin/VizieR-3?-source=J/ApJS/220/6
# -----------------------------------------------------------------
def get_dustpedia(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting the DustPedia SED for this galaxy ...")
# Create the DustPedia photometry object
dustpedia = DustPediaSEDFetcher()
# Configure
dustpedia.config.galaxy_name = self.config.galaxy_name
dustpedia.config.write = False
# Run
dustpedia.run(ngc_name=self.ngc_name)
# Get the SED
sed = dustpedia.sed
# Add the SED
self.seds["DustPedia"] = sed
# -----------------------------------------------------------------
def fetch_table(self, catalog, index=0, object_name=None):
"""
This function ...
:param catalog:
:param index:
:param object_name:
:return:
"""
# Try fetching the catalog
try:
if object_name is not None: result = self.vizier.query_object(object_name, catalog=catalog)
else: result = self.vizier.get_catalogs(catalog)
except TableParseError:
log.warning("Could not fetch data from the '" + catalog + "' catalog")
return None
# Check the result
if len(result) == 0:
log.warning("The result from the '" + catalog + "' is empty")
return None
# Get the table
table = result[index]
# Return the table
return table
# -----------------------------------------------------------------
def fetch_tables(self, catalog, indices, object_name=None):
"""
This function ...
:param catalog:
:param indices:
:return:
"""
# Try fetching the catalog
try:
if object_name is not None: result = self.vizier.query_object(object_name, catalog=catalog)
else: result = self.vizier.get_catalogs(catalog)
except TableParseError:
log.warning("Could not fetch data from the '" + catalog + "' catalog")
#return None
return [None] * len(indices)
# Check the result
if len(result) == 0:
log.warning("The result from the '" + catalog + "' is empty")
#return None
return [None] * len(indices)
# Get the tables
tables = []
for index in indices: tables.append(result[index])
return tables
# -----------------------------------------------------------------
def get_galex(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting fluxes from the GALEX ultraviolet atlas of nearby galaxies ...")
# Fetch
table = self.fetch_table("J/ApJS/173/185/galex", object_name=self.config.galaxy_name)
if table is None: return
# Create an SED
sed = ObservedSED(photometry_unit="Jy")
# All AB magnitudes
# FUV --
if "asyFUV" in table.colnames and not table["asyFUV"].mask[0]:
fuv_mag = table["asyFUV"][0]
fuv_mag_error = table[0]["e_asyFUV"]
fuv_mag_lower = fuv_mag - fuv_mag_error
fuv_mag_upper = fuv_mag + fuv_mag_error
# flux
fuv = unitconversion.ab_to_jansky(fuv_mag)
fuv_lower = unitconversion.ab_to_jansky(fuv_mag_upper)
fuv_upper = unitconversion.ab_to_jansky(fuv_mag_lower)
fuv_error = ErrorBar(fuv_lower, fuv_upper, at=fuv)
sed.add_point(self.filters["FUV"], fuv, fuv_error)
# NUV --
if "asyNUV" in table.colnames and not table["asyNUV"].mask[0]:
nuv_mag = table[0]["asyNUV"]
nuv_mag_error = table[0]["e_asyNUV"]
nuv_mag_lower = nuv_mag - nuv_mag_error
nuv_mag_upper = nuv_mag + nuv_mag_error
# flux
nuv = unitconversion.ab_to_jansky(nuv_mag)
nuv_lower = unitconversion.ab_to_jansky(nuv_mag_upper)
nuv_upper = unitconversion.ab_to_jansky(nuv_mag_lower)
nuv_error = ErrorBar(nuv_lower, nuv_upper, at=nuv)
sed.add_point(self.filters["NUV"], nuv, nuv_error)
# U band --
if "Umag" in table.colnames and not table["Umag"].mask[0]:
# From Vega magnitude system to AB magnitudes
u_mag = unitconversion.vega_to_ab(table[0]["Umag"], "U")
u_mag_error = unitconversion.vega_to_ab(table[0]["e_Umag"], "U")
u_mag_lower = u_mag - u_mag_error
u_mag_upper = u_mag + u_mag_error
# U band flux
u = unitconversion.ab_to_jansky(u_mag)
u_lower = unitconversion.ab_to_jansky(u_mag_upper)
u_upper = unitconversion.ab_to_jansky(u_mag_lower)
u_error = ErrorBar(u_lower, u_upper, at=u)
sed.add_point(self.filters["U"], u, u_error)
# B band --
if "Bmag" in table.colnames and not table["Bmag"].mask[0]:
b_mag = unitconversion.vega_to_ab(table[0]["Bmag"], "B")
b_mag_error = unitconversion.vega_to_ab(table[0]["e_Bmag"], "B")
b_mag_lower = b_mag - abs(b_mag_error)
b_mag_upper = b_mag + abs(b_mag_error)
# print("bmag", b_mag)
# print("bmagerror", b_mag_error)
# print("bmaglower", b_mag_lower)
# print("bmagupper", b_mag_upper)
# B band flux
b = unitconversion.ab_to_jansky(b_mag)
b_lower = unitconversion.ab_to_jansky(b_mag_upper)
b_upper = unitconversion.ab_to_jansky(b_mag_lower)
b_error = ErrorBar(b_lower, b_upper, at=b)
sed.add_point(self.filters["B"], b, b_error)
# V band --
if "Vmag" in table.colnames and not table["Vmag"].mask[0]:
v_mag = unitconversion.vega_to_ab(table[0]["Vmag"], "V")
v_mag_error = unitconversion.vega_to_ab(table[0]["e_Vmag"], "V")
v_mag_lower = v_mag - v_mag_error
v_mag_upper = v_mag + v_mag_error
# V band flux
v = unitconversion.ab_to_jansky(v_mag)
v_lower = unitconversion.ab_to_jansky(v_mag_upper)
v_upper = unitconversion.ab_to_jansky(v_mag_lower)
v_error = ErrorBar(v_lower, v_upper, at=v)
sed.add_point(self.filters["V"], v, v_error)
# In 2MASS magnitude system -> can be converted directly into Jy (see below)
# J band --
if "Jmag" in table.colnames and not table["Jmag"].mask[0]:
j_mag = table[0]["Jmag"]
j_mag_error = table[0]["e_Jmag"]
j_mag_lower = j_mag - j_mag_error
j_mag_upper = j_mag + j_mag_error
# J band flux
j = unitconversion.photometry_2mass_mag_to_jy(j_mag, "J")
j_lower = unitconversion.photometry_2mass_mag_to_jy(j_mag_upper, "J")
j_upper = unitconversion.photometry_2mass_mag_to_jy(j_mag_lower, "J")
j_error = ErrorBar(j_lower, j_upper, at=j)
sed.add_point(self.filters["J"], j, j_error)
# H band --
if "Hmag" in table.colnames and not table["Hmag"].mask[0]:
h_mag = table[0]["Hmag"]
h_mag_error = table[0]["e_Hmag"]
h_mag_lower = h_mag - h_mag_error
h_mag_upper = h_mag + h_mag_error
# H band flux
h = unitconversion.photometry_2mass_mag_to_jy(h_mag, "H")
h_lower = unitconversion.photometry_2mass_mag_to_jy(h_mag_upper, "H")
h_upper = unitconversion.photometry_2mass_mag_to_jy(h_mag_lower, "H")
h_error = ErrorBar(h_lower, h_upper, at=h)
sed.add_point(self.filters["H"], h, h_error)
# K band --
if "Kmag" in table.colnames and not table["Kmag"].mask[0]:
k_mag = table[0]["Kmag"]
k_mag_error = table[0]["e_Kmag"]
k_mag_lower = k_mag - k_mag_error
k_mag_upper = k_mag + k_mag_error
# K band flux
k = unitconversion.photometry_2mass_mag_to_jy(k_mag, "Ks")
k_lower = unitconversion.photometry_2mass_mag_to_jy(k_mag_upper, "Ks")
k_upper = unitconversion.photometry_2mass_mag_to_jy(k_mag_lower, "Ks")
k_error = ErrorBar(k_lower, k_upper, at=k)
sed.add_point(self.filters["K"], k, k_error)
# F12 band flux
if "F12um" in table.colnames and not table["F12um"].mask[0]:
f12 = table[0]["F12um"]
f12_error = ErrorBar(table[0]["e_F12um"])
sed.add_point(self.filters["IRAS 12"], f12, f12_error)
# F25 band flux
if "F25um" in table.colnames and not table["F25um"].mask[0]:
f25 = table[0]["F25um"]
f25_error = ErrorBar(table[0]["e_F25um"])
sed.add_point(self.filters["IRAS 25"], f25, f25_error)
# F60 band flux
if "F60um" in table.colnames and not table["F60um"].mask[0]:
f60 = table[0]["F60um"]
f60_error = ErrorBar(table[0]["e_F60um"])
sed.add_point(self.filters["IRAS 60"], f60, f60_error)
# F100 band flux
if "F100um" in table.colnames and not table["F100um"].mask[0]:
f100 = table[0]["F100um"]
f100_error = ErrorBar(table[0]["e_F100um"])
sed.add_point(self.filters["IRAS 100"], f100, f100_error)
# Check the number of points
if len(sed) == 0:
log.warning("No photometry found in the GALEX catalog")
return
# Add the SED to the dictionary
self.seds["GALEX"] = sed
# -----------------------------------------------------------------
def get_2mass(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting fluxes from the 2MASS Extended Catalog ...")
# 2MASS Extended Catalog: "VII/233/xsc": J, H, K (2006AJ....131.1163S)
# Get the table
table = self.fetch_table("VII/233/xsc", object_name=self.config.galaxy_name)
if table is None: return
# Create an SED
sed = ObservedSED(photometry_unit="Jy")
# In 2MASS magnitude system -> can be converted directly into Jy (see below)
if "J.ext" in table.colnames:
j_mag = table[0]["J.ext"]
j_mag_error = table[0]["e_J.ext"]
j_mag_lower = j_mag - j_mag_error
j_mag_upper = j_mag + j_mag_error
# J band flux
j = unitconversion.photometry_2mass_mag_to_jy(j_mag, "J")
j_lower = unitconversion.photometry_2mass_mag_to_jy(j_mag_upper, "J")
j_upper = unitconversion.photometry_2mass_mag_to_jy(j_mag_lower, "J")
j_error = ErrorBar(j_lower, j_upper, at=j)
sed.add_point(self.filters["J"], j, j_error)
if "H.ext" in table.colnames:
h_mag = table[0]["H.ext"]
h_mag_error = table[0]["e_H.ext"]
h_mag_lower = h_mag - h_mag_error
h_mag_upper = h_mag + h_mag_error
# H band flux
h = unitconversion.photometry_2mass_mag_to_jy(h_mag, "H")
h_lower = unitconversion.photometry_2mass_mag_to_jy(h_mag_upper, "H")
h_upper = unitconversion.photometry_2mass_mag_to_jy(h_mag_lower, "H")
h_error = ErrorBar(h_lower, h_upper, at=h)
sed.add_point(self.filters["H"], h, h_error)
if "K.ext" in table.colnames:
k_mag = table[0]["K.ext"]
k_mag_error = table[0]["e_K.ext"]
k_mag_lower = k_mag - k_mag_error
k_mag_upper = k_mag + k_mag_error
# K band flux
k = unitconversion.photometry_2mass_mag_to_jy(k_mag, "Ks")
k_lower = unitconversion.photometry_2mass_mag_to_jy(k_mag_upper, "Ks")
k_upper = unitconversion.photometry_2mass_mag_to_jy(k_mag_lower, "Ks")
k_error = ErrorBar(k_lower, k_upper, at=k)
sed.add_point(self.filters["K"], k, k_error)
# Check number of points
if len(sed) == 0:
log.warning("No photometry found in the 2MASS catalog")
return
# Add the SED to the dictionary
self.seds["2MASS"] = sed
# -----------------------------------------------------------------
def get_sings(self):
    """
    Get photometry from the SINGS catalog ("J/ApJ/703/1569") and store the
    resulting SED under the "SINGS" label in self.seds.

    The result is a TableList with 8 tables (0 to 7); we need:
    - Table6 -> index 5: GALEX / SDSS / 2MASS asymptotic AB magnitudes
    - Table7 -> index 6: Spitzer IRAC and MIPS flux densities as log10(Jy)
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the SINGS catalog ...")

    # Try getting the tables
    table1, table2 = self.fetch_tables("J/ApJ/703/1569", (5, 6))

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Table6 magnitude columns and the filters they correspond to
    # (all magnitudes in the AB system -> converted with ab_to_jansky)
    magnitude_bands = [("FUV", "FUV"), ("NUV", "NUV"), ("umag", "SDSS u"),
                       ("gmag", "SDSS g"), ("rmag", "SDSS r"), ("imag", "SDSS i"),
                       ("zmag", "SDSS z"), ("Jmag", "J"), ("Hmag", "H"), ("Ksmag", "K")]

    # Table7 log-flux columns [logJy] and the filters they correspond to
    log_flux_bands = [("logF3.6", "I1"), ("logF4.5", "I2"), ("logF5.8", "I3"),
                      ("logF8.0", "I4"), ("logF24", "MIPS 24"), ("logF70", "MIPS 70"),
                      ("logF160", "MIPS 160")]

    # Table6: AB magnitudes
    if table1 is not None:

        # Find the row index that corresponds with the specified galaxy
        galaxy_index = tables.find_index(table1, self.ngc_name)

        # Check
        if galaxy_index is not None:

            # Loop over the bands
            for column_name, filter_name in magnitude_bands:

                # Magnitude and its uncertainty; take the absolute value of the
                # error so a negative catalog value cannot invert the error bar
                # (previously only the g band did this, the other bands did not)
                magnitude = table1[galaxy_index][column_name]
                magnitude_error = abs(table1[galaxy_index]["e_" + column_name])

                # Convert to Jy: a brighter (smaller) magnitude gives a larger flux,
                # so the upper magnitude maps to the lower flux and vice versa
                fluxdensity = unitconversion.ab_to_jansky(magnitude)
                fluxdensity_lower = unitconversion.ab_to_jansky(magnitude + magnitude_error)
                fluxdensity_upper = unitconversion.ab_to_jansky(magnitude - magnitude_error)
                fluxdensity_error = ErrorBar(fluxdensity_lower, fluxdensity_upper, at=fluxdensity)

                # Add data point to SED
                sed.add_point(self.filters[filter_name], fluxdensity, fluxdensity_error)

    # Table7: IRAC and MIPS asymptotic magnitudes, stored as log10 of Jy
    if table2 is not None:

        # Find galaxy index
        galaxy_index = tables.find_index(table2, self.ngc_name)

        # Check
        if galaxy_index is not None:

            # Loop over the bands
            for column_name, filter_name in log_flux_bands:

                # log flux density and its uncertainty
                log_flux = table2[galaxy_index][column_name]
                log_flux_error = table2[galaxy_index]["e_" + column_name]

                # Calculate data points and error bars in Janskys
                fluxdensity = 10. ** log_flux
                fluxdensity_lower = 10. ** (log_flux - log_flux_error)
                fluxdensity_upper = 10. ** (log_flux + log_flux_error)
                fluxdensity_error = ErrorBar(fluxdensity_lower, fluxdensity_upper, at=fluxdensity)

                # Add data point to SED
                sed.add_point(self.filters[filter_name], fluxdensity, fluxdensity_error)

    # Check if any points
    if len(sed) == 0:
        log.warning("No photometry found in the SINGS catalog")
        return

    # Add the SED to the dictionary
    self.seds["SINGS"] = sed
# -----------------------------------------------------------------
def get_lvl(self):
    """
    Get global optical photometry from the LVL catalog (Cook+, 2014;
    "J/MNRAS/445/881") and store the resulting SED under the "LVL" label.

    All magnitudes in this catalog are in the AB system, so they can be
    converted to Jy with ab_to_jansky. Only the '/sample' table is used
    (same bands as Simbad uses from this catalog).
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the LVL catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Query the sample table for this galaxy
    table = self.fetch_table("J/MNRAS/445/881/sample", object_name=self.config.galaxy_name)
    if table is None: return

    # Catalog band prefix -> filter name (Umag, Bmag, ... are AB magnitudes)
    band_filters = {"U": "U", "B": "B", "V": "V", "R": "R",
                    "u": "SDSS u", "g": "SDSS g", "r": "SDSS r",
                    "i": "SDSS i", "z": "SDSS z"}

    # Loop over the bands in a fixed order
    for prefix in ("U", "B", "V", "R", "u", "g", "r", "i", "z"):

        mag_column = prefix + "mag"

        # Skip bands that are absent from the table or masked for this galaxy
        if mag_column not in table.colnames: continue
        if table[mag_column].mask[0]: continue

        # AB magnitude and its uncertainty
        mag = table[0][mag_column]
        mag_error = table[0]["e_" + mag_column]

        # Convert to Jy: brighter magnitude -> larger flux, hence the swap
        flux = unitconversion.ab_to_jansky(mag)
        flux_min = unitconversion.ab_to_jansky(mag + mag_error)
        flux_max = unitconversion.ab_to_jansky(mag - mag_error)

        # Add the data point
        sed.add_point(self.filters[band_filters[prefix]], flux, ErrorBar(flux_min, flux_max, at=flux))

    # Anything found?
    if len(sed) == 0:
        log.warning("No photometry found in the LVL catalog")
        return

    # Store the SED
    self.seds["LVL"] = sed
# -----------------------------------------------------------------
def get_spitzer(self):
    """
    Get infrared photometry from the Spitzer Local Volume Legacy catalog
    (Dale+, 2009; "J/ApJ/703/517/sample") and store the resulting SED under
    the "Spitzer" label.

    Catalog columns, all already in Jy (each with an 'e_' uncertainty column):
    F1.25 (2MASS J), F1.65 (2MASS H), F2.17 (2MASS Ks),
    F3.6/F4.5/F5.8/F8.0 (Spitzer/IRAC), F24/F70/F160 (Spitzer/MIPS).
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the Spitzer catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Get the table
    table = self.fetch_table("J/ApJ/703/517/sample", object_name=self.config.galaxy_name)
    if table is None: return

    # Define column names for bands
    relevant_bands = [("1.25", "J"), ("1.65", "H"), ("2.17", "K"), ("3.6", "I1"), ("4.5", "I2"), ("5.8", "I3"),
                      ("8.0", "I4"), ("24", "MIPS 24"), ("70", "MIPS 70"), ("160", "MIPS 160")]

    # Loop over the bands
    for band_prefix_catalog, filter_name in relevant_bands:

        column_name = "F" + band_prefix_catalog
        error_column_name = "e_" + column_name

        # Skip columns that are absent from the table
        if column_name not in table.colnames: continue

        # Skip masked values
        if table[column_name].mask[0]: continue

        # Flux and error already in Jy; the error column may be masked separately
        fluxdensity = table[0][column_name]
        fluxdensity_error = ErrorBar(table[0][error_column_name]) if not table[error_column_name].mask[0] else None

        # Add data point to SED
        sed.add_point(self.filters[filter_name], fluxdensity, fluxdensity_error)

    # Check number of points; bail out WITHOUT registering an empty SED.
    # (The 'return' was missing here, unlike in the other get_* methods, so an
    # empty "Spitzer" SED used to be added to self.seds anyway.)
    if len(sed) == 0:
        log.warning("No photometry found in the Spitzer catalog")
        return

    # Add the SED to the dictionary
    self.seds["Spitzer"] = sed
# -----------------------------------------------------------------
def get_spitzer_irs(self):
    """
    Get IRAC and MIPS photometry from the Spitzer/IRS ATLAS project source
    catalog (Hernan-Caballero+, 2011; "J/MNRAS/414/500/catalog") and store
    the resulting SED under the "Spitzer-IRS" label.

    NOTE: parentheses () in the original column names, e.g. "F(3.6)", are
    converted into underscores by Astropy, hence the "F_3.6_" form below.
    Columns (all in Jy, each with an 'e_' rms uncertainty):
    F(3.6) IRAC1, F(8.0) IRAC4, F(24), F(70).
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the Spitzer/IRS catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Query the catalog for this galaxy
    table = self.fetch_table("J/MNRAS/414/500/catalog", object_name=self.config.galaxy_name)
    if table is None: return

    # Wavelength label in the catalog -> filter name
    for wavelength_label, filter_name in (("3.6", "I1"), ("8.0", "I4"), ("24", "MIPS 24"), ("70", "MIPS 70")):

        flux_column = "F_" + wavelength_label + "_"
        error_column = "e_" + flux_column

        # Skip absent columns and masked flux values
        if flux_column not in table.colnames: continue
        if table[flux_column].mask[0]: continue

        # Values are already in Jy; the uncertainty may be masked independently
        flux = table[0][flux_column]
        errorbar = None if table[error_column].mask[0] else ErrorBar(table[0][error_column])

        # Add the data point
        sed.add_point(self.filters[filter_name], flux, errorbar)

    # Anything found?
    if len(sed) == 0:
        log.warning("No photometry found in the Spitzer-IRS catalog")
        return

    # Store the SED
    self.seds["Spitzer-IRS"] = sed
# -----------------------------------------------------------------
def get_iras(self):
    """
    Get IRAS 12/25/60/100 micron flux densities from the compendium of ISO
    far-IR extragalactic data (Brauher+, 2008; "J/ApJS/178/280/table1") and
    store the resulting SED under the "IRAS" label.

    Catalog columns (all in Jy): F12, F25, F60, F100.
    The catalog provides no uncertainties, so zero error bars are used.
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the IRAS catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Query the catalog for this galaxy
    table = self.fetch_table("J/ApJS/178/280/table1", object_name=self.config.galaxy_name)
    if table is None: return

    # Wavelength label -> filter name
    for wavelength_label, filter_name in (("12", "IRAS 12"), ("25", "IRAS 25"), ("60", "IRAS 60"), ("100", "IRAS 100")):

        flux_column = "F" + wavelength_label

        # Skip absent columns and masked values
        if flux_column not in table.colnames: continue
        if table[flux_column].mask[0]: continue

        # Flux is already in Jy; no uncertainties available -> zero error bar
        flux = table[0][flux_column]
        sed.add_point(self.filters[filter_name], flux, ErrorBar(0.0))

    # NOTE: table2 of the same catalog holds additional far-IR line/band fluxes
    # (F170, F158, F145, F122, F88, F63, F57, F52 with uncertainties); it is
    # currently not used.

    # Anything found?
    if len(sed) == 0:
        log.warning("No photometry found in the IRAS catalog")
        return

    # Store the SED
    self.seds["IRAS"] = sed
# -----------------------------------------------------------------
def get_iras_fsc(self):
    """
    Get IRAS 12/25/60/100 micron flux densities from the Imperial IRAS-FSC
    redshift catalogue (IIFSCz) (Wang+, 2009; "J/MNRAS/398/109/iifsczv4")
    and store the resulting SED under the "IRAS-FSC" label.

    Catalog columns used (fluxes in Jy): S12um, S25um, S60um, S100um.
    The catalog also carries SDSS (umag..zmag) and 2MASS (Jmag/Hmag/Kmag)
    magnitudes with uncertainties, but those are not used here (following
    Simbad). No uncertainties are available for the IRAS fluxes.
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the IRAS-FSC catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Get table
    table = self.fetch_table("J/MNRAS/398/109/iifsczv4", object_name=self.config.galaxy_name)
    if table is None: return

    # Define column names for bands
    relevant_bands = [("12", "IRAS 12"), ("25", "IRAS 25"), ("60", "IRAS 60"), ("100", "IRAS 100")]

    # Loop over the bands
    for band_prefix_catalog, filter_name in relevant_bands:

        # Define column name
        column_name = "S" + band_prefix_catalog + "um"

        # Skip columns that are absent from the table
        if column_name not in table.colnames: continue

        # Skip masked values (this check was missing here, unlike in the
        # analogous get_iras method, so masked entries could be added as
        # bogus data points)
        if table[column_name].mask[0]: continue

        # Flux already in Jy; no uncertainties available -> zero error bar
        fluxdensity = table[0][column_name]
        fluxdensity_error = ErrorBar(0.0)

        # Add data point to SED
        sed.add_point(self.filters[filter_name], fluxdensity, fluxdensity_error)

    # Check the number of points
    if len(sed) == 0:
        log.warning("No photometry found in the IRAS-FSC catalog")
        return

    # Add the SED to the dictionary
    self.seds["IRAS-FSC"] = sed
# -----------------------------------------------------------------
def get_s4g(self):
    """
    Get IRAC 3.6 and 4.5 micron photometry from the S4G catalog
    ("J/PASP/122/1397/s4g") and store the resulting SED under "S4G".

    The bracketed catalog column names "[3.6]" and "[4.5]" appear as
    "__3.6_" and "__4.5_" in the Astropy table (with "e__3.6_" / "e__4.5_"
    for the uncertainties). Magnitudes are converted with ab_to_jansky.
    :return:
    """
    # Inform the user
    log.info("Getting fluxes from the S4G catalog ...")

    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Query the S4G catalog for this galaxy
    table = self.fetch_table("J/PASP/122/1397/s4g", object_name=self.config.galaxy_name)
    if table is None: return

    # Mangled column name -> filter name (I1 = IRAC 3.6, I2 = IRAC 4.5)
    for column, filter_name in (("__3.6_", "I1"), ("__4.5_", "I2")):

        # Skip absent columns
        if column not in table.colnames: continue

        # Magnitude and its uncertainty ("e" + "__3.6_" == "e__3.6_")
        magnitude = table[column][0]
        magnitude_error = table["e" + column][0]

        # Convert to Jy: a brighter (smaller) magnitude gives a larger flux
        flux = unitconversion.ab_to_jansky(magnitude)
        flux_lower = unitconversion.ab_to_jansky(magnitude + magnitude_error)
        flux_upper = unitconversion.ab_to_jansky(magnitude - magnitude_error)

        # Add the data point
        sed.add_point(self.filters[filter_name], flux, ErrorBar(flux_lower, flux_upper, at=flux))

    # Anything found?
    if len(sed) == 0:
        log.warning("No photometry found in the S4G catalog")
        return

    # Store the SED
    self.seds["S4G"] = sed
# -----------------------------------------------------------------
def get_brown(self):
    """
    Get photometry from the Brown+ 2014 catalog ("J/ApJS/212/18/sample"):
    AB magnitudes for the sample with neither foreground nor intrinsic dust
    extinction corrections, and modeled Milky Way foreground dust extinction.

    NOTE(review): this is an unimplemented stub -- it creates an empty SED
    and returns without querying the catalog or storing anything in self.seds.
    :return:
    """
    # Create an SED (currently unused: the function body below is a stub)
    sed = ObservedSED(photometry_unit="Jy")
    # Catalog columns available for the eventual implementation (each with an
    # 'e_' uncertainty column; all are AB magnitudes):
    # FUV: [12.5/22.9] GALEX FUV; UVW2, UVM2, NUV, UVW1: GALEX / Swift UV bands
    # Umag: [11.9/15.7] Swift/UVOT U; umag, gmag, Vmag, rmag, imag, zmag: optical
    # Jmag, Hmag, Ksmag: 2MASS near-IR
    # W1mag, W2mag, W3mag, W4mag: WISE; W4'mag: corrected WISE W4 AB magnitude
    # [3.6], [4.5], [5.8], [8.0]: Spitzer/IRAC; [24]: Spitzer/MIPS
    pass
# -----------------------------------------------------------------
def get_planck(self):
    """
    Get photometry from the Planck catalog.

    NOTE(review): unimplemented stub -- it creates an empty SED and returns
    without storing anything in self.seds.
    :return:
    """
    # Create an SED (unused until this function is implemented)
    sed = ObservedSED(photometry_unit="Jy")
    # The second release is not yet available ... ??
# -----------------------------------------------------------------
def get_emission_lines(self):
    """
    Get the H-alpha line flux from the catalog "J/ApJS/190/233/Opt", convert
    it to a flux density in Jy at the H-alpha filter frequency, and store a
    one-point SED under the "Lines" label.

    Relevant columns (fluxes in aW/m2, each with an 'e_' uncertainty):
    FHa: the H-alpha 6563 Angstrom line flux; FNII: the [NII] 6584 Angstrom
    line flux (read but currently not added to the SED).
    :return:
    """
    # Create an SED
    sed = ObservedSED(photometry_unit="Jy")

    # Fetch the optical emission-line table
    table = self.fetch_table("J/ApJS/190/233/Opt")
    if table is None: return

    # Locate the galaxy in the table by its NGC name
    row_index = tables.find_index(table, self.ngc_name, "Name")
    if row_index is None:
        log.warning("No result found for this galaxy in the catalog")
        return

    # H-alpha line flux and uncertainty
    ha_flux = table["FHa"][row_index] * u("aW/m2")
    ha_flux_error = table["e_FHa"][row_index] * u("aW/m2")

    # [NII] line flux and uncertainty (not used in the SED at the moment)
    n2_flux = table["FNII"][row_index] * u("aW/m2")
    n2_flux_error = table["e_FNII"][row_index] * u("aW/m2")

    # Convert the line flux to a flux density at the H-alpha filter frequency
    ha_filter = self.filters["Ha"]
    ha_wavelength = ha_filter.center
    ha_frequency = ha_wavelength.to("Hz", equivalencies=spectral())
    ha_fluxdensity = (ha_flux / ha_frequency).to("Jy")
    ha_fluxdensity_error = (ha_flux_error / ha_frequency).to("Jy")

    # Add the single data point
    sed.add_point(ha_filter, ha_fluxdensity, ErrorBar(ha_fluxdensity_error))

    # Store the SED
    self.seds["Lines"] = sed
# -----------------------------------------------------------------
def list(self):
    """
    Print an overview of the SEDs that were found: a header with the total
    count, then each SED's label, number of fluxes, and filter names.
    :return:
    """
    # Header with the number of SEDs
    print(fmt.green + fmt.bold + "Found SEDs: (" + str(len(self.seds)) + ")" + fmt.reset)
    print("")

    # One section per SED
    for label in self.seds:
        sed = self.seds[label]
        print(fmt.underlined + label + fmt.reset + ": " + str(len(sed)) + " fluxes")
        print("")
        # List the filters of this SED
        for filter_name in sed.filter_names():
            print(" * " + filter_name)
        print("")
# -----------------------------------------------------------------
def write(self):
    """
    Write out all output of this class.
    :return:
    """
    # Inform the user
    log.info("Writing ...")

    # The SEDs are currently the only output
    self.write_seds()
# -----------------------------------------------------------------
def write_seds(self):
    """
    Save every retrieved SED as a '<label>.dat' file in the configured
    output directory (self.config.path).
    :return:
    """
    # Inform the user
    log.info("Writing the SEDs ...")

    # Save each SED under its catalog label
    for label in self.seds:
        # Debugging
        log.debug("Writing " + label + " SED ...")
        # Determine the output path and save the SED there
        self.seds[label].saveto(fs.join(self.config.path, label + ".dat"))
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/services/seds.py
|
Python
|
agpl-3.0
| 52,228
|
[
"Galaxy"
] |
e819d1b8cae5bb75b314ca9f4b3fc83a3d31d6a71dd292c0917551ed2d402848
|
##############################
# Bullet Constraints Builder #
##############################
#
# Written within the scope of Inachus FP7 Project (607522):
# "Technological and Methodological Solutions for Integrated
# Wide Area Situation Awareness and Survivor Localisation to
# Support Search and Rescue (USaR) Teams"
# Versions 1 & 2 were developed at the Laurea University of Applied Sciences,
# Finland. Later versions are independently developed.
# Copyright (C) 2015-2021 Kai Kostack
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
################################################################################
import bpy
mem = bpy.app.driver_namespace
### Import submodules
from global_props import * # Contains global properties
from global_vars import * # Contains global variables
from build_data import * # Contains build data access functions
from builder import * # Contains constraints builder functions
from builder_fm import * # Contains constraints builder function for Fracture Modifier (custom Blender version required)
from formula import * # Contains formula assistant functions
from formula_props import * # Contains formula assistant properties classes
from monitor import * # Contains baking monitor event handler
from tools import * # Contains smaller independently working tools
################################################################################
class OBJECT_OT_bcb_set_config(bpy.types.Operator):
    """Operator storing the current BCB menu configuration in the active scene."""
    bl_idname = "bcb.set_config"
    bl_label = ""
    bl_description = "Stores actual config data in current scene"

    def execute(self, context):
        # Persist the menu settings into the scene's custom properties
        storeConfigDataInScene(bpy.context.scene)
        # Remember that a stored configuration now exists
        context.window_manager.bcb.menu_gotConfig = 1
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_get_config(bpy.types.Operator):
    """Operator restoring previously stored BCB config (and build) data from the scene."""
    bl_idname = "bcb.get_config"
    bl_label = ""
    bl_description = "Loads previous config data from current scene"

    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene
        # Only scenes that carry BCB data can be restored
        if "bcb_prop_elemGrps" in scene.keys():
            ###### Get menu config data from scene
            warning = getConfigDataFromScene(scene)
            # Show a popup for any non-empty warning string
            # ('is not None' instead of '!= None', per PEP 8)
            if warning is not None and len(warning): self.report({'ERROR'}, warning)  # Create popup message
            props.menu_gotConfig = 1
            ###### Get build data from scene
            #getBuildDataFromScene(scene)
            if "bcb_valid" in scene.keys(): props.menu_gotData = 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_clear(bpy.types.Operator):
    """Operator removing all BCB constraints and restoring the original scene state."""
    bl_idname = "bcb.clear"
    bl_label = ""
    bl_description = "Clears constraints from scene and revert back to original state (required to rebuild constraints from scratch)"

    def execute(self, context):
        scene = bpy.context.scene
        # Only scenes that actually carry BCB data need clearing; this also
        # deletes the constraint empty objects
        if "bcb_prop_elemGrps" in scene.keys():
            clearAllDataFromScene(scene, qKeepBuildData=0)
        # Reset the menu state flags
        props = context.window_manager.bcb
        props.menu_gotConfig = 0
        props.menu_gotData = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_build(bpy.types.Operator):
    """Operator running the constraints building process on the selected elements."""
    bl_idname = "bcb.build"
    bl_label = "Build"
    bl_description = "Starts building process and adds constraints to selected elements"

    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene

        # Go to start frame for cache data removal
        if scene.frame_current != scene.frame_start:
            scene.frame_current = scene.frame_start
            bpy.context.screen.scene = scene  # Hack to update scene completely (scene.update() is not enough causing the monitor not work correctly when invoking at a frame > 1)

        ### Free previous bake data
        contextFix = bpy.context.copy()
        if scene.rigidbody_world is not None:
            contextFix['point_cache'] = scene.rigidbody_world.point_cache
            bpy.ops.ptcache.free_bake(contextFix)
            # Invalidate point cache to enforce a full bake without using previous cache data
            if "RigidBodyWorld" in bpy.data.groups:
                # Narrowed from a bare 'except': the group exists (guarded
                # above), so only an empty group can raise here
                try: obj = bpy.data.groups["RigidBodyWorld"].objects[0]
                except IndexError: pass
                else: obj.location = obj.location  # no-op write marks the cache dirty
        else:  # Create new RB world
            bpy.ops.rigidbody.world_add()
            # The groups may not exist yet in a fresh scene; ignore in that case
            # (narrowed from bare 'except' to the missing-key case)
            try: bpy.context.scene.rigidbody_world.group = bpy.data.groups["RigidBodyWorld"]
            except KeyError: pass
            try: bpy.context.scene.rigidbody_world.constraints = bpy.data.groups["RigidBodyConstraints"]
            except KeyError: pass

        ###### Execute main building process from scratch
        # Display progress bar
        bpy.context.window_manager.progress_begin(0, 100)
        # Toggle console
        #bpy.ops.wm.console_toggle()
        ### Build
        error = build()
        if not error: props.menu_gotData = 1
        # Terminate progress bar
        bpy.context.window_manager.progress_end()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_update(bpy.types.Operator):
    """Operator updating constraints generated by a previous build."""
    bl_idname = "bcb.update"
    bl_label = "Update"
    bl_description = "Updates constraints generated from a previous built"

    def execute(self, context):
        # Updating is implemented as a plain re-run of the build operator
        OBJECT_OT_bcb_build.execute(self, context)
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_export_config(bpy.types.Operator):
    """Operator writing the BCB configuration to an external file."""
    bl_idname = "bcb.export_config"
    bl_label = ""
    bl_description = 'Exports BCB config data to an external file located in the render output folder'

    def execute(self, context):
        # Delegate the actual file writing to the config export helper
        exportConfigData(bpy.context.scene)
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_import_config(bpy.types.Operator):
    """Operator reading the BCB configuration back from an external file."""
    bl_idname = "bcb.import_config"
    bl_label = ""
    bl_description = 'Imports BCB config data from an external file located in the render output folder'

    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene
        error = importConfigData(scene)
        # Clamp the selected element group index to the imported list
        elemGrps = mem["elemGrps"]
        if len(elemGrps) > 0 and props.menu_selectedElemGrp >= len(elemGrps):
            props.menu_selectedElemGrp = len(elemGrps) - 1
        # On success, mark the config as available and refresh the menu
        if not error:
            props.menu_gotConfig = 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_export_ascii(bpy.types.Operator):
    """Run a build in ASCII-export mode, writing constraint data to a text block."""
    bl_idname = "bcb.export_ascii"
    bl_label = "Export to Text"
    bl_description = "Exports all constraint data to an ASCII text file within this .blend file instead of creating actual empty objects (only useful for developers at the moment)"
    def execute(self, context):
        props = context.window_manager.bcb
        # Temporarily switch the builder into ASCII export mode. The finally
        # clause guarantees the flag is cleared even if the build raises, so a
        # failed export cannot leave later regular builds stuck in export mode.
        props.asciiExport = 1
        try:
            OBJECT_OT_bcb_build.execute(self, context)
        finally:
            props.asciiExport = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_export_ascii_fm(bpy.types.Operator):
    """Build and simulate via the Fracture Modifier (FM-enabled Blender build required)."""
    bl_idname = "bcb.export_ascii_fm"
    bl_label = "Build FM"
    bl_description = "Builds and simulates with help of the Fracture Modifier (special Blender version required). 'Build FM' will simulate scientifically like 'Build'; Dynamic: Enables geometry also to shatter for more realistic but non-scientific appearance"
    int_ = bpy.props.IntProperty
    # When nonzero, build_fm() is expected to work via a frame-change handler and
    # the exported text block is kept for it — TODO confirm against build_fm().
    use_handler = int_(default = 0)
    def execute(self, context):
        # The FM workflow only works on Blender builds that ship the Fracture Modifier.
        if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE'):
            self.report({'ERROR'}, "Fracture Modifier not available in this Blender version. Visit kaikostack.com/fracture for the FM-enabled Blender version.") # Create popup message
        else:
            ###### Execute main building process from scratch
            scene = bpy.context.scene
            props = context.window_manager.bcb
            ### Free previous bake data
            contextFix = bpy.context.copy()
            if scene.rigidbody_world != None:
                contextFix['point_cache'] = scene.rigidbody_world.point_cache
                bpy.ops.ptcache.free_bake(contextFix)
            else: # Create new RB world
                bpy.ops.rigidbody.world_add()
                # Attach the expected groups to the new world; they may not exist yet,
                # hence the best-effort try/except.
                try: bpy.context.scene.rigidbody_world.group = bpy.data.groups["RigidBodyWorld"]
                except: pass
                try: bpy.context.scene.rigidbody_world.constraints = bpy.data.groups["RigidBodyConstraints"]
                except: pass
            if not props.menu_gotData:
                if not "bcb_ext_noBuild" in scene.keys(): # Option for external scripts to prevent building and keep export data
                    if props.automaticMode and props.preprocTools_aut:
                        OBJECT_OT_bcb_preprocess_do_all_steps_at_once.execute(self, context)
                    ###### ASCII text export
                    OBJECT_OT_bcb_export_ascii.execute(self, context)
            ###### Building FM
            build_fm(use_handler=self.use_handler)
            # Without a handler the exported text block is no longer needed, so remove it
            # (the except branch presumably covers Blender versions lacking do_unlink — verify).
            if not self.use_handler and asciiExportName +".txt" in bpy.data.texts:
                try: bpy.data.texts.remove(bpy.data.texts[asciiExportName +".txt"], do_unlink=1)
                except: bpy.data.texts.remove(bpy.data.texts[asciiExportName +".txt"])
            if props.menu_gotData:
                if props.automaticMode and props.postprocTools_aut:
                    OBJECT_OT_bcb_postprocess_do_all_steps_at_once.execute(self, context)
            ### Start baking when building is completed
            if props.menu_gotData:
                if props.automaticMode and not "bcb_ext_noSimulation" in scene.keys(): # Option for external scripts to prevent simulation
                    if props.saveBackups: bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath.split('_bake.blend')[0].split('.blend')[0] +'_bake.blend')
                    # Prepare event handlers
                    bpy.app.handlers.frame_change_pre.append(monitor_eventHandler)
                    bpy.app.handlers.frame_change_pre.append(monitor_stop_eventHandler)
                    monitor_eventHandler(scene) # Init at current frame before starting simulation
                    # Invoke baking (old method, appears not to work together with the event handler past Blender v2.76 anymore)
                    #bpy.ops.ptcache.bake(contextFix, bake=True)
                    if props.automaticMode and props.postprocTools_aut: pass
                    else:
                        # Start animation playback and by that the baking process
                        if not bpy.context.screen.is_animation_playing:
                            bpy.ops.screen.animation_play()
        self.use_handler = 0
        return{'FINISHED'}
########################################
class OBJECT_OT_bcb_bake(bpy.types.Operator):
    """Start the monitored rigid body simulation, building the constraints first if needed."""
    bl_idname = "bcb.bake"
    bl_label = "Simulate"
    bl_description = "Starts the rigid body simulation. A build is invoked beforehand if not already done. Use this button instead of the regular Blender physics baking as the BCB needs to monitor the simulation for constraint detaching"
    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene
        # Go to start frame for cache data removal
        if scene.frame_current != scene.frame_start:
            scene.frame_current = scene.frame_start
            bpy.context.screen.scene = scene  # Hack to update scene completely (scene.update() is not enough causing the monitor not work correctly when invoking at a frame > 1)
        ### Free previous bake data
        contextFix = bpy.context.copy()
        if scene.rigidbody_world is not None:
            contextFix['point_cache'] = scene.rigidbody_world.point_cache
            bpy.ops.ptcache.free_bake(contextFix)
            # Invalidate point cache to enforce a full bake without using previous cache data
            if "RigidBodyWorld" in bpy.data.groups:
                try: obj = bpy.data.groups["RigidBodyWorld"].objects[0]
                except Exception: pass
                else: obj.location = obj.location  # No-op write flags the cache as outdated
        else:  # Create new RB world
            bpy.ops.rigidbody.world_add()
            # Attach the expected groups to the new world; they may not exist yet (best-effort)
            try: bpy.context.scene.rigidbody_world.group = bpy.data.groups["RigidBodyWorld"]
            except Exception: pass
            try: bpy.context.scene.rigidbody_world.constraints = bpy.data.groups["RigidBodyConstraints"]
            except Exception: pass
        ### Build constraints if required (menu_gotData will be set afterwards and this operator restarted)
        ### If asciiExportName exists then the use of Fracture Modifier is assumed and building is skipped
        if not props.menu_gotData and asciiExportName not in scene.objects:
            if props.automaticMode and props.preprocTools_aut:
                OBJECT_OT_bcb_preprocess_do_all_steps_at_once.execute(self, context)
            ###### Building
            OBJECT_OT_bcb_build.execute(self, context)
            if props.menu_gotData:
                if props.automaticMode and props.postprocTools_aut:
                    OBJECT_OT_bcb_postprocess_do_all_steps_at_once.execute(self, context)
                # Re-enter this operator: the else branch below then starts the simulation
                OBJECT_OT_bcb_bake.execute(self, context)
        ### Start baking when building is completed
        else:
            if props.saveBackups: bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath.split('_bake.blend')[0].split('.blend')[0] +'_bake.blend')
            # Prepare event handlers
            bpy.app.handlers.frame_change_pre.append(monitor_eventHandler)
            bpy.app.handlers.frame_change_pre.append(monitor_stop_eventHandler)
            monitor_eventHandler(scene)  # Init at current frame before starting simulation
            # Invoke baking (old method, appears not to work together with the event handler past Blender v2.76 anymore)
            #bpy.ops.ptcache.bake(contextFix, bake=True)
            if props.automaticMode and props.postprocTools_aut: pass
            else:
                # Start animation playback and by that the baking process
                if not bpy.context.screen.is_animation_playing:
                    bpy.ops.screen.animation_play()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_add(bpy.types.Operator):
    """Append a preset element group to the list, opening the preset menu first if needed."""
    bl_idname = "bcb.add"
    bl_label = ""
    bl_description = "Adds a preset element group to list"
    int_ = bpy.props.IntProperty
    # Index into the presets list; -1 means "not chosen yet", which pops up the menu
    menuIdx = int_(default = -1)
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        if len(elemGrps) < maxMenuElementGroupItems:
            if self.menuIdx < 0:
                # Call menu; the chosen entry re-invokes this operator with menuIdx set
                bpy.ops.wm.call_menu(name="bcb.add_preset")
            else:
                # (redundant re-assignment of props/elemGrps removed — already bound above)
                # Add element group (syncing element group indices happens on execution);
                # copy so later edits don't mutate the shared preset definition
                elemGrps.append(presets[self.menuIdx].copy())
                # Update menu selection
                props.menu_selectedElemGrp = len(elemGrps) -1
                # Update menu related properties from global vars
                props.props_update_menu()
                self.menuIdx = -1
        else: self.report({'ERROR'}, "Maximum allowed element group count reached.") # Create popup message
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_dup(bpy.types.Operator):
    """Append a copy of the currently selected element group to the list."""
    bl_idname = "bcb.dup"
    bl_label = ""
    bl_description = "Duplicates selected element group"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        # Guard clauses: nothing to copy, or no room left in the list
        if len(elemGrps) == 0:
            self.report({'ERROR'}, "There is no element group to duplicate.") # Create popup message
        elif len(elemGrps) >= maxMenuElementGroupItems:
            self.report({'ERROR'}, "Maximum allowed element group count reached.") # Create popup message
        else:
            # Add element group (syncing element group indices happens on execution)
            elemGrps.append(elemGrps[props.menu_selectedElemGrp].copy())
            # Select the newly appended duplicate
            props.menu_selectedElemGrp = len(elemGrps) -1
        # Update menu related properties from global vars
        props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_del(bpy.types.Operator):
    """Remove the currently selected element group from the list."""
    bl_idname = "bcb.del"
    bl_label = ""
    bl_description = "Deletes element group from list"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        # Guard against a stale selection index to avoid an IndexError on deletion
        if 0 <= props.menu_selectedElemGrp < len(elemGrps):
            # Remove element group (syncing element group indices happens on execution)
            del elemGrps[props.menu_selectedElemGrp]
            # Clamp menu selection to the shrunken list
            if len(elemGrps) > 0 and props.menu_selectedElemGrp >= len(elemGrps):
                props.menu_selectedElemGrp = len(elemGrps) -1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_move_up(bpy.types.Operator):
    """Swap the selected element group with its predecessor in the list."""
    bl_idname = "bcb.move_up"
    bl_label = ""
    bl_description = "Moves element group in list"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        sel = props.menu_selectedElemGrp
        if sel > 0:
            # Swap items (syncing element group indices happens on execution)
            elemGrps[sel - 1], elemGrps[sel] = elemGrps[sel], elemGrps[sel - 1]
            # Keep the selection on the moved group
            props.menu_selectedElemGrp = sel - 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_move_down(bpy.types.Operator):
    """Swap the selected element group with its successor in the list."""
    bl_idname = "bcb.move_down"
    bl_label = ""
    bl_description = "Moves element group in list"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        sel = props.menu_selectedElemGrp
        if sel < len(elemGrps) - 1:
            # Swap items (syncing element group indices happens on execution)
            elemGrps[sel + 1], elemGrps[sel] = elemGrps[sel], elemGrps[sel + 1]
            # Keep the selection on the moved group
            props.menu_selectedElemGrp = sel + 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_up(bpy.types.Operator):
    """Select the previous element group in the list."""
    bl_idname = "bcb.up"
    bl_label = " Previous"
    bl_description = "Selects previous element group from list"
    def execute(self, context):
        props = context.window_manager.bcb
        # Step the selection back unless already at the first entry
        if props.menu_selectedElemGrp > 0:
            props.menu_selectedElemGrp = props.menu_selectedElemGrp - 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_down(bpy.types.Operator):
    """Select the next element group in the list."""
    bl_idname = "bcb.down"
    bl_label = " Next"
    bl_description = "Selects next element group from list"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        # Step the selection forward unless already at the last entry
        if props.menu_selectedElemGrp < len(elemGrps) - 1:
            props.menu_selectedElemGrp = props.menu_selectedElemGrp + 1
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_up_more(bpy.types.Operator):
    """Jump the element group selection ten entries back."""
    bl_idname = "bcb.up_more"
    bl_label = ""
    bl_description = "Selects previous element group from list. (x10)"
    def execute(self, context):
        props = context.window_manager.bcb
        stepSize = 10
        # Step back by stepSize, clamping at the first entry
        props.menu_selectedElemGrp = max(props.menu_selectedElemGrp - stepSize, 0)
        # Update menu related properties from global vars
        props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_down_more(bpy.types.Operator):
    """Jump the element group selection ten entries forward."""
    bl_idname = "bcb.down_more"
    bl_label = ""
    bl_description = "Selects next element group from list. (x10)"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        stepSize = 10
        # Step forward by stepSize, clamping at the last entry. The extra max()
        # prevents the selection from becoming -1 when the list is empty
        # (the original `len(elemGrps) - 1` fallback did exactly that).
        last = max(len(elemGrps) - 1, 0)
        props.menu_selectedElemGrp = min(props.menu_selectedElemGrp + stepSize, last)
        # Update menu related properties from global vars
        props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_reset(bpy.types.Operator):
    """Restore the element group list to its built-in defaults."""
    bl_idname = "bcb.reset"
    bl_label = ""
    bl_description = "Resets element group list to defaults"
    def execute(self, context):
        props = context.window_manager.bcb
        # Overwrite element groups with the original backup (syncing element group
        # indices happens on execution). NOTE(review): this is a shallow copy, so
        # entries appear shared with elemGrpsBak — confirm downstream code replaces
        # rather than mutates them.
        mem["elemGrps"] = elemGrpsBak.copy()
        # Reset the selection to the first entry and refresh the menu
        props.menu_selectedElemGrp = 0
        props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_asst_update(bpy.types.Operator):
    """Evaluate the Formula Assistant expressions for the active element group."""
    bl_idname = "bcb.asst_update"
    bl_label = "Evaluate"
    bl_description = "Combines and evaluates above expressions for constraint breaking threshold calculation. It is recommended to choose a Connection Type with 7x Generic constraints to get the best simulation results"
    def execute(self, context):
        props = context.window_manager.bcb
        ###### Combine and evaluate the assistant's expressions
        combineExpressions()
        # Reflect the recalculated values in the menu properties
        props.props_update_menu()
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_asst_update_all(bpy.types.Operator):
    """Evaluate the Formula Assistant expressions of every element group."""
    bl_idname = "bcb.asst_update_all"
    bl_label = "Evaluate All"
    bl_description = "Combines and evaluates expressions for every element groups with active Formula Assistant. Warning: Use this with care as it will overwrite also manually changed breaking thresholds for these element groups"
    def execute(self, context):
        props = context.window_manager.bcb
        elemGrps = mem["elemGrps"]
        # Remember the user's selection so it can be restored afterwards
        selElemGrp_bak = props.menu_selectedElemGrp
        # Visit each element group in turn and evaluate its expressions
        for grpIdx in range(len(elemGrps)):
            props.menu_selectedElemGrp = grpIdx
            props.props_update_menu()  # Sync menu props to the now-selected group
            # Only evaluate groups with an active Formula Assistant
            if props.assistant_menu != "None":
                combineExpressions()
        # Restore the original selection and refresh the menu
        props.menu_selectedElemGrp = selElemGrp_bak
        props.props_update_menu()
        return {'FINISHED'}
################################################################################
class OBJECT_OT_bcb_tool_estimate_cluster_radius(bpy.types.Operator):
    """Derive a cluster radius suggestion from the currently selected objects."""
    bl_idname = "bcb.tool_estimate_cluster_radius"
    bl_label = ""
    bl_description = "Estimate optimal cluster radius from selected objects in scene (even if you already have built a BCB structure only selected objects are considered)"
    def execute(self, context):
        estimate = tool_estimateClusterRadius(bpy.context.scene)
        # Only overwrite the setting when the estimation produced a usable value
        if estimate > 0:
            props = context.window_manager.bcb
            props.clusterRadius = estimate
            # Update menu related properties from global vars
            props.props_update_menu()
        return {'FINISHED'}
################################################################################
class OBJECT_OT_bcb_tool_select_group(bpy.types.Operator):
    """Select this element group's objects in the 3D viewport."""
    bl_idname = "bcb.tool_select_group"
    bl_label = ""
    bl_description = "Selects objects belonging to this element group in viewport"
    def execute(self, context):
        # Delegate viewport selection to the shared helper
        tool_selectGroup(bpy.context.scene)
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preprocess_do_all_steps_at_once(bpy.types.Operator):
    """Run every enabled preprocessing tool in sequence, then store the config in the scene."""
    bl_idname = "bcb.preprocess_do_all_steps_at_once"
    bl_label = "Do All Selected Steps At Once!"
    bl_description = "Executes all selected tools in the order from top to bottom"
    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene
        time_start = time.time()
        # Each enabled tool runs once and its pending flag is cleared afterwards.
        # The order below is significant and mirrors the menu from top to bottom.
        if props.preprocTools_rps: tool_runPythonScript(scene, props.preprocTools_rps_nam); props.preprocTools_rps = 0
        if props.preprocTools_grp: tool_createGroupsFromNames(scene); props.preprocTools_grp = 0
        if props.preprocTools_sep: tool_separateLoose(scene); props.preprocTools_sep = 0
        if props.preprocTools_mod: tool_applyAllModifiers(scene); props.preprocTools_mod = 0
        if props.preprocTools_ctr: tool_centerModel(scene); props.preprocTools_ctr = 0
        if props.preprocTools_sep2: tool_separateLoose(scene); props.preprocTools_sep2 = 0
        if props.preprocTools_dis: tool_discretize(scene); props.preprocTools_dis = 0
        if props.preprocTools_mod2: tool_applyAllModifiers(scene); props.preprocTools_mod2 = 0
        if props.preprocTools_rbs: tool_enableRigidBodies(scene); props.preprocTools_rbs = 0
        if props.preprocTools_int: tool_removeIntersections(scene); props.preprocTools_int = 0
        if props.preprocTools_fix: tool_fixFoundation(scene); props.preprocTools_fix = 0
        if props.preprocTools_gnd: tool_groundMotion(scene); props.preprocTools_gnd = 0
        props.preprocTools_aut = 0
        if not props.automaticMode and not props.preprocTools_int_bol:
            ### Check for intersections and warn if some are left
            count = tool_removeIntersections(scene, mode=4)
            if count > 0:
                # Throw warning
                bpy.context.window_manager.bcb.message = "Warning: Some element intersections could not automatically be resolved, please review selected objects"
                bpy.ops.bcb.report('INVOKE_DEFAULT') # Create popup message box
        print('-- Time total: %0.2f s' %(time.time()-time_start))
        print()
        ###### Store menu config data in scene
        storeConfigDataInScene(scene)
        props.menu_gotConfig = 1
        # Optionally save the preprocessed state under a '_prep' suffix
        if props.saveBackups: bpy.ops.wm.save_as_mainfile(filepath=bpy.data.filepath.split('.blend')[0] +'_prep.blend')
        return{'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_run_python_script(bpy.types.Operator):
    """Execute the user-configured preprocessing Python script."""
    bl_idname = "bcb.preproc_tool_run_python_script"
    bl_label = "Run Python Script"
    bl_description = "Executes a user-defined Python script for customizable automatization purposes (e.g. for scene management and modification)"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the configured script, then clear this tool's pending flag
        tool_runPythonScript(bpy.context.scene, wmProps.preprocTools_rps_nam)
        wmProps.preprocTools_rps = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_tool_select_py_file(bpy.types.Operator):
    """File browser helper that stores a chosen .py path in the BCB settings."""
    bl_idname = "bcb.tool_select_py_file"
    bl_label = "Select"
    bl_description = "Search for Python file (.py)"
    string_ = bpy.props.StringProperty
    filepath = string_(subtype='FILE_PATH')
    filter_glob = string_(default="*.py", options={'HIDDEN'})
    int_ = bpy.props.IntProperty
    # Selects which setting receives the path: 1 = preprocessing, 2 = postprocessing
    opNo = int_(default = 1)
    def execute(self, context):
        props = context.window_manager.bcb
        # Store the chosen path in the property matching the requested operation
        if self.opNo == 1:
            props.preprocTools_rps_nam = self.filepath
        elif self.opNo == 2:
            props.postprocTools_rps_nam = self.filepath
        return {'FINISHED'}
    def invoke(self, context, event):
        # Open Blender's file browser; execute() runs after the user confirms
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
########################################
class OBJECT_OT_bcb_preproc_tool_create_groups_from_names(bpy.types.Operator):
    """Create element groups for selected objects based on their naming convention."""
    bl_idname = "bcb.preproc_tool_create_groups_from_names"
    bl_label = "Create Groups From Names"
    bl_description = "Creates groups for all selected objects based on a specified naming convention and adds them also to the element groups list"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_createGroupsFromNames(bpy.context.scene)
        wmProps.preprocTools_grp = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_apply_all_modifiers(bpy.types.Operator):
    """Apply all modifiers on the selected objects (first pass)."""
    bl_idname = "bcb.preproc_tool_apply_all_modifiers"
    bl_label = "Apply All Modifiers"
    bl_description = "Applies all modifiers on all selected objects"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_applyAllModifiers(bpy.context.scene)
        wmProps.preprocTools_mod = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_apply_all_modifiers_2(bpy.types.Operator):
    """Apply all modifiers on the selected objects (second pass, after discretization)."""
    bl_idname = "bcb.preproc_tool_apply_all_modifiers_2"
    bl_label = "Apply All Modifiers"
    bl_description = "Applies all modifiers on all selected objects (use this second 'Apply All' tool instead of the first one in case you are using presubdivided non-manifold meshes with Solidify modifiers, that way they can be discretized as non-manifolds)"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_applyAllModifiers(bpy.context.scene)
        wmProps.preprocTools_mod2 = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_center_model(bpy.types.Operator):
    """Shift the selected objects as a whole to the world center."""
    bl_idname = "bcb.preproc_tool_center_model"
    bl_label = "Center Model"
    bl_description = "Shifts all selected objects as a whole to the world center of the scene"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_centerModel(bpy.context.scene)
        wmProps.preprocTools_ctr = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_separate_loose(bpy.types.Operator):
    """Split unconnected mesh parts of the selected objects into separate objects (first pass)."""
    bl_idname = "bcb.preproc_tool_separate_loose"
    bl_label = "Separate Loose"
    bl_description = "Separates all loose (not connected) mesh elements within an object into separate objects, this is done for all selected objects"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_separateLoose(bpy.context.scene)
        wmProps.preprocTools_sep = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_separate_loose_2(bpy.types.Operator):
    """Split unconnected mesh parts of the selected objects into separate objects (second pass)."""
    bl_idname = "bcb.preproc_tool_separate_loose_2"
    bl_label = "Separate Loose"
    bl_description = "Separates all loose (not connected) mesh elements within an object into separate objects, this is done for all selected objects"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_separateLoose(bpy.context.scene)
        wmProps.preprocTools_sep2 = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_discretize(bpy.types.Operator):
    """Subdivide the selected objects into smaller segments down to a minimum size."""
    bl_idname = "bcb.preproc_tool_discretize"
    bl_label = "Discretize"
    bl_description = "Discretizes (subdivides) all selected objects into smaller segments by splitting them into halves as long as a specified minimum size is reached. Members of an object group 'bcb_noDiscretization' will be skipped if present"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_discretize(bpy.context.scene)
        wmProps.preprocTools_dis = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_enable_rigid_bodies(bpy.types.Operator):
    """Enable rigid body settings on the selected objects."""
    bl_idname = "bcb.preproc_tool_enable_rigid_bodies"
    bl_label = "Enable Rigid Bodies"
    bl_description = "Enables rigid body settings for all selected objects"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_enableRigidBodies(bpy.context.scene)
        wmProps.preprocTools_rbs = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_remove_intersections(bpy.types.Operator):
    """Detect intersecting object pairs and remove one object of each pair."""
    bl_idname = "bcb.preproc_tool_remove_intersections"
    bl_label = "Intersection Removal"
    bl_description = "Detects and removes intersecting objects (one per found pair). Intesecting objects can be caused by several reasons: accidental object duplication, forgotten boolean cutout objects, careless modeling etc"
    int_ = bpy.props.IntProperty
    # Forwarded to tool_removeIntersections() to select its operating mode
    mode = int_(default = 0)
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool in the requested mode, then clear its pending flag
        tool_removeIntersections(bpy.context.scene, mode=self.mode)
        wmProps.preprocTools_int = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_fix_foundation(bpy.types.Operator):
    """Find or create foundation objects and make them passive rigid bodies."""
    bl_idname = "bcb.preproc_tool_fix_foundation"
    bl_label = "Fix Foundation"
    bl_description = "Either uses name based search to find foundation objects or creates foundation objects for all objects touching the overall model boundary box. These foundation objects will be set to be 'Passive' rigid bodies"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_fixFoundation(bpy.context.scene)
        wmProps.preprocTools_fix = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_preproc_tool_ground_motion(bpy.types.Operator):
    """Attach the selected passive rigid bodies to an animated ground object."""
    bl_idname = "bcb.preproc_tool_ground_motion"
    bl_label = "Ground Motion"
    bl_description = "Attaches all selected passive rigid body objects to a specified and animated ground object. This can be useful for simulating earthquakes through a pre-animated ground motion object like a virtual shake table"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_groundMotion(bpy.context.scene)
        wmProps.preprocTools_gnd = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_tool_select_csv_file(bpy.types.Operator):
    """File browser helper that stores a chosen .csv path (or its folder) in the BCB settings."""
    bl_idname = "bcb.tool_select_csv_file"
    bl_label = "Select"
    bl_description = "Select location for .csv time history file"
    string_ = bpy.props.StringProperty
    filepath = string_(subtype='FILE_PATH')
    filter_glob = string_(default="*.csv", options={'HIDDEN'})
    int_ = bpy.props.IntProperty
    # Target setting: 1 = ground motion file, 2 = location export folder, 3 = force export folder
    opNo = int_(default = 1)
    def execute(self, context):
        props = context.window_manager.bcb
        # Ground motion wants the file itself; the export tools only need its folder
        if self.opNo == 1:
            props.preprocTools_gnd_nam = self.filepath
        elif self.opNo == 2:
            props.postprocTools_lox_nam = os.path.dirname(self.filepath)
        elif self.opNo == 3:
            props.postprocTools_fcx_nam = os.path.dirname(self.filepath)
        return {'FINISHED'}
    def invoke(self, context, event):
        # Open Blender's file browser; execute() runs after the user confirms
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
################################################################################
class OBJECT_OT_bcb_postprocess_do_all_steps_at_once(bpy.types.Operator):
    """Run every enabled postprocessing tool in sequence, then store the config in the scene."""
    bl_idname = "bcb.postprocess_do_all_steps_at_once"
    bl_label = "Do All Selected Steps At Once!"
    bl_description = "Executes all selected tools in the order from top to bottom"
    def execute(self, context):
        props = context.window_manager.bcb
        scene = bpy.context.scene
        time_start = time.time()
        # Run each enabled tool once and clear its pending flag afterwards.
        # The order matters and mirrors the menu from top to bottom.
        if props.postprocTools_lox:
            tool_exportLocationHistory(scene)
            props.postprocTools_lox = 0
        if props.postprocTools_fcx:
            tool_exportForceHistory(scene)
            props.postprocTools_fcx = 0
        if props.postprocTools_fcv:
            tool_forcesVisualization(scene)
            props.postprocTools_fcv = 0
        if props.postprocTools_cav:
            tool_cavityDetection(scene)
            props.postprocTools_cav = 0
        if props.postprocTools_rps:
            tool_runPythonScript(scene, props.postprocTools_rps_nam)
            props.postprocTools_rps = 0
        props.postprocTools_aut = 0
        print('-- Time total: %0.2f s' %(time.time()-time_start))
        print()
        ###### Store menu config data in scene
        storeConfigDataInScene(scene)
        props.menu_gotConfig = 1
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_postproc_tool_export_location_history(bpy.types.Operator):
    """Bake the simulation (if needed) and export an element centroid's location history."""
    bl_idname = "bcb.postproc_tool_export_location_history"
    bl_label = "Export Location History"
    bl_description = "Exports the location time history of an element centroid into a .csv file"
    def execute(self, context):
        # Make sure a baked simulation exists before exporting
        OBJECT_OT_bcb_bake.execute(self, context)
        wmProps = context.window_manager.bcb
        tool_exportLocationHistory(bpy.context.scene)
        wmProps.postprocTools_lox = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_postproc_tool_export_force_history(bpy.types.Operator):
    """Bake the simulation (if needed) and export a constraint's force history (requires FM)."""
    bl_idname = "bcb.postproc_tool_export_force_history"
    bl_label = "Export Force History"
    bl_description = "Exports the force time history for a constraint into a .csv file"
    def execute(self, context):
        # This tool only works on Blender builds that ship the Fracture Modifier
        if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE'):
            self.report({'ERROR'}, "This tool requires the Fracture Modifier which is not available in this Blender version. Visit kaikostack.com/fracture for the FM-enabled Blender version.") # Create popup message
        else:
            # Make sure a baked simulation exists before exporting
            OBJECT_OT_bcb_bake.execute(self, context)
            wmProps = context.window_manager.bcb
            tool_exportForceHistory(bpy.context.scene)
            wmProps.postprocTools_fcx = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_postproc_tool_visualize_forces(bpy.types.Operator):
    """Bake the simulation (if needed) and visualize constraint forces as spheres (requires FM)."""
    bl_idname = "bcb.postproc_tool_visualize_forces"
    bl_label = "Visualize Forces"
    bl_description = "Visualizes forces for constraints as spheres to be created in the scene whereby each sphere's radius is normalized to the predefined maximum force. Accurate values can be found in each sphere's properties"
    def execute(self, context):
        # This tool only works on Blender builds that ship the Fracture Modifier
        if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE'):
            self.report({'ERROR'}, "This tool requires the Fracture Modifier which is not available in this Blender version. Visit kaikostack.com/fracture for the FM-enabled Blender version.") # Create popup message
        else:
            # Make sure a baked simulation exists before visualizing
            OBJECT_OT_bcb_bake.execute(self, context)
            wmProps = context.window_manager.bcb
            tool_forcesVisualization(bpy.context.scene)
            wmProps.postprocTools_fcv = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_postproc_tool_detect_cavities(bpy.types.Operator):
    """Visualize cavities of the selected mesh as a cell grid of air pockets."""
    bl_idname = "bcb.postproc_tool_detect_cavities"
    bl_label = "Detect Cavities"
    bl_description = "Visualizes cavities on the selected mesh in form of a cell grid where each cell represents an air pocket large enough to contain the cell"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the tool, then clear its pending flag
        tool_cavityDetection(bpy.context.scene)
        wmProps.postprocTools_cav = 0
        return {'FINISHED'}
########################################
class OBJECT_OT_bcb_postproc_tool_run_python_script(bpy.types.Operator):
    """Execute the user-configured postprocessing Python script."""
    bl_idname = "bcb.postproc_tool_run_python_script"
    bl_label = "Run Python Script"
    bl_description = "Executes a user-defined Python script for customizable automatization purposes (e.g. for scene management and modification)"
    def execute(self, context):
        wmProps = context.window_manager.bcb
        # Run the configured script, then clear this tool's pending flag
        tool_runPythonScript(bpy.context.scene, wmProps.postprocTools_rps_nam)
        wmProps.postprocTools_rps = 0
        return {'FINISHED'}
|
KaiKostack/bullet-constraints-builder
|
kk_bullet_constraints_builder/gui_buttons.py
|
Python
|
gpl-2.0
| 42,281
|
[
"VisIt"
] |
02e789966f85e103f7272953a86d675db69f15df4a3ef3ec3d1ec91e75c6ea84
|
"""Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
from .util import (
display,
ApplicationError,
)
from .data import (
data_context,
)
# Packages treated as "virtual": they depend on the modules they contain
# instead of the reverse.
VIRTUAL_PACKAGES = {
    'ansible.module_utils.six',
}
def get_python_module_utils_imports(compile_targets):
    """Return a dictionary of module_utils names mapped to sets of python file paths.
    :type compile_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    module_utils = enumerate_module_utils()
    # Split out submodules of virtual packages (see VIRTUAL_PACKAGES); they are
    # resolved through their parent package rather than tracked directly.
    virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
    module_utils -= virtual_utils
    # Map each compile target path to the set of module_utils it imports directly.
    imports_by_target_path = {}
    for target in compile_targets:
        imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
    def recurse_import(import_name, depth=0, seen=None):
        """Recursively expand module_utils imports from module_utils files.
        :type import_name: str
        :type depth: int
        :type seen: set[str] | None
        :rtype set[str]
        """
        display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
        if seen is None:
            seen = set([import_name])
        results = set([import_name])
        # virtual packages depend on the modules they contain instead of the reverse
        if import_name in VIRTUAL_PACKAGES:
            for sub_import in sorted(virtual_utils):
                if sub_import.startswith('%s.' % import_name):
                    if sub_import in seen:
                        continue
                    seen.add(sub_import)
                    matches = sorted(recurse_import(sub_import, depth + 1, seen))
                    for result in matches:
                        results.add(result)
        # Locate the source file for this import: first as a plain module,
        # then as a package __init__.py; fail loudly if neither exists.
        import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))
        if import_path not in imports_by_target_path:
            import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')
            if import_path not in imports_by_target_path:
                raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
        # process imports in reverse so the deepest imports come first
        for name in sorted(imports_by_target_path[import_path], reverse=True):
            if name in virtual_utils:
                continue
            if name in seen:
                continue
            seen.add(name)
            matches = sorted(recurse_import(name, depth + 1, seen))
            for result in matches:
                results.add(result)
        return results
    for module_util in module_utils:
        # recurse over module_utils imports while excluding self
        module_util_imports = recurse_import(module_util)
        module_util_imports.remove(module_util)
        # add recursive imports to all path entries which import this module_util
        for target_path in imports_by_target_path:
            if module_util in imports_by_target_path[target_path]:
                for module_util_import in sorted(module_util_imports):
                    if module_util_import not in imports_by_target_path[target_path]:
                        display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
                        imports_by_target_path[target_path].add(module_util_import)
    # Invert the mapping: module_util name -> set of target paths using it.
    imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
    for target_path in imports_by_target_path:
        for module_util in imports_by_target_path[target_path]:
            imports[module_util].add(target_path)
    # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
    for virtual_util in virtual_utils:
        parent_package = '.'.join(virtual_util.split('.')[:-1])
        imports[virtual_util] = imports[parent_package]
        display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
    # Warn about module_utils nothing imports; they may be dead code.
    for module_util in sorted(imports):
        if not imports[module_util]:
            display.warning('No imports found which use the "%s" module_util.' % module_util)
    return imports
def get_python_module_utils_name(path):  # type: (str) -> str
    """Return a namespace and name from the given module_utils path."""
    base_path = data_context().content.module_utils_path
    # Collections live under the ansible_collections namespace; core
    # module_utils use the ansible.module_utils prefix.
    if data_context().content.collection:
        prefix = 'ansible_collections.' + data_context().content.collection.prefix
    else:
        prefix = 'ansible.module_utils.'
    # A package is named after its directory, not its __init__.py file.
    if path.endswith('/__init__.py'):
        path = os.path.dirname(path)
    relative_name = os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
    return prefix + relative_name
def enumerate_module_utils():
    """Return the set of available module_utils imports.
    :rtype: set[str]
    """
    content = data_context().content
    init_path = os.path.join(content.module_utils_path, '__init__.py')
    names = set()
    for path in content.walk_files(content.module_utils_path):
        # Only python source files count, and the top-level package
        # __init__.py itself is excluded.
        if path != init_path and os.path.splitext(path)[1] == '.py':
            names.add(get_python_module_utils_name(path))
    return names
def extract_python_module_utils_imports(path, module_utils):
    """Return the set of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :rtype: set[str]
    """
    with open(path, 'r') as source_fd:
        source = source_fd.read()
    try:
        tree = ast.parse(source)
    except SyntaxError as ex:
        # Treat this error as a warning so tests can be executed as best as possible.
        # The compile test will detect and report this syntax error.
        display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
        return set()
    visitor = ModuleUtilFinder(path, module_utils)
    visitor.visit(tree)
    return visitor.imports
class ModuleUtilFinder(ast.NodeVisitor):
    """AST visitor to find valid module_utils imports."""
    def __init__(self, path, module_utils):
        """Return a list of module_utils imports found in the specified source file.
        :type path: str
        :type module_utils: set[str]
        """
        # path: source file being scanned; module_utils: known valid names.
        self.path = path
        self.module_utils = module_utils
        # Accumulated set of module_utils names imported by this file.
        self.imports = set()
        # implicitly import parent package
        if path.endswith('/__init__.py'):
            path = os.path.split(path)[0]
        if path.startswith('lib/ansible/module_utils/'):
            package = os.path.split(path)[0].replace('/', '.')[4:]
            if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
                self.add_import(package, 0)
    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_Import(self, node):
        """
        :type node: ast.Import
        """
        self.generic_visit(node)
        for alias in node.names:
            if alias.name.startswith('ansible.module_utils.'):
                # import ansible.module_utils.MODULE[.MODULE]
                self.add_import(alias.name, node.lineno)
    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_ImportFrom(self, node):
        """
        :type node: ast.ImportFrom
        """
        self.generic_visit(node)
        if not node.module:
            # "from . import x" style relative imports have no module name.
            return
        if node.module == 'ansible.module_utils' or node.module.startswith('ansible.module_utils.'):
            for alias in node.names:
                # from ansible.module_utils import MODULE[, MODULE]
                # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
                self.add_import('%s.%s' % (node.module, alias.name), node.lineno)
    def add_import(self, name, line_number):
        """
        :type name: str
        :type line_number: int
        """
        import_name = name
        # Strip trailing dotted components until a known module_utils name
        # matches; an import of a submodule attribute resolves to its module.
        while len(name) > len('ansible.module_utils.'):
            if name in self.module_utils:
                if name not in self.imports:
                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
                    self.imports.add(name)
                return # duplicate imports are ignored
            name = '.'.join(name.split('.')[:-1])
        if self.path.startswith('test/'):
            return # invalid imports in tests are ignored
        # Treat this error as a warning so tests can be executed as best as possible.
        # This error should be detected by unit or integration tests.
        display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
|
cchurch/ansible
|
test/lib/ansible_test/_internal/import_analysis.py
|
Python
|
gpl-3.0
| 9,016
|
[
"VisIt"
] |
fbf508e174ea8a778aa0f5048c763a987f3262d01f1462b64a53296a4c2e1d29
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".paperstyle")
#-------------------------------------------------------------------------
#
# Page orientation
#
#-------------------------------------------------------------------------
PAPER_PORTRAIT = 0
PAPER_LANDSCAPE = 1
#------------------------------------------------------------------------
#
# PaperSize
#
#------------------------------------------------------------------------
class PaperSize:
    """Dimensions of a sheet of paper.

    All stored dimensions are in centimeters; the ``*_inches`` accessors
    convert on the fly.
    """

    def __init__(self, name, height, width):
        """Create a new paper size description.

        :param name: name of the new style
        :param height: page height in centimeters
        :param width: page width in centimeters
        """
        self.name = name
        self.height = height
        self.width = width
        # Only a few well-known names carry a translated display name;
        # everything else leaves trans_pname as None.
        msgids = {
            'Letter': 'paper size|Letter',
            'Legal': 'paper size|Legal',
            'Custom Size': 'Custom Size',
        }
        msgid = msgids.get(self.name)
        self.trans_pname = _(msgid) if msgid is not None else None

    def get_name(self):
        """Return the name of the paper style."""
        return self.name

    def get_height(self):
        """Return the page height in cm."""
        return self.height

    def set_height(self, height):
        """Set the page height in cm."""
        self.height = height

    def get_width(self):
        """Return the page width in cm."""
        return self.width

    def set_width(self, width):
        """Set the page width in cm."""
        self.width = width

    def get_height_inches(self):
        """Return the page height in inches."""
        return self.height / 2.54

    def get_width_inches(self):
        """Return the page width in inches."""
        return self.width / 2.54
#------------------------------------------------------------------------
#
# PaperStyle
#
#------------------------------------------------------------------------
class PaperStyle:
    """Collects the page options for a sheet of paper.

    Combines a :class:`.PaperSize`, an orientation, and four page margins
    (all margins in centimeters).
    """

    def __init__(self, size, orientation,
                 lmargin=2.54, rmargin=2.54, tmargin=2.54, bmargin=2.54):
        """Create a new paper style.

        :param size: size of the new style
        :type size: :class:`.PaperSize`
        :param orientation: page orientation
        :type orientation: PAPER_PORTRAIT or PAPER_LANDSCAPE
        """
        self.__orientation = orientation
        # In landscape mode the sheet is rotated, so height and width swap.
        if orientation == PAPER_PORTRAIT:
            height, width = size.get_height(), size.get_width()
        else:
            height, width = size.get_width(), size.get_height()
        self.__size = PaperSize(size.get_name(), height, width)
        self.__lmargin = lmargin
        self.__rmargin = rmargin
        self.__tmargin = tmargin
        self.__bmargin = bmargin

    def get_size(self):
        """Return the :class:`.PaperSize` describing this sheet.

        :rtype: :class:`.PaperSize`
        """
        return self.__size

    def get_orientation(self):
        """Return the page orientation (PAPER_PORTRAIT or PAPER_LANDSCAPE).

        :rtype: int
        """
        return self.__orientation

    def get_usable_width(self):
        """Return the printable width in centimeters (page width less margins)."""
        return self.__size.get_width() - (self.__rmargin + self.__lmargin)

    def get_usable_height(self):
        """Return the printable height in centimeters (page height less margins)."""
        return self.__size.get_height() - (self.__tmargin + self.__bmargin)

    def get_right_margin(self):
        """Return the right margin in centimeters.

        :rtype: float
        """
        return self.__rmargin

    def get_left_margin(self):
        """Return the left margin in centimeters.

        :rtype: float
        """
        return self.__lmargin

    def get_top_margin(self):
        """Return the top margin in centimeters.

        :rtype: float
        """
        return self.__tmargin

    def get_bottom_margin(self):
        """Return the bottom margin in centimeters.

        :rtype: float
        """
        return self.__bmargin
|
jralls/gramps
|
gramps/gen/plug/docgen/paperstyle.py
|
Python
|
gpl-2.0
| 6,300
|
[
"Brian"
] |
8db7e1d8b25045ce88b10ae44c7a38fa04496950bf37b8328ea224af276f7db8
|
"""
Unit tests for masquerade.
"""
import json
import pickle
from datetime import datetime
from importlib import import_module
from unittest.mock import patch
import pytest
import ddt
from django.conf import settings
from django.test import TestCase, RequestFactory
from django.urls import reverse
from edx_toggles.toggles.testutils import override_waffle_flag
from pytz import UTC
from xblock.runtime import DictKeyValueStore
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from lms.djangoapps.courseware.masquerade import (
MASQUERADE_SETTINGS_KEY,
CourseMasquerade,
MasqueradingKeyValueStore,
get_masquerading_user_group,
setup_masquerade,
)
from lms.djangoapps.courseware.tests.factories import StaffFactory
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase, MasqueradeMixin, masquerade_as_group_member
from lms.djangoapps.courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from openedx.features.course_experience import DISABLE_UNIFIED_COURSE_TAB_FLAG
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase, MasqueradeMixin):
    """
    Base class for masquerade tests that sets up a test course and enrolls a user in the course.
    """
    @classmethod
    def setUpClass(cls):
        # Builds the course tree once per class:
        # course -> chapter -> sequential -> vertical -> problem, plus an info page.
        super().setUpClass()
        cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC)})
        cls.info_page = ItemFactory.create(
            category="course_info", parent_location=cls.course.location,
            data="OOGIE BLOOGIE", display_name="updates"
        )
        cls.chapter = ItemFactory.create(
            parent_location=cls.course.location,
            category="chapter",
            display_name="Test Section",
        )
        cls.sequential_display_name = "Test Masquerade Subsection"
        cls.sequential = ItemFactory.create(
            parent_location=cls.chapter.location,
            category="sequential",
            display_name=cls.sequential_display_name,
        )
        cls.vertical = ItemFactory.create(
            parent_location=cls.sequential.location,
            category="vertical",
            display_name="Test Unit",
        )
        # A two-input option problem; correct answer "Correct" on both inputs.
        problem_xml = OptionResponseXMLFactory().build_xml(
            question_text='The correct answer is Correct',
            num_inputs=2,
            weight=2,
            options=['Correct', 'Incorrect'],
            correct_option='Correct'
        )
        cls.problem_display_name = "TestMasqueradeProblem"
        cls.problem = ItemFactory.create(
            parent_location=cls.vertical.location,
            category='problem',
            data=problem_xml,
            display_name=cls.problem_display_name
        )
    def setUp(self):
        # create_user() is a hook overridden by subclasses to pick the user role.
        super().setUp()
        self.test_user = self.create_user()
        self.login(self.test_user.email, 'test')
        self.enroll(self.course, True)
    def get_courseware_page(self):
        """
        Returns the server response for the courseware page.
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': str(self.course.id),
                'chapter': self.chapter.location.block_id,
                'section': self.sequential.location.block_id,
            }
        )
        return self.client.get(url)
    def get_course_info_page(self):
        """
        Returns the server response for course info page.
        """
        url = reverse(
            'info',
            kwargs={
                'course_id': str(self.course.id),
            }
        )
        return self.client.get(url)
    def get_progress_page(self):
        """
        Returns the server response for progress page.
        """
        url = reverse(
            'progress',
            kwargs={
                'course_id': str(self.course.id),
            }
        )
        return self.client.get(url)
    def verify_staff_debug_present(self, staff_debug_expected):
        """
        Verifies that the staff debug control visibility is as expected (for staff only).
        """
        content = self.get_courseware_page().content.decode('utf-8')
        assert self.sequential_display_name in content, 'Subsection should be visible'
        assert staff_debug_expected == ('Staff Debug Info' in content)
    def get_problem(self):
        """
        Returns the JSON content for the problem in the course.
        """
        problem_url = reverse(
            'xblock_handler',
            kwargs={
                'course_id': str(self.course.id),
                'usage_id': str(self.problem.location),
                'handler': 'xmodule_handler',
                'suffix': 'problem_get'
            }
        )
        return self.client.get(problem_url)
    def verify_show_answer_present(self, show_answer_expected):
        """
        Verifies that "Show answer" is only present when expected (for staff only).
        """
        problem_html = json.loads(self.get_problem().content.decode('utf-8'))['html']
        assert self.problem_display_name in problem_html
        assert show_answer_expected == ('Show answer' in problem_html)
    def ensure_masquerade_as_group_member(self, partition_id, group_id):
        """
        Installs a masquerade for the test_user and test course, to enable the
        user to masquerade as belonging to the specific partition/group combination.
        Also verifies that the call to install the masquerade was successful.
        Arguments:
            partition_id (int): the integer partition id, referring to partitions already
                configured in the course.
            group_id (int); the integer group id, within the specified partition.
        """
        assert 200 == masquerade_as_group_member(self.test_user, self.course, partition_id, group_id)
class NormalStudentVisibilityTest(MasqueradeTestCase):
    """
    Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
    """
    def create_user(self):
        """
        Creates a normal student user.
        """
        # Hook consumed by MasqueradeTestCase.setUp to select the user role.
        return UserFactory()
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_not_visible(self):
        """
        Tests that staff debug control is not present for a student.
        """
        self.verify_staff_debug_present(False)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_not_visible(self):
        """
        Tests that "Show Answer" is not visible for a student.
        """
        self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
    """
    Base class for tests of the masquerade behavior for a staff member.
    """
    def create_user(self):
        """
        Creates a staff user.
        """
        # Hook consumed by MasqueradeTestCase.setUp; logs tests in as course staff.
        return StaffFactory(course_key=self.course.id)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as student.
    """
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_staff_debug_with_masquerade(self):
        """
        Tests that staff debug control is not visible when masquerading as a student.
        """
        # Verify staff initially can see staff debug
        self.verify_staff_debug_present(True)
        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_staff_debug_present(False)
        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_staff_debug_present(True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_show_answer_for_staff(self):
        """
        Tests that "Show Answer" is not visible when masquerading as a student.
        """
        # Verify that staff initially can see "Show Answer".
        self.verify_show_answer_present(True)
        # Toggle masquerade to student
        self.update_masquerade(role='student')
        self.verify_show_answer_present(False)
        # Toggle masquerade back to staff
        self.update_masquerade(role='staff')
        self.verify_show_answer_present(True)
@ddt.ddt
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
    """
    Check for staff being able to masquerade as a specific student.
    """
    def setUp(self):
        # In addition to the staff test_user from the base class, create and
        # enroll a student to masquerade as.
        super().setUp()
        self.student_user = self.create_user()
        self.login_student()
        self.enroll(self.course, True)
    def login_staff(self):
        """ Login as a staff user """
        self.logout()
        self.login(self.test_user.email, 'test')
    def login_student(self):
        """ Login as a student """
        self.logout()
        self.login(self.student_user.email, 'test')
    def submit_answer(self, response1, response2):
        """
        Submit an answer to the single problem in our test course.
        """
        return self.submit_question_answer(
            self.problem_display_name,
            {'2_1': response1, '2_2': response2}
        )
    def get_progress_detail(self):
        """
        Return the reported progress detail for the problem in our test course.
        The return value is a string like u'1/2'.
        """
        json_data = json.loads(self.look_at_question(self.problem_display_name).content.decode('utf-8'))
        progress = '{}/{}'.format(str(json_data['current_score']), str(json_data['total_possible']))
        return progress
    def assertExpectedLanguageInPreference(self, user, expected_language_code):
        """
        This method is a custom assertion verifies that a given user has expected
        language code in the preference and in cookies.
        Arguments:
            user: User model instance
            expected_language_code: string indicating a language code
        """
        assert get_user_preference(user, LANGUAGE_KEY) == expected_language_code
        assert self.client.cookies[settings.LANGUAGE_COOKIE].value == expected_language_code
    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_user_on_self_paced(self):
        """
        Test masquerading as a specific user for course info page when self paced configuration
        "enable_course_home_improvements" flag is set
        Login as a staff user and visit course info page.
        set masquerade to view same page as a specific student and revisit the course info page.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        response = self.get_course_info_page()
        self.assertContains(response, "OOGIE BLOOGIE")
        # Masquerade as the student,enable the self paced configuration, and check we can see the info page.
        SelfPacedConfiguration(enable_course_home_improvements=True).save()
        self.update_masquerade(role='student', username=self.student_user.username)
        response = self.get_course_info_page()
        self.assertContains(response, "OOGIE BLOOGIE")
    @ddt.data(
        'john', # Non-unicode username
        'fôô@bar', # Unicode username with @, which is what the ENABLE_UNICODE_USERNAME feature allows
    )
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student(self, username):
        """
        Test masquerading as a specific user.
        We answer the problem in our test course as the student and as staff user, and we use the
        progress as a proxy to determine who's state we currently see.
        """
        student = UserFactory.create(username=username)
        CourseEnrollment.enroll(student, self.course.id)
        self.logout()
        self.login(student.email, 'test')
        # Answer correctly as the student, and check progress.
        self.submit_answer('Correct', 'Correct')
        assert self.get_progress_detail() == '2/2'
        # Log in as staff, and check the problem is unanswered.
        self.login_staff()
        assert self.get_progress_detail() == '0/2'
        # Masquerade as the student, and check we can see the student state.
        self.update_masquerade(role='student', username=student.username)
        assert self.get_progress_detail() == '2/2'
        # Temporarily override the student state.
        self.submit_answer('Correct', 'Incorrect')
        assert self.get_progress_detail() == '1/2'
        # Reload the page and check we see the student state again.
        self.get_courseware_page()
        assert self.get_progress_detail() == '2/2'
        # Become the staff user again, and check the problem is still unanswered.
        self.update_masquerade(role='staff')
        assert self.get_progress_detail() == '0/2'
        # Verify the student state did not change.
        self.logout()
        self.login(student.email, 'test')
        assert self.get_progress_detail() == '2/2'
    def test_masquerading_with_language_preference(self):
        """
        Tests that masquerading as a specific user for the course does not update preference language
        for the staff.
        Login as a staff user and set user's language preference to english and visit the courseware page.
        Set masquerade to view same page as a specific student having different language preference and
        revisit the courseware page.
        """
        english_language_code = 'en'
        set_user_preference(self.test_user, preference_key=LANGUAGE_KEY, preference_value=english_language_code)
        self.login_staff()
        # Reload the page and check we have expected language preference in system and in cookies.
        self.get_courseware_page()
        self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
        # Set student language preference and set masquerade to view same page the student.
        set_user_preference(self.student_user, preference_key=LANGUAGE_KEY, preference_value='es-419')
        self.update_masquerade(role='student', username=self.student_user.username)
        # Reload the page and check we have expected language preference in system and in cookies.
        self.get_courseware_page()
        self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=True)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_masquerade_as_specific_student_course_info(self):
        """
        Test masquerading as a specific user for course info page.
        We login with login_staff and check course info page content if it's working and then we
        set masquerade to view same page as a specific student and test if it's working or not.
        """
        # Log in as staff, and check we can see the info page.
        self.login_staff()
        content = self.get_course_info_page().content.decode('utf-8')
        assert 'OOGIE BLOOGIE' in content
        # Masquerade as the student, and check we can see the info page.
        self.update_masquerade(role='student', username=self.student_user.username)
        content = self.get_course_info_page().content.decode('utf-8')
        assert 'OOGIE BLOOGIE' in content
    def test_masquerade_as_specific_student_progress(self):
        """
        Test masquerading as a specific user for progress page.
        """
        # Give the student some correct answers, check their progress page
        self.login_student()
        self.submit_answer('Correct', 'Correct')
        student_progress = self.get_progress_page().content.decode('utf-8')
        assert '1 of 2 possible points' not in student_progress
        assert '2 of 2 possible points' in student_progress
        # Staff answers are slightly different
        self.login_staff()
        self.submit_answer('Incorrect', 'Correct')
        staff_progress = self.get_progress_page().content.decode('utf-8')
        assert '2 of 2 possible points' not in staff_progress
        assert '1 of 2 possible points' in staff_progress
        # Should now see the student's scores
        self.update_masquerade(role='student', username=self.student_user.username)
        masquerade_progress = self.get_progress_page().content.decode('utf-8')
        assert '1 of 2 possible points' not in masquerade_progress
        assert '2 of 2 possible points' in masquerade_progress
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as belonging to a group.
    """
    def setUp(self):
        # Add a cohort-scheme user partition with two groups to the course.
        super().setUp()
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        modulestore().update_item(self.course, self.test_user.id)
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_get_masquerade_group(self):
        """
        Tests that a staff member can masquerade as being in a group in a user partition
        """
        # Verify there is no masquerading group initially
        group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
        assert group is None
        # Install a masquerading group
        self.ensure_masquerade_as_group_member(0, 1)
        # Verify that the masquerading group is returned
        group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
        assert group.id == 1
class ReadOnlyKeyValueStore(DictKeyValueStore):
    """
    A KeyValueStore that raises an exception on attempts to modify it.
    Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
    """
    # Raise explicitly instead of using `assert False` so these guards still
    # fire when Python runs with assertions disabled (the -O flag).
    def set(self, key, value):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def delete(self, key):
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")

    def set_many(self, update_dict):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        raise AssertionError("ReadOnlyKeyValueStore may not be modified.")
class FakeSession(dict):
    """ Mock for Django session object. """
    # A plain dict plus this class attribute gives dict semantics with a
    # writable `modified` flag, mirroring Django's session interface.
    modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
    """
    Unit tests for the MasqueradingKeyValueStore class.
    """
    def setUp(self):
        # Wrap a read-only backing store so any write attempted against the
        # underlying store raises; masquerade writes must go to the session.
        super().setUp()
        self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
        self.session = FakeSession()
        self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
    def test_all(self):
        # Reads fall through to the backing store.
        assert self.kvs.get('a') == 42
        assert self.kvs.get('b') is None
        assert self.kvs.get('c') == 'OpenCraft'
        with pytest.raises(KeyError):
            self.kvs.get('d')
        assert self.kvs.has('a')
        assert self.kvs.has('b')
        assert self.kvs.has('c')
        assert not self.kvs.has('d')
        # Writes shadow the backing store without modifying it.
        self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
        self.kvs.set('b', 7)
        assert self.kvs.get('a') == 'Norwegian Blue'
        assert self.kvs.get('b') == 7
        assert self.kvs.get('c') == 'OpenCraft'
        assert self.kvs.get('d') == 'Giraffe'
        # Deletes hide keys even if they still exist in the backing store.
        for key in 'abd':
            assert self.kvs.has(key)
            self.kvs.delete(key)
            with pytest.raises(KeyError):
                self.kvs.get(key)
        assert self.kvs.get('c') == 'OpenCraft'
class CourseMasqueradeTest(TestCase):
    """
    Unit tests for the CourseMasquerade class.
    """
    def test_unpickling_sets_all_attributes(self):
        """
        Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
        the session.
        """
        cmasq = CourseMasquerade(7)
        # Simulate an old pickled object created before `user_name` existed.
        del cmasq.user_name
        pickled_cmasq = pickle.dumps(cmasq)
        unpickled_cmasq = pickle.loads(pickled_cmasq)
        # Unpickling must backfill the missing attribute with its default.
        assert unpickled_cmasq.user_name is None
class SetupMasqueradeTests(SharedModuleStoreTestCase,):
    """
    Tests for the setup_masquerade function.
    """
    def setUp(self):
        # Create a course, a staff member and an enrolled student, plus a
        # request (as the staff user) with a real session engine backing it.
        super().setUp()
        self.course = CourseFactory.create(number='setup-masquerade-test', metadata={'start': datetime.now(UTC)})
        self.request = RequestFactory().request()
        self.staff = StaffFactory(course_key=self.course.id)
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        session_key = "abcdef"
        self.request.user = self.staff
        self.request.session = import_module(settings.SESSION_ENGINE).SessionStore(session_key)
    def test_setup_masquerade(self):
        """
        Verify setup_masquerade swaps the request user for the masqueraded
        student while preserving the real (staff) user.
        """
        masquerade_settings = {
            self.course.id: CourseMasquerade(
                course_key=self.course.id,
                role='student',
                user_name=self.student.username
            )
        }
        self.request.session[MASQUERADE_SETTINGS_KEY] = masquerade_settings
        course_masquerade, masquerade_user = setup_masquerade(
            self.request,
            self.course.id,
            staff_access=True
        )
        # Warning: the SafeSessions middleware relies on the `real_user` attribute to see if a
        # user is masquerading as another user. If the name of this attribute is changing, please update
        # the check in SafeSessionMiddleware._verify_user as well.
        assert masquerade_user.real_user == self.staff
        assert masquerade_user == self.student
        assert self.request.user.masquerade_settings == masquerade_settings
        assert course_masquerade == masquerade_settings[self.course.id]
|
eduNEXT/edunext-platform
|
lms/djangoapps/courseware/tests/test_masquerade.py
|
Python
|
agpl-3.0
| 22,699
|
[
"VisIt"
] |
d6a77521844ae6cefe0a7c31f5b4757cf00560b42531ef7c0fa5f63932ee98ed
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driver for Microsoft Azure Virtual Machines service.
http://azure.microsoft.com/en-us/services/virtual-machines/
"""
import re
import time
import collections
import random
import sys
import copy
import base64
from datetime import datetime
from xml.dom import minidom
from xml.sax.saxutils import escape as xml_escape
from libcloud.utils.py3 import ET
from libcloud.common.azure import AzureServiceManagementConnection
from libcloud.common.azure import AzureRedirectException
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import NodeState
from libcloud.common.types import LibcloudError
from libcloud.utils.py3 import _real_unicode
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import ensure_string
from libcloud.utils.py3 import urlquote as url_quote
from libcloud.utils.misc import ReprMixin
# Backwards-compatible alias for callers expecting the httplib name.
HTTPSConnection = httplib.HTTPSConnection
# Python 2/3 text-type shims: on Python 2, _str() UTF-8-encodes unicode
# values (for embedding in byte-oriented URLs/XML); on Python 3 it is
# simply str, and _unicode_type is the native str type.
if sys.version_info < (3,):
    _unicode_type = unicode # NOQA pylint: disable=undefined-variable
    def _str(value):
        # Encode unicode to bytes; everything else goes through str().
        if isinstance(value, unicode): # NOQA pylint: disable=undefined-variable
            return value.encode('utf-8')
        return str(value)
else:
    _str = str
    _unicode_type = str
# Azure Service Management API endpoint host used for every request.
AZURE_SERVICE_MANAGEMENT_HOST = 'management.core.windows.net'
# x-ms-version API version header value.
X_MS_VERSION = '2013-08-01'
# Matches image ids that indicate a Windows-based OS image, so create_node()
# can build a WindowsConfigurationSet instead of a Linux one. Compiled with
# re.I: the call site intended a case-insensitive search (it passed re.I to
# Pattern.search(), where that argument is actually the start position).
WINDOWS_SERVER_REGEX = re.compile(
    r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk',
    re.I
)
"""
Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
Prices are for Linux instances in East US data center. To see what pricing will
actually be, visit:
http://azure.microsoft.com/en-gb/pricing/details/virtual-machines/
"""
AZURE_COMPUTE_INSTANCE_TYPES = {
'A0': {
'id': 'ExtraSmall',
'name': 'Extra Small Instance',
'ram': 768,
'disk': 127,
'bandwidth': None,
'price': '0.0211',
'max_data_disks': 1,
'cores': 'Shared'
},
'A1': {
'id': 'Small',
'name': 'Small Instance',
'ram': 1792,
'disk': 127,
'bandwidth': None,
'price': '0.0633',
'max_data_disks': 2,
'cores': 1
},
'A2': {
'id': 'Medium',
'name': 'Medium Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.1266',
'max_data_disks': 4,
'cores': 2
},
'A3': {
'id': 'Large',
'name': 'Large Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.2531',
'max_data_disks': 8,
'cores': 4
},
'A4': {
'id': 'ExtraLarge',
'name': 'Extra Large Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.5062',
'max_data_disks': 16,
'cores': 8
},
'A5': {
'id': 'A5',
'name': 'Memory Intensive Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.2637',
'max_data_disks': 4,
'cores': 2
},
'A6': {
'id': 'A6',
'name': 'A6 Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.5273',
'max_data_disks': 8,
'cores': 4
},
'A7': {
'id': 'A7',
'name': 'A7 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.0545',
'max_data_disks': 16,
'cores': 8
},
'A8': {
'id': 'A8',
'name': 'A8 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '2.0774',
'max_data_disks': 16,
'cores': 8
},
'A9': {
'id': 'A9',
'name': 'A9 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '4.7137',
'max_data_disks': 16,
'cores': 16
},
'A10': {
'id': 'A10',
'name': 'A10 Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '1.2233',
'max_data_disks': 16,
'cores': 8
},
'A11': {
'id': 'A11',
'name': 'A11 Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '2.1934',
'max_data_disks': 16,
'cores': 16
},
'D1': {
'id': 'Standard_D1',
'name': 'D1 Faster Compute Instance',
'ram': 3584,
'disk': 127,
'bandwidth': None,
'price': '0.0992',
'max_data_disks': 2,
'cores': 1
},
'D2': {
'id': 'Standard_D2',
'name': 'D2 Faster Compute Instance',
'ram': 7168,
'disk': 127,
'bandwidth': None,
'price': '0.1983',
'max_data_disks': 4,
'cores': 2
},
'D3': {
'id': 'Standard_D3',
'name': 'D3 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.3965',
'max_data_disks': 8,
'cores': 4
},
'D4': {
'id': 'Standard_D4',
'name': 'D4 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.793',
'max_data_disks': 16,
'cores': 8
},
'D11': {
'id': 'Standard_D11',
'name': 'D11 Faster Compute Instance',
'ram': 14336,
'disk': 127,
'bandwidth': None,
'price': '0.251',
'max_data_disks': 4,
'cores': 2
},
'D12': {
'id': 'Standard_D12',
'name': 'D12 Faster Compute Instance',
'ram': 28672,
'disk': 127,
'bandwidth': None,
'price': '0.502',
'max_data_disks': 8,
'cores': 4
},
'D13': {
'id': 'Standard_D13',
'name': 'D13 Faster Compute Instance',
'ram': 57344,
'disk': 127,
'bandwidth': None,
'price': '0.9038',
'max_data_disks': 16,
'cores': 8
},
'D14': {
'id': 'Standard_D14',
'name': 'D14 Faster Compute Instance',
'ram': 114688,
'disk': 127,
'bandwidth': None,
'price': '1.6261',
'max_data_disks': 32,
'cores': 16
}
}
# Maps snake_case attribute names to their Azure XML element names for
# fields whose element name is not a straightforward CamelCase transform
# of the attribute (acronyms, hyphenated headers, etc.).
_KNOWN_SERIALIZATION_XFORMS = {
    'include_apis': 'IncludeAPIs',
    'message_id': 'MessageId',
    'content_md5': 'Content-MD5',
    'last_modified': 'Last-Modified',
    'cache_control': 'Cache-Control',
    'account_admin_live_email_id': 'AccountAdminLiveEmailId',
    'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
    'subscription_id': 'SubscriptionID',
    'fqdn': 'FQDN',
    'private_id': 'PrivateID',
    'os_virtual_hard_disk': 'OSVirtualHardDisk',
    'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
    'logical_size_in_gb': 'LogicalSizeInGB',
    'os': 'OS',
    'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
    'copy_id': 'CopyId',
    'os_disk_configuration': 'OSDiskConfiguration',
    'is_dns_programmed': 'IsDnsProgrammed'
}
class AzureNodeDriver(NodeDriver):
    # Connection class used for all Service Management API calls.
    connectionCls = AzureServiceManagementConnection
    name = 'Azure Virtual machines'
    website = 'http://azure.microsoft.com/en-us/services/virtual-machines/'
    type = Provider.AZURE
    # Static size catalog; see AZURE_COMPUTE_INSTANCE_TYPES above.
    _instance_types = AZURE_COMPUTE_INSTANCE_TYPES
    _blob_url = ".blob.core.windows.net"
    features = {'create_node': ['password']}
    # Result type of _get_cloud_service_location(): a flag telling whether
    # the value is an affinity-group name or a plain location string.
    service_location = collections.namedtuple(
        'service_location',
        ['is_affinity_group', 'service_location']
    )
    # Maps Azure role-instance status strings onto libcloud NodeState values.
    NODE_STATE_MAP = {
        'RoleStateUnknown': NodeState.UNKNOWN,
        'CreatingVM': NodeState.PENDING,
        'StartingVM': NodeState.PENDING,
        'Provisioning': NodeState.PENDING,
        'CreatingRole': NodeState.PENDING,
        'StartingRole': NodeState.PENDING,
        'ReadyRole': NodeState.RUNNING,
        'BusyRole': NodeState.PENDING,
        'StoppingRole': NodeState.PENDING,
        'StoppingVM': NodeState.PENDING,
        'DeletingVM': NodeState.PENDING,
        'StoppedVM': NodeState.STOPPED,
        'RestartingRole': NodeState.REBOOTING,
        'CyclingRole': NodeState.TERMINATED,
        'FailedStartingRole': NodeState.TERMINATED,
        'FailedStartingVM': NodeState.TERMINATED,
        'UnresponsiveRole': NodeState.TERMINATED,
        'StoppedDeallocated': NodeState.TERMINATED,
    }
    def __init__(self, subscription_id=None, key_file=None, **kwargs):
        """
        :param subscription_id: Azure subscription id in the form of a GUID.
        :type subscription_id: ``str``
        :param key_file: Path to the Azure X509 certificate in .pem form.
        :type key_file: ``str``
        :keyword follow_redirects: Whether management-endpoint redirects
                                   should be followed (default ``True``).
        :type follow_redirects: ``bool``
        """
        self.subscription_id = subscription_id
        self.key_file = key_file
        # Consumed here; remaining kwargs are forwarded to the base driver.
        self.follow_redirects = kwargs.get('follow_redirects', True)
        super(AzureNodeDriver, self).__init__(
            self.subscription_id,
            self.key_file,
            secure=True,
            **kwargs
        )
def list_sizes(self):
"""
Lists all sizes
:rtype: ``list`` of :class:`NodeSize`
"""
sizes = []
for _, values in self._instance_types.items():
node_size = self._to_node_size(copy.deepcopy(values))
sizes.append(node_size)
return sizes
def list_images(self, location=None):
"""
Lists all images
:rtype: ``list`` of :class:`NodeImage`
"""
data = self._perform_get(self._get_image_path(), Images)
custom_image_data = self._perform_get(
self._get_vmimage_path(),
VMImages
)
images = [self._to_image(i) for i in data]
images.extend(self._vm_to_image(j) for j in custom_image_data)
if location is not None:
images = [
image
for image in images
if location in image.extra["location"]
]
return images
def list_locations(self):
"""
Lists all locations
:rtype: ``list`` of :class:`NodeLocation`
"""
data = self._perform_get(
'/' + self.subscription_id + '/locations',
Locations
)
return [self._to_location(l) for l in data]
    def list_nodes(self, ex_cloud_service_name):
        """
        List all nodes

        ex_cloud_service_name parameter is used to scope the request
        to a specific Cloud Service. This is a required parameter as
        nodes cannot exist outside of a Cloud Service nor be shared
        between a Cloud Service within Azure.

        :param ex_cloud_service_name: Cloud Service name
        :type ex_cloud_service_name: ``str``

        :rtype: ``list`` of :class:`Node`
        """
        response = self._perform_get(
            self._get_hosted_service_path(ex_cloud_service_name) +
            '?embed-detail=True',
            None
        )
        self.raise_for_response(response, 200)
        data = self._parse_response(response, HostedService)
        # Collect the deployment's virtual IPs (if any) so they can be
        # surfaced as public IPs on each node.
        vips = None
        if (len(data.deployments) > 0 and
                data.deployments[0].virtual_ips is not None):
            vips = [vip.address for vip in data.deployments[0].virtual_ips]
        # A cloud service with no deployments raises IndexError on [0];
        # treat that as "no nodes" rather than an error.
        try:
            return [
                self._to_node(n, ex_cloud_service_name, vips)
                for n in data.deployments[0].role_instance_list
            ]
        except IndexError:
            return []
def reboot_node(self, node, ex_cloud_service_name=None,
ex_deployment_slot=None):
"""
Reboots a node.
ex_cloud_service_name parameter is used to scope the request
to a specific Cloud Service. This is a required parameter as
nodes cannot exist outside of a Cloud Service nor be shared
between a Cloud Service within Azure.
:param ex_cloud_service_name: Cloud Service name
:type ex_cloud_service_name: ``str``
:param ex_deployment_slot: Options are "production" (default)
or "Staging". (Optional)
:type ex_deployment_slot: ``str``
:rtype: ``bool``
"""
if ex_cloud_service_name is None:
if node.extra is not None:
ex_cloud_service_name = node.extra.get(
'ex_cloud_service_name'
)
if not ex_cloud_service_name:
raise ValueError("ex_cloud_service_name is required.")
if not ex_deployment_slot:
ex_deployment_slot = "Production"
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
try:
response = self._perform_post(
self._get_deployment_path_using_name(
ex_cloud_service_name,
_deployment_name
) + '/roleinstances/' + _str(node.id) + '?comp=reboot',
''
)
self.raise_for_response(response, 202)
if self._parse_response_for_async_op(response):
return True
else:
return False
except Exception:
return False
def list_volumes(self, node=None):
"""
Lists volumes of the disks in the image repository that are
associated with the specified subscription.
Pass Node object to scope the list of volumes to a single
instance.
:rtype: ``list`` of :class:`StorageVolume`
"""
data = self._perform_get(self._get_disk_path(), Disks)
volumes = [self._to_volume(volume=v, node=node) for v in data]
return volumes
def create_node(self, name, size, image, ex_cloud_service_name,
ex_storage_service_name=None, ex_new_deployment=False,
ex_deployment_slot="Production", ex_deployment_name=None,
ex_admin_user_id="azureuser", ex_custom_data=None,
ex_virtual_network_name=None, ex_network_config=None,
auth=None, **kwargs):
"""
Create Azure Virtual Machine
Reference: http://bit.ly/1fIsCb7
[www.windowsazure.com/en-us/documentation/]
We default to:
+ 3389/TCP - RDP - 1st Microsoft instance.
+ RANDOM/TCP - RDP - All succeeding Microsoft instances.
+ 22/TCP - SSH - 1st Linux instance
+ RANDOM/TCP - SSH - All succeeding Linux instances.
The above replicates the standard behavior of the Azure UI.
You can retrieve the assigned ports to each instance by
using the following private function:
_get_endpoint_ports(service_name)
Returns public,private port key pair.
@inherits: :class:`NodeDriver.create_node`
:keyword image: The image to use when creating this node
:type image: `NodeImage`
:keyword size: The size of the instance to create
:type size: `NodeSize`
:keyword ex_cloud_service_name: Required.
Name of the Azure Cloud Service.
:type ex_cloud_service_name: ``str``
:keyword ex_storage_service_name: Optional:
Name of the Azure Storage Service.
:type ex_storage_service_name: ``str``
:keyword ex_new_deployment: Optional. Tells azure to create a
new deployment rather than add to an
existing one.
:type ex_new_deployment: ``boolean``
:keyword ex_deployment_slot: Optional: Valid values: production|
staging.
Defaults to production.
:type ex_deployment_slot: ``str``
:keyword ex_deployment_name: Optional. The name of the
deployment.
If this is not passed in we default
to using the Cloud Service name.
:type ex_deployment_name: ``str``
:type ex_custom_data: ``str``
:keyword ex_custom_data: Optional script or other data which is
injected into the VM when it's beginning
provisioned.
:keyword ex_admin_user_id: Optional. Defaults to 'azureuser'.
:type ex_admin_user_id: ``str``
:keyword ex_virtual_network_name: Optional. If this is not passed
in no virtual network is used.
:type ex_virtual_network_name: ``str``
:keyword ex_network_config: Optional. The ConfigurationSet to use
for network configuration
:type ex_network_config: `ConfigurationSet`
"""
# TODO: Refactor this method to make it more readable, split it into
# multiple smaller methods
auth = self._get_and_check_auth(auth)
password = auth.password
if not isinstance(size, NodeSize):
raise ValueError('Size must be an instance of NodeSize')
if not isinstance(image, NodeImage):
raise ValueError(
"Image must be an instance of NodeImage, "
"produced by list_images()"
)
# Retrieve a list of currently available nodes for the provided cloud
# service
node_list = self.list_nodes(
ex_cloud_service_name=ex_cloud_service_name
)
if ex_network_config is None:
network_config = ConfigurationSet()
else:
network_config = ex_network_config
network_config.configuration_set_type = 'NetworkConfiguration'
# Base64 encode custom data if provided
if ex_custom_data:
ex_custom_data = self._encode_base64(data=ex_custom_data)
# We do this because we need to pass a Configuration to the
# method. This will be either Linux or Windows.
if WINDOWS_SERVER_REGEX.search(image.id, re.I):
machine_config = WindowsConfigurationSet(
computer_name=name,
admin_password=password,
admin_user_name=ex_admin_user_id
)
machine_config.domain_join = None
if not node_list or ex_new_deployment:
port = "3389"
else:
port = random.randint(41952, 65535)
endpoints = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
)
for instances in endpoints.role_instance_list:
ports = [ep.public_port for ep in
instances.instance_endpoints]
while port in ports:
port = random.randint(41952, 65535)
endpoint = ConfigurationSetInputEndpoint(
name='Remote Desktop',
protocol='tcp',
port=port,
local_port='3389',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
else:
if not node_list or ex_new_deployment:
port = "22"
else:
port = random.randint(41952, 65535)
endpoints = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
)
for instances in endpoints.role_instance_list:
ports = []
if instances.instance_endpoints is not None:
for ep in instances.instance_endpoints:
ports += [ep.public_port]
while port in ports:
port = random.randint(41952, 65535)
endpoint = ConfigurationSetInputEndpoint(
name='SSH',
protocol='tcp',
port=port,
local_port='22',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
machine_config = LinuxConfigurationSet(
name,
ex_admin_user_id,
password,
False,
ex_custom_data
)
network_config.input_endpoints.items.append(endpoint)
_storage_location = self._get_cloud_service_location(
service_name=ex_cloud_service_name
)
if ex_storage_service_name is None:
ex_storage_service_name = ex_cloud_service_name
ex_storage_service_name = re.sub(
r'[\W_-]+',
'',
ex_storage_service_name.lower(),
flags=re.UNICODE
)
if self._is_storage_service_unique(
service_name=ex_storage_service_name):
self._create_storage_account(
service_name=ex_storage_service_name,
location=_storage_location.service_location,
is_affinity_group=_storage_location.is_affinity_group
)
# OK, bit annoying here. You must create a deployment before
# you can create an instance; however, the deployment function
# creates the first instance, but all subsequent instances
# must be created using the add_role function.
#
# So, yeah, annoying.
if not node_list or ex_new_deployment:
# This is the first node in this cloud service.
if not ex_deployment_name:
ex_deployment_name = ex_cloud_service_name
vm_image_id = None
disk_config = None
if image.extra.get('vm_image', False):
vm_image_id = image.id
# network_config = None
else:
blob_url = "http://%s.blob.core.windows.net" % (
ex_storage_service_name)
# Azure's pattern in the UI.
disk_name = "%s-%s-%s.vhd" % (
ex_cloud_service_name,
name,
time.strftime("%Y-%m-%d")
)
media_link = "%s/vhds/%s" % (blob_url, disk_name)
disk_config = OSVirtualHardDisk(image.id, media_link)
response = self._perform_post(
self._get_deployment_path_using_name(ex_cloud_service_name),
AzureXmlSerializer.virtual_machine_deployment_to_xml(
ex_deployment_name,
ex_deployment_slot,
name,
name,
machine_config,
disk_config,
'PersistentVMRole',
network_config,
None,
None,
size.id,
ex_virtual_network_name,
vm_image_id
)
)
self.raise_for_response(response, 202)
self._ex_complete_async_azure_operation(response)
else:
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
vm_image_id = None
disk_config = None
if image.extra.get('vm_image', False):
vm_image_id = image.id
# network_config = None
else:
blob_url = "http://%s.blob.core.windows.net" % (
ex_storage_service_name
)
disk_name = "%s-%s-%s.vhd" % (
ex_cloud_service_name,
name,
time.strftime("%Y-%m-%d")
)
media_link = "%s/vhds/%s" % (blob_url, disk_name)
disk_config = OSVirtualHardDisk(image.id, media_link)
path = self._get_role_path(ex_cloud_service_name, _deployment_name)
body = AzureXmlSerializer.add_role_to_xml(
name, # role_name
machine_config, # system_config
disk_config, # os_virtual_hard_disk
'PersistentVMRole', # role_type
network_config, # network_config
None, # availability_set_name
None, # data_virtual_hard_disks
vm_image_id, # vm_image
size.id # role_size
)
response = self._perform_post(path, body)
self.raise_for_response(response, 202)
self._ex_complete_async_azure_operation(response)
return Node(
id=name,
name=name,
state=NodeState.PENDING,
public_ips=[],
private_ips=[],
driver=self.connection.driver,
extra={
'ex_cloud_service_name': ex_cloud_service_name
}
)
    def destroy_node(self, node, ex_cloud_service_name=None,
                     ex_deployment_slot="Production"):
        """
        Remove Azure Virtual Machine

        This removes the instance, but does not
        remove the disk. You will need to use destroy_volume.
        Azure sometimes has an issue where it will hold onto
        a blob lease for an extended amount of time.

        :keyword ex_cloud_service_name: Required.
                                        Name of the Azure Cloud Service.
        :type ex_cloud_service_name: ``str``

        :keyword ex_deployment_slot: Optional: The name of the deployment
                                     slot. If this is not passed in we
                                     default to production.
        :type ex_deployment_slot: ``str``
        """
        if not isinstance(node, Node):
            raise ValueError("A libcloud Node object is required.")
        # Fall back to the service name recorded on the node at creation.
        if ex_cloud_service_name is None and node.extra is not None:
            ex_cloud_service_name = node.extra.get('ex_cloud_service_name')
        if not ex_cloud_service_name:
            raise ValueError("Unable to get ex_cloud_service_name from Node.")
        _deployment = self._get_deployment(
            service_name=ex_cloud_service_name,
            deployment_slot=ex_deployment_slot
        )
        _deployment_name = _deployment.name
        _server_deployment_count = len(_deployment.role_instance_list)
        # If other roles remain, delete only this role; otherwise delete
        # the whole deployment (a deployment cannot hold zero roles).
        if _server_deployment_count > 1:
            path = self._get_role_path(
                ex_cloud_service_name,
                _deployment_name,
                node.id
            )
        else:
            path = self._get_deployment_path_using_name(
                ex_cloud_service_name,
                _deployment_name
            )
        # NOTE(review): 'comp=media' appears to request deletion of
        # associated media as well, which seems to contradict the
        # docstring's "does not remove the disk" — confirm against the
        # Service Management API docs.
        path += '?comp=media'
        self._perform_delete(path)
        return True
def ex_list_cloud_services(self):
return self._perform_get(
self._get_hosted_service_path(),
HostedServices
)
def ex_create_cloud_service(self, name, location, description=None,
extended_properties=None):
"""
Create an azure cloud service.
:param name: Name of the service to create
:type name: ``str``
:param location: Standard azure location string
:type location: ``str``
:param description: Optional description
:type description: ``str``
:param extended_properties: Optional extended_properties
:type extended_properties: ``dict``
:rtype: ``bool``
"""
response = self._perform_cloud_service_create(
self._get_hosted_service_path(),
AzureXmlSerializer.create_hosted_service_to_xml(
name,
self._encode_base64(name),
description,
location,
None,
extended_properties
)
)
self.raise_for_response(response, 201)
return True
def ex_destroy_cloud_service(self, name):
"""
Delete an azure cloud service.
:param name: Name of the cloud service to destroy.
:type name: ``str``
:rtype: ``bool``
"""
response = self._perform_cloud_service_delete(
self._get_hosted_service_path(name)
)
self.raise_for_response(response, 200)
return True
def ex_add_instance_endpoints(self, node, endpoints,
ex_deployment_slot="Production"):
all_endpoints = [
{
"name": endpoint.name,
"protocol": endpoint.protocol,
"port": endpoint.public_port,
"local_port": endpoint.local_port,
}
for endpoint in node.extra['instance_endpoints']
]
all_endpoints.extend(endpoints)
# pylint: disable=assignment-from-no-return
result = self.ex_set_instance_endpoints(node, all_endpoints,
ex_deployment_slot)
return result
def ex_set_instance_endpoints(self, node, endpoints,
ex_deployment_slot="Production"):
"""
For example::
endpoint = ConfigurationSetInputEndpoint(
name='SSH',
protocol='tcp',
port=port,
local_port='22',
load_balanced_endpoint_set_name=None,
enable_direct_server_return=False
)
{
'name': 'SSH',
'protocol': 'tcp',
'port': port,
'local_port': '22'
}
"""
ex_cloud_service_name = node.extra['ex_cloud_service_name']
vm_role_name = node.name
network_config = ConfigurationSet()
network_config.configuration_set_type = 'NetworkConfiguration'
for endpoint in endpoints:
new_endpoint = ConfigurationSetInputEndpoint(**endpoint)
network_config.input_endpoints.items.append(new_endpoint)
_deployment_name = self._get_deployment(
service_name=ex_cloud_service_name,
deployment_slot=ex_deployment_slot
).name
response = self._perform_put(
self._get_role_path(
ex_cloud_service_name,
_deployment_name,
vm_role_name
),
AzureXmlSerializer.add_role_to_xml(
None, # role_name
None, # system_config
None, # os_virtual_hard_disk
'PersistentVMRole', # role_type
network_config, # network_config
None, # availability_set_name
None, # data_virtual_hard_disks
None, # vm_image
None # role_size
)
)
self.raise_for_response(response, 202)
def ex_create_storage_service(self, name, location,
description=None, affinity_group=None,
extended_properties=None):
"""
Create an azure storage service.
:param name: Name of the service to create
:type name: ``str``
:param location: Standard azure location string
:type location: ``str``
:param description: (Optional) Description of storage service.
:type description: ``str``
:param affinity_group: (Optional) Azure affinity group.
:type affinity_group: ``str``
:param extended_properties: (Optional) Additional configuration
options support by Azure.
:type extended_properties: ``dict``
:rtype: ``bool``
"""
response = self._perform_storage_service_create(
self._get_storage_service_path(),
AzureXmlSerializer.create_storage_service_to_xml(
service_name=name,
label=self._encode_base64(name),
description=description,
location=location,
affinity_group=affinity_group,
extended_properties=extended_properties
)
)
self.raise_for_response(response, 202)
return True
def ex_destroy_storage_service(self, name):
"""
Destroy storage service. Storage service must not have any active
blobs. Sometimes Azure likes to hold onto volumes after they are
deleted for an inordinate amount of time, so sleep before calling
this method after volume deletion.
:param name: Name of storage service.
:type name: ``str``
:rtype: ``bool``
"""
response = self._perform_storage_service_delete(
self._get_storage_service_path(name)
)
self.raise_for_response(response, 200)
return True
"""
Functions not implemented
"""
def create_volume_snapshot(self):
raise NotImplementedError(
'You cannot create snapshots of '
'Azure VMs at this time.'
)
def attach_volume(self):
raise NotImplementedError(
'attach_volume is not supported '
'at this time.'
)
def create_volume(self):
raise NotImplementedError(
'create_volume is not supported '
'at this time.'
)
def detach_volume(self):
raise NotImplementedError(
'detach_volume is not supported '
'at this time.'
)
def destroy_volume(self):
raise NotImplementedError(
'destroy_volume is not supported '
'at this time.'
)
"""
Private Functions
"""
def _perform_cloud_service_create(self, path, data):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = data
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_cloud_service_delete(self, path):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_storage_service_create(self, path, data):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = data
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_storage_service_delete(self, path):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
    def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):
        """
        Convert the data from a Azure response object into a Node
        """
        remote_desktop_port = ''
        ssh_port = ''
        # Default the public IPs to the deployment's virtual IPs; if the
        # instance has its own endpoints, use that endpoint's VIP instead.
        public_ips = virtual_ips or []
        if data.instance_endpoints is not None:
            if len(data.instance_endpoints) >= 1:
                public_ips = [data.instance_endpoints[0].vip]
            # Surface the well-known RDP/SSH public ports (empty string
            # when the endpoint is absent).
            for port in data.instance_endpoints:
                if port.name == 'Remote Desktop':
                    remote_desktop_port = port.public_port
                if port.name == "SSH":
                    ssh_port = port.public_port
        return Node(
            id=data.role_name,
            name=data.role_name,
            # Unrecognized Azure status strings map to UNKNOWN.
            state=self.NODE_STATE_MAP.get(
                data.instance_status,
                NodeState.UNKNOWN
            ),
            public_ips=public_ips,
            private_ips=[data.ip_address],
            driver=self.connection.driver,
            extra={
                'instance_endpoints': data.instance_endpoints,
                'remote_desktop_port': remote_desktop_port,
                'ssh_port': ssh_port,
                'power_state': data.power_state,
                'instance_size': data.instance_size,
                'ex_cloud_service_name': ex_cloud_service_name
            }
        )
def _to_location(self, data):
"""
Convert the data from a Azure response object into a location
"""
country = data.display_name
if "Asia" in data.display_name:
country = "Asia"
if "Europe" in data.display_name:
country = "Europe"
if "US" in data.display_name:
country = "US"
if "Japan" in data.display_name:
country = "Japan"
if "Brazil" in data.display_name:
country = "Brazil"
vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes
return AzureNodeLocation(
id=data.name,
name=data.display_name,
country=country,
driver=self.connection.driver,
available_services=data.available_services,
virtual_machine_role_sizes=vm_role_sizes
)
def _to_node_size(self, data):
"""
Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize
"""
return NodeSize(
id=data["id"],
name=data["name"],
ram=data["ram"],
disk=data["disk"],
bandwidth=data["bandwidth"],
price=data["price"],
driver=self.connection.driver,
extra={
'max_data_disks': data["max_data_disks"],
'cores': data["cores"]
}
)
def _to_image(self, data):
return NodeImage(
id=data.name,
name=data.label,
driver=self.connection.driver,
extra={
'os': data.os,
'category': data.category,
'description': data.description,
'location': data.location,
'affinity_group': data.affinity_group,
'media_link': data.media_link,
'vm_image': False
}
)
def _vm_to_image(self, data):
return NodeImage(
id=data.name,
name=data.label,
driver=self.connection.driver,
extra={
'os': data.os_disk_configuration.os,
'category': data.category,
'location': data.location,
'media_link': data.os_disk_configuration.media_link,
'affinity_group': data.affinity_group,
'deployment_name': data.deployment_name,
'vm_image': True
}
)
def _to_volume(self, volume, node):
extra = {
'affinity_group': volume.affinity_group,
'os': volume.os,
'location': volume.location,
'media_link': volume.media_link,
'source_image_name': volume.source_image_name
}
role_name = getattr(volume.attached_to, 'role_name', None)
hosted_service_name = getattr(
volume.attached_to,
'hosted_service_name',
None
)
deployment_name = getattr(
volume.attached_to,
'deployment_name',
None
)
if role_name is not None:
extra['role_name'] = role_name
if hosted_service_name is not None:
extra['hosted_service_name'] = hosted_service_name
if deployment_name is not None:
extra['deployment_name'] = deployment_name
if node:
if role_name is not None and role_name == node.id:
return StorageVolume(
id=volume.name,
name=volume.name,
size=int(volume.logical_disk_size_in_gb),
driver=self.connection.driver,
extra=extra
)
else:
return StorageVolume(
id=volume.name,
name=volume.name,
size=int(volume.logical_disk_size_in_gb),
driver=self.connection.driver,
extra=extra
)
def _get_deployment(self, **kwargs):
_service_name = kwargs['service_name']
_deployment_slot = kwargs['deployment_slot']
response = self._perform_get(
self._get_deployment_path_using_slot(
_service_name,
_deployment_slot
),
None
)
self.raise_for_response(response, 200)
return self._parse_response(response, Deployment)
def _get_cloud_service_location(self, service_name=None):
if not service_name:
raise ValueError("service_name is required.")
res = self._perform_get(
'%s?embed-detail=False' % (
self._get_hosted_service_path(service_name)
),
HostedService
)
_affinity_group = res.hosted_service_properties.affinity_group
_cloud_service_location = res.hosted_service_properties.location
if _affinity_group is not None and _affinity_group != '':
return self.service_location(True, _affinity_group)
elif _cloud_service_location is not None:
return self.service_location(False, _cloud_service_location)
else:
return None
def _is_storage_service_unique(self, service_name=None):
if not service_name:
raise ValueError("service_name is required.")
_check_availability = self._perform_get(
'%s/operations/isavailable/%s%s' % (
self._get_storage_service_path(),
_str(service_name),
''
),
AvailabilityResponse
)
self.raise_for_response(_check_availability, 200)
return _check_availability.result
    def _create_storage_account(self, **kwargs):
        """
        Create a storage account and block until the async operation
        completes.

        Expected keyword arguments: ``service_name``, ``location`` and
        ``is_affinity_group``. When ``is_affinity_group`` is True the
        ``location`` value is sent in the affinity-group slot of the
        request; otherwise it is sent in the location slot.
        """
        if kwargs['is_affinity_group'] is True:
            # 'location' actually carries the affinity-group name here;
            # the real location slot is left as None.
            response = self._perform_post(
                self._get_storage_service_path(),
                AzureXmlSerializer.create_storage_service_input_to_xml(
                    kwargs['service_name'],
                    kwargs['service_name'],
                    self._encode_base64(kwargs['service_name']),
                    kwargs['location'],
                    None,  # Location
                    True,  # geo_replication_enabled
                    None  # extended_properties
                )
            )
            self.raise_for_response(response, 202)
        else:
            response = self._perform_post(
                self._get_storage_service_path(),
                AzureXmlSerializer.create_storage_service_input_to_xml(
                    kwargs['service_name'],
                    kwargs['service_name'],
                    self._encode_base64(kwargs['service_name']),
                    None,  # Affinity Group
                    kwargs['location'],  # Location
                    True,  # geo_replication_enabled
                    None  # extended_properties
                )
            )
            self.raise_for_response(response, 202)
        # We need to wait for this to be created before we can
        # create the storage container and the instance.
        self._ex_complete_async_azure_operation(
            response,
            "create_storage_account"
        )
def _get_operation_status(self, request_id):
return self._perform_get(
'/' + self.subscription_id + '/operations/' + _str(request_id),
Operation
)
def _perform_get(self, path, response_type):
request = AzureHTTPRequest()
request.method = 'GET'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return self._parse_response(response, response_type)
return response
def _perform_post(self, path, body, response_type=None):
request = AzureHTTPRequest()
request.method = 'POST'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = ensure_string(self._get_request_body(body))
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_put(self, path, body, response_type=None):
request = AzureHTTPRequest()
request.method = 'PUT'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.body = ensure_string(self._get_request_body(body))
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
return response
def _perform_delete(self, path):
request = AzureHTTPRequest()
request.method = 'DELETE'
request.host = AZURE_SERVICE_MANAGEMENT_HOST
request.path = path
request.path, request.query = self._update_request_uri_query(request)
request.headers = self._update_management_header(request)
response = self._perform_request(request)
self.raise_for_response(response, 202)
def _perform_request(self, request):
try:
return self.connection.request(
action=request.path,
data=request.body,
headers=request.headers,
method=request.method
)
except AzureRedirectException as e:
parsed_url = urlparse.urlparse(e.location)
request.host = parsed_url.netloc
return self._perform_request(request)
except Exception as e:
raise e
    def _update_request_uri_query(self, request):
        """
        Pull the query string out of the URI and move it into the query
        portion of the request object, then rebuild ``request.path``
        with the URL-encoded query appended.

        If the request already carries query parameters, the parameters
        found in the URI are appended after the existing ones.
        """
        if '?' in request.path:
            request.path, _, query_string = request.path.partition('?')
            if query_string:
                query_params = query_string.split('&')
                for query in query_params:
                    if '=' in query:
                        name, _, value = query.partition('=')
                        request.query.append((name, value))
        request.path = url_quote(request.path, '/()$=\',')
        # add encoded queries to request.path.
        if request.query:
            request.path += '?'
            for name, value in request.query:
                if value is not None:
                    request.path += '%s=%s%s' % (
                        name,
                        url_quote(value, '/()$=\','),
                        '&'
                    )
            # Drop the trailing '&' (or the bare '?' when every query
            # value was None).
            request.path = request.path[:-1]
        return request.path, request.query
def _update_management_header(self, request):
"""
Add additional headers for management.
"""
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers['Content-Length'] = str(len(request.body))
# append additional headers base on the service
# request.headers.append(('x-ms-version', X_MS_VERSION))
# if it is not GET or HEAD request, must set content-type.
if request.method not in ['GET', 'HEAD']:
for key in request.headers:
if 'content-type' == key.lower():
break
else:
request.headers['Content-Type'] = 'application/xml'
return request.headers
def _parse_response(self, response, return_type):
"""
Parse the HTTPResponse's body and fill all the data into a class of
return_type.
"""
return self._parse_response_body_from_xml_text(
response=response,
return_type=return_type
)
def _parse_response_body_from_xml_text(self, response, return_type):
"""
parse the xml and fill all the data into a class of return_type
"""
respbody = response.body
doc = minidom.parseString(respbody)
return_obj = return_type()
for node in self._get_child_nodes(doc, return_type.__name__):
self._fill_data_to_return_object(node, return_obj)
# Note: We always explicitly assign status code to the custom return
# type object
return_obj.status = response.status
return return_obj
def _get_child_nodes(self, node, tag_name):
return [childNode for childNode in node.getElementsByTagName(tag_name)
if childNode.parentNode == node]
    def _fill_data_to_return_object(self, node, return_obj):
        """
        Populate *return_obj* in place from the DOM *node*, dispatching
        on the declared (marker) type of each attribute of the object.
        """
        members = dict(vars(return_obj))
        for name, value in members.items():
            if isinstance(value, _ListOf):
                # Typed-list marker: deserialize each child element into
                # an instance of the declared list type.
                setattr(
                    return_obj,
                    name,
                    self._fill_list_of(
                        node,
                        value.list_type,
                        value.xml_element_name
                    )
                )
            elif isinstance(value, ScalarListOf):
                setattr(
                    return_obj,
                    name,
                    self._fill_scalar_list_of(
                        node,
                        value.list_type,
                        self._get_serialization_name(name),
                        value.xml_element_name
                    )
                )
            elif isinstance(value, _DictOf):
                setattr(
                    return_obj,
                    name,
                    self._fill_dict_of(
                        node,
                        self._get_serialization_name(name),
                        value.pair_xml_element_name,
                        value.key_xml_element_name,
                        value.value_xml_element_name
                    )
                )
            elif isinstance(value, WindowsAzureData):
                # Nested data object: recurse into the matching child.
                setattr(
                    return_obj,
                    name,
                    self._fill_instance_child(node, name, value.__class__)
                )
            elif isinstance(value, dict):
                setattr(
                    return_obj,
                    name,
                    self._fill_dict(
                        node,
                        self._get_serialization_name(name)
                    )
                )
            elif isinstance(value, _Base64String):
                value = self._fill_data_minidom(node, name, '')
                if value is not None:
                    value = self._decode_base64_to_text(value)
                # always set the attribute,
                # so we don't end up returning an object
                # with type _Base64String
                setattr(return_obj, name, value)
            else:
                value = self._fill_data_minidom(node, name, value)
                if value is not None:
                    setattr(return_obj, name, value)
def _fill_list_of(self, xmldoc, element_type, xml_element_name):
xmlelements = self._get_child_nodes(xmldoc, xml_element_name)
return [
self._parse_response_body_from_xml_node(xmlelement, element_type)
for xmlelement in xmlelements
]
def _parse_response_body_from_xml_node(self, node, return_type):
"""
parse the xml and fill all the data into a class of return_type
"""
return_obj = return_type()
self._fill_data_to_return_object(node, return_obj)
return return_obj
def _fill_scalar_list_of(self,
xmldoc,
element_type,
parent_xml_element_name,
xml_element_name):
xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = self._get_child_nodes(
xmlelements[0],
xml_element_name
)
return [
self._get_node_value(xmlelement, element_type)
for xmlelement in xmlelements
]
def _get_node_value(self, xmlelement, data_type):
value = xmlelement.firstChild.nodeValue
if data_type is datetime:
return self._to_datetime(value)
elif data_type is bool:
return value.lower() != 'false'
else:
return data_type(value)
def _get_serialization_name(self, element_name):
"""
Converts a Python name into a serializable name.
"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
def _fill_dict_of(self, xmldoc, parent_xml_element_name,
pair_xml_element_name, key_xml_element_name,
value_xml_element_name):
return_obj = {}
xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name)
if xmlelements:
xmlelements = self._get_child_nodes(
xmlelements[0],
pair_xml_element_name
)
for pair in xmlelements:
keys = self._get_child_nodes(pair, key_xml_element_name)
values = self._get_child_nodes(pair, value_xml_element_name)
if keys and values:
key = keys[0].firstChild.nodeValue
value = values[0].firstChild.nodeValue
return_obj[key] = value
return return_obj
def _fill_instance_child(self, xmldoc, element_name, return_type):
"""
Converts a child of the current dom element to the specified type.
"""
xmlelements = self._get_child_nodes(
xmldoc,
self._get_serialization_name(element_name)
)
if not xmlelements:
return None
return_obj = return_type()
self._fill_data_to_return_object(xmlelements[0], return_obj)
return return_obj
def _fill_dict(self, xmldoc, element_name):
xmlelements = self._get_child_nodes(xmldoc, element_name)
if xmlelements:
return_obj = {}
for child in xmlelements[0].childNodes:
if child.firstChild:
return_obj[child.nodeName] = child.firstChild.nodeValue
return return_obj
def _encode_base64(self, data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
encoded = base64.b64encode(data)
return encoded.decode('utf-8')
def _decode_base64_to_bytes(self, data):
if isinstance(data, _unicode_type):
data = data.encode('utf-8')
return base64.b64decode(data)
def _decode_base64_to_text(self, data):
decoded_bytes = self._decode_base64_to_bytes(data)
return decoded_bytes.decode('utf-8')
    def _fill_data_minidom(self, xmldoc, element_name, data_member):
        """
        Read the text of the child element matching *element_name* and
        convert it to the type of *data_member*.

        Returns None when the element is missing or empty.  When
        *data_member* is None the raw string value is returned.
        """
        xmlelements = self._get_child_nodes(
            xmldoc,
            self._get_serialization_name(element_name)
        )
        if not xmlelements or not xmlelements[0].childNodes:
            return None
        value = xmlelements[0].firstChild.nodeValue
        if data_member is None:
            return value
        elif isinstance(data_member, datetime):
            return self._to_datetime(value)
        elif type(data_member) is bool:
            # Any text other than 'false' (case-insensitive) is True.
            return value.lower() != 'false'
        elif type(data_member) is str:
            return _real_unicode(value)
        else:
            # Fall back to the constructor of the member's current type.
            return type(data_member)(value)
def _to_datetime(self, strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
    def _get_request_body(self, request_body):
        """
        Normalize *request_body* to bytes for the wire.

        WindowsAzureData instances are first serialized to XML; text is
        UTF-8 encoded; anything else goes through str() (the second
        isinstance check exists for the Python 2 unicode case).
        """
        if request_body is None:
            return b''
        if isinstance(request_body, WindowsAzureData):
            request_body = self._convert_class_to_xml(request_body)
        if isinstance(request_body, bytes):
            return request_body
        if isinstance(request_body, _unicode_type):
            return request_body.encode('utf-8')
        request_body = str(request_body)
        if isinstance(request_body, _unicode_type):
            return request_body.encode('utf-8')
        return request_body
    def _convert_class_to_xml(self, source, xml_prefix=True):
        """
        Serialize a WindowsAzureData instance to an XML string.

        NOTE(review): ``ET.Element()`` requires a tag argument, so this
        call raises TypeError if executed -- this code path looks dead;
        confirm before relying on it.
        """
        root = ET.Element()
        doc = self._construct_element_tree(source, root)
        result = ensure_string(ET.tostring(doc, encoding='utf-8',
                                           method='xml'))
        return result
    def _construct_element_tree(self, source, etree):
        """
        Recursively append the XML representation of *source* onto
        *etree* and return *etree*.

        NOTE(review): ``ET.Element()`` without a tag raises TypeError,
        and the class-name element is appended both before and after the
        attribute loop (apparently mimicking open/close tags) -- confirm
        this helper is actually exercised before relying on it.
        """
        if source is None:
            return ET.Element()
        if isinstance(source, list):
            for value in source:
                etree.append(self._construct_element_tree(value, etree))
        elif isinstance(source, WindowsAzureData):
            class_name = source.__class__.__name__
            etree.append(ET.Element(class_name))
            for name, value in vars(source).items():
                if value is not None:
                    if (isinstance(value, list) or
                            isinstance(value, WindowsAzureData)):
                        etree.append(
                            self._construct_element_tree(value, etree)
                        )
                    else:
                        ele = ET.Element(self._get_serialization_name(name))
                        ele.text = xml_escape(str(value))
                        etree.append(ele)
            etree.append(ET.Element(class_name))
        return etree
def _parse_response_for_async_op(self, response):
if response is None:
return None
result = AsynchronousOperationResult()
if response.headers:
for name, value in response.headers.items():
if name.lower() == 'x-ms-request-id':
result.request_id = value
return result
def _get_deployment_path_using_name(self, service_name,
deployment_name=None):
components = [
'services/hostedservices/',
_str(service_name),
'/deployments'
]
resource = ''.join(components)
return self._get_path(resource, deployment_name)
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path
def _get_image_path(self, image_name=None):
return self._get_path('services/images', image_name)
def _get_vmimage_path(self, image_name=None):
return self._get_path('services/vmimages', image_name)
def _get_hosted_service_path(self, service_name=None):
return self._get_path('services/hostedservices', service_name)
def _get_deployment_path_using_slot(self, service_name, slot=None):
return self._get_path(
'services/hostedservices/%s/deploymentslots' % (
_str(service_name)
),
slot
)
def _get_disk_path(self, disk_name=None):
return self._get_path('services/disks', disk_name)
def _get_role_path(self, service_name, deployment_name, role_name=None):
components = [
'services/hostedservices/',
_str(service_name),
'/deployments/',
deployment_name,
'/roles'
]
resource = ''.join(components)
return self._get_path(resource, role_name)
def _get_storage_service_path(self, service_name=None):
return self._get_path('services/storageservices', service_name)
def _ex_complete_async_azure_operation(self, response=None,
operation_type='create_node'):
request_id = self._parse_response_for_async_op(response)
operation_status = self._get_operation_status(request_id.request_id)
timeout = 60 * 5
waittime = 0
interval = 5
while operation_status.status == "InProgress" and waittime < timeout:
operation_status = self._get_operation_status(request_id)
if operation_status.status == "Succeeded":
break
waittime += interval
time.sleep(interval)
if operation_status.status == 'Failed':
raise LibcloudError(
'Message: Async request for operation %s has failed' %
operation_type,
driver=self.connection.driver
)
def raise_for_response(self, response, valid_response):
if response.status != valid_response:
values = (response.error, response.body, response.status)
message = 'Message: %s, Body: %s, Status code: %s' % (values)
raise LibcloudError(message, driver=self)
"""
XML Serializer
Borrowed from the Azure SDK for Python which is licensed under Apache 2.0.
https://github.com/Azure/azure-sdk-for-python
"""
def _lower(text):
return text.lower()
class AzureXmlSerializer(object):
@staticmethod
def create_storage_service_input_to_xml(service_name,
description,
label,
affinity_group,
location,
geo_replication_enabled,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'CreateStorageServiceInput',
[
('ServiceName', service_name),
('Description', description),
('Label', label),
('AffinityGroup', affinity_group),
('Location', location),
('GeoReplicationEnabled', geo_replication_enabled, _lower)
],
extended_properties
)
@staticmethod
def update_storage_service_input_to_xml(description,
label,
geo_replication_enabled,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpdateStorageServiceInput',
[
('Description', description),
('Label', label, AzureNodeDriver._encode_base64),
('GeoReplicationEnabled', geo_replication_enabled, _lower)
],
extended_properties
)
@staticmethod
def regenerate_keys_to_xml(key_type):
return AzureXmlSerializer.doc_from_data(
'RegenerateKeys',
[('KeyType', key_type)]
)
@staticmethod
def update_hosted_service_to_xml(label, description, extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpdateHostedService',
[
('Label', label, AzureNodeDriver._encode_base64),
('Description', description)
],
extended_properties
)
@staticmethod
def create_hosted_service_to_xml(service_name,
label,
description,
location,
affinity_group=None,
extended_properties=None):
if affinity_group:
return AzureXmlSerializer.doc_from_data(
'CreateHostedService',
[
('ServiceName', service_name),
('Label', label),
('Description', description),
('AffinityGroup', affinity_group),
],
extended_properties
)
return AzureXmlSerializer.doc_from_data(
'CreateHostedService',
[
('ServiceName', service_name),
('Label', label),
('Description', description),
('Location', location),
],
extended_properties
)
@staticmethod
def create_storage_service_to_xml(service_name,
label,
description,
location,
affinity_group,
extended_properties=None):
return AzureXmlSerializer.doc_from_data(
'CreateStorageServiceInput',
[
('ServiceName', service_name),
('Label', label),
('Description', description),
('Location', location),
('AffinityGroup', affinity_group)
],
extended_properties
)
@staticmethod
def create_deployment_to_xml(name,
package_url,
label,
configuration,
start_deployment,
treat_warnings_as_error,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'CreateDeployment',
[
('Name', name),
('PackageUrl', package_url),
('Label', label, AzureNodeDriver._encode_base64),
('Configuration', configuration),
('StartDeployment', start_deployment, _lower),
('TreatWarningsAsError', treat_warnings_as_error, _lower)
],
extended_properties
)
@staticmethod
def swap_deployment_to_xml(production, source_deployment):
return AzureXmlSerializer.doc_from_data(
'Swap',
[
('Production', production),
('SourceDeployment', source_deployment)
]
)
@staticmethod
def update_deployment_status_to_xml(status):
return AzureXmlSerializer.doc_from_data(
'UpdateDeploymentStatus',
[('Status', status)]
)
@staticmethod
def change_deployment_to_xml(configuration,
treat_warnings_as_error,
mode,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'ChangeConfiguration',
[
('Configuration', configuration),
('TreatWarningsAsError', treat_warnings_as_error, _lower),
('Mode', mode)
],
extended_properties
)
@staticmethod
def upgrade_deployment_to_xml(mode,
package_url,
configuration,
label,
role_to_upgrade,
force,
extended_properties):
return AzureXmlSerializer.doc_from_data(
'UpgradeDeployment',
[
('Mode', mode),
('PackageUrl', package_url),
('Configuration', configuration),
('Label', label, AzureNodeDriver._encode_base64),
('RoleToUpgrade', role_to_upgrade),
('Force', force, _lower)
],
extended_properties
)
@staticmethod
def rollback_upgrade_to_xml(mode, force):
return AzureXmlSerializer.doc_from_data(
'RollbackUpdateOrUpgrade',
[
('Mode', mode),
('Force', force, _lower)
]
)
@staticmethod
def walk_upgrade_domain_to_xml(upgrade_domain):
return AzureXmlSerializer.doc_from_data(
'WalkUpgradeDomain',
[('UpgradeDomain', upgrade_domain)]
)
@staticmethod
def certificate_file_to_xml(data, certificate_format, password):
return AzureXmlSerializer.doc_from_data(
'CertificateFile',
[
('Data', data),
('CertificateFormat', certificate_format),
('Password', password)
]
)
@staticmethod
def create_affinity_group_to_xml(name, label, description, location):
return AzureXmlSerializer.doc_from_data(
'CreateAffinityGroup',
[
('Name', name),
('Label', label, AzureNodeDriver._encode_base64),
('Description', description),
('Location', location)
]
)
@staticmethod
def update_affinity_group_to_xml(label, description):
return AzureXmlSerializer.doc_from_data(
'UpdateAffinityGroup',
[
('Label', label, AzureNodeDriver._encode_base64),
('Description', description)
]
)
@staticmethod
def subscription_certificate_to_xml(public_key, thumbprint, data):
return AzureXmlSerializer.doc_from_data(
'SubscriptionCertificate',
[
('SubscriptionCertificatePublicKey', public_key),
('SubscriptionCertificateThumbprint', thumbprint),
('SubscriptionCertificateData', data)
]
)
@staticmethod
def os_image_to_xml(label, media_link, name, os):
return AzureXmlSerializer.doc_from_data(
'OSImage',
[
('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)
]
)
@staticmethod
def data_virtual_hard_disk_to_xml(host_caching,
disk_label,
disk_name,
lun,
logical_disk_size_in_gb,
media_link,
source_media_link):
return AzureXmlSerializer.doc_from_data(
'DataVirtualHardDisk',
[
('HostCaching', host_caching),
('DiskLabel', disk_label),
('DiskName', disk_name),
('Lun', lun),
('LogicalDiskSizeInGB', logical_disk_size_in_gb),
('MediaLink', media_link),
('SourceMediaLink', source_media_link)
]
)
@staticmethod
def disk_to_xml(has_operating_system, label, media_link, name, os):
return AzureXmlSerializer.doc_from_data(
'Disk',
[
('HasOperatingSystem', has_operating_system, _lower),
('Label', label),
('MediaLink', media_link),
('Name', name),
('OS', os)
]
)
@staticmethod
def restart_role_operation_to_xml():
xml = ET.Element("OperationType")
xml.text = "RestartRoleOperation"
doc = AzureXmlSerializer.doc_from_xml(
'RestartRoleOperation',
xml
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def shutdown_role_operation_to_xml():
xml = ET.Element("OperationType")
xml.text = "ShutdownRoleOperation"
doc = AzureXmlSerializer.doc_from_xml(
'ShutdownRoleOperation',
xml
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def start_role_operation_to_xml():
xml = ET.Element("OperationType")
xml.text = "StartRoleOperation"
doc = AzureXmlSerializer.doc_from_xml(
'StartRoleOperation',
xml
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def windows_configuration_to_xml(configuration, xml):
AzureXmlSerializer.data_to_xml(
[('ConfigurationSetType', configuration.configuration_set_type)],
xml
)
AzureXmlSerializer.data_to_xml(
[('ComputerName', configuration.computer_name)],
xml
)
AzureXmlSerializer.data_to_xml(
[('AdminPassword', configuration.admin_password)],
xml
)
AzureXmlSerializer.data_to_xml(
[
(
'ResetPasswordOnFirstLogon',
configuration.reset_password_on_first_logon,
_lower
)
],
xml
)
AzureXmlSerializer.data_to_xml(
[
(
'EnableAutomaticUpdates',
configuration.enable_automatic_updates,
_lower
)
],
xml
)
AzureXmlSerializer.data_to_xml(
[('TimeZone', configuration.time_zone)],
xml
)
if configuration.domain_join is not None:
domain = ET.xml("DomainJoin") # pylint: disable=no-member
creds = ET.xml("Credentials") # pylint: disable=no-member
domain.appemnd(creds)
xml.append(domain)
AzureXmlSerializer.data_to_xml(
[('Domain', configuration.domain_join.credentials.domain)],
creds
)
AzureXmlSerializer.data_to_xml(
[
(
'Username',
configuration.domain_join.credentials.username
)
],
creds
)
AzureXmlSerializer.data_to_xml(
[
(
'Password',
configuration.domain_join.credentials.password
)
],
creds
)
AzureXmlSerializer.data_to_xml(
[('JoinDomain', configuration.domain_join.join_domain)],
domain
)
AzureXmlSerializer.data_to_xml(
[
(
'MachineObjectOU',
configuration.domain_join.machine_object_ou
)
],
domain
)
if configuration.stored_certificate_settings is not None:
cert_settings = ET.Element("StoredCertificateSettings")
xml.append(cert_settings)
for cert in configuration.stored_certificate_settings:
cert_setting = ET.Element("CertificateSetting")
cert_settings.append(cert_setting)
cert_setting.append(AzureXmlSerializer.data_to_xml(
[('StoreLocation', cert.store_location)])
)
AzureXmlSerializer.data_to_xml(
[('StoreName', cert.store_name)],
cert_setting
)
AzureXmlSerializer.data_to_xml(
[('Thumbprint', cert.thumbprint)],
cert_setting
)
AzureXmlSerializer.data_to_xml(
[('AdminUsername', configuration.admin_user_name)],
xml
)
return xml
    @staticmethod
    def linux_configuration_to_xml(configuration, xml):
        """
        Serialize a LinuxConfigurationSet into *xml* (appended in place)
        and return the same element.
        """
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('HostName', configuration.host_name)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('UserName', configuration.user_name)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('UserPassword', configuration.user_password)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [
                (
                    'DisableSshPasswordAuthentication',
                    configuration.disable_ssh_password_authentication,
                    _lower
                )
            ],
            xml
        )
        if configuration.ssh is not None:
            # SSH public keys and key pairs are grouped under a single
            # <SSH> element.
            ssh = ET.Element("SSH")
            pkeys = ET.Element("PublicKeys")
            kpairs = ET.Element("KeyPairs")
            ssh.append(pkeys)
            ssh.append(kpairs)
            xml.append(ssh)
            for key in configuration.ssh.public_keys:
                pkey = ET.Element("PublicKey")
                pkeys.append(pkey)
                AzureXmlSerializer.data_to_xml(
                    [('Fingerprint', key.fingerprint)],
                    pkey
                )
                AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey)
            for key in configuration.ssh.key_pairs:
                kpair = ET.Element("KeyPair")
                kpairs.append(kpair)
                AzureXmlSerializer.data_to_xml(
                    [('Fingerprint', key.fingerprint)],
                    kpair
                )
                AzureXmlSerializer.data_to_xml([('Path', key.path)], kpair)
        if configuration.custom_data is not None:
            AzureXmlSerializer.data_to_xml(
                [('CustomData', configuration.custom_data)],
                xml
            )
        return xml
    @staticmethod
    def network_configuration_to_xml(configuration, xml):
        """
        Serialize a network ConfigurationSet (input endpoints and subnet
        names) into *xml* (appended in place) and return the element.
        """
        AzureXmlSerializer.data_to_xml(
            [('ConfigurationSetType', configuration.configuration_set_type)],
            xml
        )
        input_endpoints = ET.Element("InputEndpoints")
        xml.append(input_endpoints)
        for endpoint in configuration.input_endpoints:
            input_endpoint = ET.Element("InputEndpoint")
            input_endpoints.append(input_endpoint)
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'LoadBalancedEndpointSetName',
                        endpoint.load_balanced_endpoint_set_name
                    )
                ],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('LocalPort', endpoint.local_port)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Name', endpoint.name)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [('Port', endpoint.port)],
                input_endpoint
            )
            # A LoadBalancerProbe element is only emitted when at least
            # one probe attribute is set.
            if (endpoint.load_balancer_probe.path or
                    endpoint.load_balancer_probe.port or
                    endpoint.load_balancer_probe.protocol):
                load_balancer_probe = ET.Element("LoadBalancerProbe")
                input_endpoint.append(load_balancer_probe)
                AzureXmlSerializer.data_to_xml(
                    [('Path', endpoint.load_balancer_probe.path)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Port', endpoint.load_balancer_probe.port)],
                    load_balancer_probe
                )
                AzureXmlSerializer.data_to_xml(
                    [('Protocol', endpoint.load_balancer_probe.protocol)],
                    load_balancer_probe
                )
            AzureXmlSerializer.data_to_xml(
                [('Protocol', endpoint.protocol)],
                input_endpoint
            )
            AzureXmlSerializer.data_to_xml(
                [
                    (
                        'EnableDirectServerReturn',
                        endpoint.enable_direct_server_return,
                        _lower
                    )
                ],
                input_endpoint
            )
        subnet_names = ET.Element("SubnetNames")
        xml.append(subnet_names)
        for name in configuration.subnet_names:
            AzureXmlSerializer.data_to_xml(
                [('SubnetName', name)],
                subnet_names
            )
        return xml
    @staticmethod
    def role_to_xml(availability_set_name,
                    data_virtual_hard_disks,
                    network_configuration_set,
                    os_virtual_hard_disk,
                    vm_image_name,
                    role_name,
                    role_size,
                    role_type,
                    system_configuration_set,
                    xml):
        """
        Serialize a persistent VM role definition into *xml* (appended
        in place) and return the same element.  Optional sections are
        only emitted when their argument is not None.
        """
        AzureXmlSerializer.data_to_xml([('RoleName', role_name)], xml)
        AzureXmlSerializer.data_to_xml([('RoleType', role_type)], xml)
        config_sets = ET.Element("ConfigurationSets")
        xml.append(config_sets)
        if system_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)
            # Dispatch on the concrete provisioning configuration type.
            if isinstance(system_configuration_set, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )
            elif isinstance(system_configuration_set, LinuxConfigurationSet):
                AzureXmlSerializer.linux_configuration_to_xml(
                    system_configuration_set,
                    config_set
                )
        if network_configuration_set is not None:
            config_set = ET.Element("ConfigurationSet")
            config_sets.append(config_set)
            AzureXmlSerializer.network_configuration_to_xml(
                network_configuration_set,
                config_set
            )
        if availability_set_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('AvailabilitySetName', availability_set_name)],
                xml
            )
        if data_virtual_hard_disks is not None:
            vhds = ET.Element("DataVirtualHardDisks")
            xml.append(vhds)
            for hd in data_virtual_hard_disks:
                vhd = ET.Element("DataVirtualHardDisk")
                vhds.append(vhd)
                AzureXmlSerializer.data_to_xml(
                    [('HostCaching', hd.host_caching)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskLabel', hd.disk_label)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('DiskName', hd.disk_name)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('Lun', hd.lun)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)],
                    vhd
                )
                AzureXmlSerializer.data_to_xml(
                    [('MediaLink', hd.media_link)],
                    vhd
                )
        if os_virtual_hard_disk is not None:
            hd = ET.Element("OSVirtualHardDisk")
            xml.append(hd)
            AzureXmlSerializer.data_to_xml(
                [('HostCaching', os_virtual_hard_disk.host_caching)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskLabel', os_virtual_hard_disk.disk_label)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('DiskName', os_virtual_hard_disk.disk_name)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('MediaLink', os_virtual_hard_disk.media_link)],
                hd
            )
            AzureXmlSerializer.data_to_xml(
                [('SourceImageName', os_virtual_hard_disk.source_image_name)],
                hd
            )
        if vm_image_name is not None:
            AzureXmlSerializer.data_to_xml(
                [('VMImageName', vm_image_name)],
                xml
            )
        if role_size is not None:
            AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml)
        return xml
@staticmethod
def add_role_to_xml(role_name,
system_configuration_set,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
xml = AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
system_configuration_set,
doc
)
result = ensure_string(ET.tostring(xml, encoding='utf-8'))
return result
@staticmethod
def update_role_to_xml(role_name,
os_virtual_hard_disk,
role_type,
network_configuration_set,
availability_set_name,
data_virtual_hard_disks,
vm_image_name,
role_size):
doc = AzureXmlSerializer.doc_from_xml('PersistentVMRole')
AzureXmlSerializer.role_to_xml(
availability_set_name,
data_virtual_hard_disks,
network_configuration_set,
os_virtual_hard_disk,
vm_image_name,
role_name,
role_size,
role_type,
None,
doc
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
    @staticmethod
    def capture_role_to_xml(post_capture_action,
                            target_image_name,
                            target_image_label,
                            provisioning_configuration):
        """
        Serialize a CaptureRoleOperation request document.

        NOTE(review): ``xml`` here is the <OperationType> element itself,
        so the subsequent elements are nested *inside* it rather than as
        siblings -- confirm this matches what the API expects before
        changing it.
        """
        xml = AzureXmlSerializer.data_to_xml(
            [('OperationType', 'CaptureRoleOperation')]
        )
        AzureXmlSerializer.data_to_xml(
            [('PostCaptureAction', post_capture_action)],
            xml
        )
        if provisioning_configuration is not None:
            provisioning_config = ET.Element("ProvisioningConfiguration")
            xml.append(provisioning_config)
            if isinstance(provisioning_configuration, WindowsConfigurationSet):
                AzureXmlSerializer.windows_configuration_to_xml(
                    provisioning_configuration,
                    provisioning_config
                )
            elif isinstance(provisioning_configuration, LinuxConfigurationSet):
                AzureXmlSerializer.linux_configuration_to_xml(
                    provisioning_configuration,
                    provisioning_config
                )
        AzureXmlSerializer.data_to_xml(
            [('TargetImageLabel', target_image_label)],
            xml
        )
        AzureXmlSerializer.data_to_xml(
            [('TargetImageName', target_image_name)],
            xml
        )
        doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def virtual_machine_deployment_to_xml(deployment_name,
                                          deployment_slot,
                                          label,
                                          role_name,
                                          system_configuration_set,
                                          os_virtual_hard_disk,
                                          role_type,
                                          network_configuration_set,
                                          availability_set_name,
                                          data_virtual_hard_disks,
                                          role_size,
                                          virtual_network_name,
                                          vm_image_name):
        """
        Serialize a complete Deployment document containing a single
        role, returning the XML as a string.
        """
        doc = AzureXmlSerializer.doc_from_xml('Deployment')
        AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc)
        AzureXmlSerializer.data_to_xml(
            [('DeploymentSlot', deployment_slot)],
            doc
        )
        AzureXmlSerializer.data_to_xml([('Label', label)], doc)
        # The single role lives inside a RoleList wrapper element.
        role_list = ET.Element("RoleList")
        role = ET.Element("Role")
        role_list.append(role)
        doc.append(role_list)
        AzureXmlSerializer.role_to_xml(
            availability_set_name,
            data_virtual_hard_disks,
            network_configuration_set,
            os_virtual_hard_disk,
            vm_image_name,
            role_name,
            role_size,
            role_type,
            system_configuration_set,
            role
        )
        if virtual_network_name is not None:
            doc.append(
                AzureXmlSerializer.data_to_xml(
                    [('VirtualNetworkName', virtual_network_name)]
                )
            )
        result = ensure_string(ET.tostring(doc, encoding='utf-8'))
        return result
    @staticmethod
    def data_to_xml(data, xml=None):
        """
        Creates an xml fragment from the specified data.
        data: Array of tuples, where first: xml element name
              second: xml element text
              third: optional conversion function applied to the text

        When *xml* is given, every non-None entry is appended to it and
        *xml* is returned.  When *xml* is None, the FIRST non-None entry
        is returned immediately as a standalone element (callers using
        this form pass a single-tuple list).
        """
        for element in data:
            name = element[0]
            val = element[1]
            if len(element) > 2:
                converter = element[2]
            else:
                converter = None
            if val is not None:
                if converter is not None:
                    text = _str(converter(_str(val)))
                else:
                    text = _str(val)
                entry = ET.Element(name)
                entry.text = text
                if xml is not None:
                    xml.append(entry)
                else:
                    # No target element: return the first fragment built.
                    return entry
        return xml
    @staticmethod
    def doc_from_xml(document_element_name, inner_xml=None):
        """
        Wraps the specified xml in an xml root element with default azure
        namespaces
        """
        # Note: Namespaces don't work consistency in Python 2 and 3.
        """
        nsmap = {
            None: "http://www.w3.org/2001/XMLSchema-instance",
            "i": "http://www.w3.org/2001/XMLSchema-instance"
        }
        xml.attrib["xmlns:i"] = "http://www.w3.org/2001/XMLSchema-instance"
        xml.attrib["xmlns"] = "http://schemas.microsoft.com/windowsazure"
        """
        xml = ET.Element(document_element_name)
        xml.set("xmlns", "http://schemas.microsoft.com/windowsazure")
        if inner_xml is not None:
            xml.append(inner_xml)
        return xml
@staticmethod
def doc_from_data(document_element_name, data, extended_properties=None):
doc = AzureXmlSerializer.doc_from_xml(document_element_name)
AzureXmlSerializer.data_to_xml(data, doc)
if extended_properties is not None:
doc.append(
AzureXmlSerializer.extended_properties_dict_to_xml_fragment(
extended_properties
)
)
result = ensure_string(ET.tostring(doc, encoding='utf-8'))
return result
@staticmethod
def extended_properties_dict_to_xml_fragment(extended_properties):
if extended_properties is not None and len(extended_properties) > 0:
xml = ET.Element("ExtendedProperties")
for key, val in extended_properties.items():
extended_property = ET.Element("ExtendedProperty")
name = ET.Element("Name")
name.text = _str(key)
value = ET.Element("Value")
value.text = _str(val)
extended_property.append(name)
extended_property.append(value)
xml.append(extended_property)
return xml
"""
Data Classes
Borrowed from the Azure SDK for Python.
"""
class WindowsAzureData(object):
    """
    Base marker class for Azure data objects.

    Carries no behavior of its own; it exists so code can check with
    isinstance whether an object is one of these data containers.
    """
    pass
class WindowsAzureDataTypedList(WindowsAzureData):
    """Base for typed-list containers.

    Wraps a _ListOf in ``self.items`` and delegates iteration, length
    and indexing to it.  Subclasses set ``list_type`` (element class)
    and optionally ``xml_element_name``.
    """

    list_type = None
    xml_element_name = None

    def __init__(self):
        self.items = _ListOf(self.list_type, self.xml_element_name)

    def __iter__(self):
        return self.items.__iter__()

    def __len__(self):
        return self.items.__len__()

    def __getitem__(self, index):
        return self.items.__getitem__(index)
class OSVirtualHardDisk(WindowsAzureData):
    """Describes the OS virtual hard disk of a role."""

    def __init__(self, source_image_name=None, media_link=None,
                 host_caching=None, disk_label=None, disk_name=None):
        self.disk_name = disk_name
        self.disk_label = disk_label
        self.host_caching = host_caching
        self.media_link = media_link
        self.source_image_name = source_image_name
        # Undocumented by the API; not used when adding a role.
        self.os = ''
class LinuxConfigurationSet(WindowsAzureData):
    """Provisioning configuration for a Linux role instance."""

    def __init__(self,
                 host_name=None,
                 user_name=None,
                 user_password=None,
                 disable_ssh_password_authentication=None,
                 custom_data=None):
        self.configuration_set_type = 'LinuxProvisioningConfiguration'
        self.custom_data = custom_data
        self.disable_ssh_password_authentication = (
            disable_ssh_password_authentication)
        self.host_name = host_name
        self.ssh = SSH()
        self.user_name = user_name
        self.user_password = user_password
class WindowsConfigurationSet(WindowsAzureData):
    """Provisioning configuration for a Windows role instance."""

    def __init__(self,
                 computer_name=None,
                 admin_password=None,
                 reset_password_on_first_logon=None,
                 enable_automatic_updates=None,
                 time_zone=None,
                 admin_user_name=None):
        self.configuration_set_type = 'WindowsProvisioningConfiguration'
        self.admin_password = admin_password
        self.admin_user_name = admin_user_name
        self.computer_name = computer_name
        self.domain_join = DomainJoin()
        self.enable_automatic_updates = enable_automatic_updates
        self.reset_password_on_first_logon = reset_password_on_first_logon
        self.stored_certificate_settings = StoredCertificateSettings()
        self.time_zone = time_zone
class DomainJoin(WindowsAzureData):
    """Domain-join settings for a Windows provisioning configuration."""

    def __init__(self):
        self.join_domain = ''
        self.machine_object_ou = ''
        self.credentials = Credentials()
class Credentials(WindowsAzureData):
    """Domain-join credentials (domain, username, password)."""

    def __init__(self):
        self.password = ''
        self.username = ''
        self.domain = ''
class CertificateSetting(WindowsAzureData):
    """A certificate to be provisioned onto the virtual machine.

    thumbprint:
        Thumbprint of an existing service certificate to provision.
    store_name:
        Name of the certificate store from which to retrieve the
        certificate.
    store_location:
        Target certificate store location on the virtual machine; only
        LocalMachine is supported.
    """

    def __init__(self, thumbprint='', store_name='', store_location=''):
        self.store_location = store_location
        self.store_name = store_name
        self.thumbprint = thumbprint
class StoredCertificateSettings(WindowsAzureDataTypedList):
    """Typed list of CertificateSetting entries."""
    _repr_attributes = ['items']
    list_type = CertificateSetting
class SSH(WindowsAzureData):
    """SSH settings (public keys and key pairs) for a Linux role."""

    def __init__(self):
        self.key_pairs = KeyPairs()
        self.public_keys = PublicKeys()
class PublicKey(WindowsAzureData):
    """A single SSH public key (fingerprint plus on-VM path)."""

    def __init__(self, fingerprint='', path=''):
        self.path = path
        self.fingerprint = fingerprint
class PublicKeys(WindowsAzureDataTypedList):
    """Typed list of PublicKey entries."""
    _repr_attributes = ['items']
    list_type = PublicKey
class AzureKeyPair(WindowsAzureData):
    """An SSH key pair (fingerprint plus on-VM path)."""

    def __init__(self, fingerprint='', path=''):
        self.path = path
        self.fingerprint = fingerprint
class KeyPairs(WindowsAzureDataTypedList):
    """Typed list of AzureKeyPair entries."""
    _repr_attributes = ['items']
    list_type = AzureKeyPair
class LoadBalancerProbe(WindowsAzureData):
    """Load-balancer probe settings (path, port, protocol)."""

    def __init__(self):
        self.protocol = ''
        self.port = ''
        self.path = ''
class ConfigurationSet(WindowsAzureData):
    """Generic configuration set: endpoints, subnets and role type."""

    def __init__(self):
        self.configuration_set_type = ''
        self.input_endpoints = ConfigurationSetInputEndpoints()
        self.role_type = ''
        self.subnet_names = ScalarListOf(str, 'SubnetName')
class ConfigurationSets(WindowsAzureDataTypedList):
    """Typed list of ConfigurationSet entries."""
    _repr_attributes = ['items']
    list_type = ConfigurationSet
class ConfigurationSetInputEndpoint(WindowsAzureData):
    """An input endpoint (port mapping) within a configuration set."""

    def __init__(self,
                 name='',
                 protocol='',
                 port='',
                 local_port='',
                 load_balanced_endpoint_set_name='',
                 enable_direct_server_return=False):
        self.name = name
        self.protocol = protocol
        self.port = port
        self.local_port = local_port
        self.enable_direct_server_return = enable_direct_server_return
        self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
        self.load_balancer_probe = LoadBalancerProbe()
class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList):
    """Typed list of ConfigurationSetInputEndpoint entries."""
    _repr_attributes = ['items']
    list_type = ConfigurationSetInputEndpoint
    xml_element_name = 'InputEndpoint'
class Location(WindowsAzureData):
    """An Azure location with its service and compute capabilities."""

    def __init__(self):
        self.available_services = ScalarListOf(str, 'AvailableService')
        self.compute_capabilities = ComputeCapability()
        self.display_name = ''
        self.name = ''
class Locations(WindowsAzureDataTypedList):
    """Typed list of Location entries."""
    _repr_attributes = ['items']
    list_type = Location
class ComputeCapability(WindowsAzureData):
    """Compute capability of a location: the VM role sizes it offers."""
    def __init__(self):
        self.virtual_machines_role_sizes = ScalarListOf(str, 'RoleSize')
class VirtualMachinesRoleSizes(WindowsAzureData):
    """Holds the list of role sizes available for virtual machines."""
    def __init__(self):
        self.role_size = ScalarListOf(str, 'RoleSize')
class OSImage(WindowsAzureData):
    """An operating-system image from the Azure image repository."""

    def __init__(self):
        self.affinity_group = ''
        self.category = ''
        self.description = ''
        self.eula = ''
        self.label = ''
        self.location = ''
        self.logical_size_in_gb = 0
        self.media_link = ''
        self.name = ''
        self.os = ''
class Images(WindowsAzureDataTypedList):
    """Typed list of OSImage entries."""
    _repr_attributes = ['items']
    list_type = OSImage
class VMImage(WindowsAzureData):
    """A captured VM image, including its OS disk configuration."""

    def __init__(self):
        self.affinity_group = ''
        self.category = ''
        self.deployment_name = ''
        self.label = ''
        self.location = ''
        self.name = ''
        self.os_disk_configuration = OSDiskConfiguration()
        self.role_name = ''
        self.service_name = ''
class VMImages(WindowsAzureDataTypedList):
    """Typed list of VMImage entries."""
    _repr_attributes = ['items']
    list_type = VMImage
class VirtualIP(WindowsAzureData):
    """A virtual IP address attached to a deployment."""

    def __init__(self):
        self.name = ''
        self.is_dns_programmed = ''
        self.address = ''
class VirtualIPs(WindowsAzureDataTypedList):
    """Typed list of VirtualIP entries."""
    _repr_attributes = ['items']
    list_type = VirtualIP
class HostedService(WindowsAzureData, ReprMixin):
    """A cloud (hosted) service together with its deployments."""

    _repr_attributes = ['service_name', 'url']

    def __init__(self):
        self.deployments = Deployments()
        self.hosted_service_properties = HostedServiceProperties()
        self.service_name = ''
        self.url = ''
class HostedServices(WindowsAzureDataTypedList, ReprMixin):
    """Typed list of HostedService entries."""
    _repr_attributes = ['items']
    list_type = HostedService
class HostedServiceProperties(WindowsAzureData):
    """Properties of a hosted service as returned by the management API."""

    def __init__(self):
        self.affinity_group = ''
        self.date_created = ''
        self.date_last_modified = ''
        self.description = ''
        self.extended_properties = _DictOf('ExtendedProperty', 'Name', 'Value')
        self.label = _Base64String()
        self.location = ''
        self.status = ''
class Deployment(WindowsAzureData):
    """A deployment within a hosted service."""

    def __init__(self):
        # Identity and status.
        self.name = ''
        self.deployment_slot = ''
        self.private_id = ''
        self.status = ''
        self.label = _Base64String()
        self.url = ''
        self.configuration = _Base64String()
        # Roles and endpoints.
        self.role_instance_list = RoleInstanceList()
        self.role_list = RoleList()
        self.input_endpoint_list = InputEndpoints()
        self.virtual_ips = VirtualIPs()
        # Upgrade and lifecycle metadata.
        self.upgrade_status = UpgradeStatus()
        self.upgrade_domain_count = ''
        self.sdk_version = ''
        self.locked = False
        self.rollback_allowed = False
        self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
        self.created_time = ''
        self.last_modified_time = ''
        self.extended_properties = _DictOf('ExtendedProperty', 'Name', 'Value')
class Deployments(WindowsAzureDataTypedList):
    """Typed list of Deployment entries."""
    _repr_attributes = ['items']
    list_type = Deployment
class UpgradeStatus(WindowsAzureData):
    """Upgrade progress information for a deployment."""

    def __init__(self):
        self.current_upgrade_domain = ''
        self.current_upgrade_domain_state = ''
        self.upgrade_type = ''
class RoleInstance(WindowsAzureData):
    """A single role instance (VM) inside a deployment."""

    def __init__(self):
        self.fqdn = ''
        self.host_name = ''
        self.instance_endpoints = InstanceEndpoints()
        self.instance_error_code = ''
        self.instance_fault_domain = 0
        self.instance_name = ''
        self.instance_size = ''
        self.instance_state_details = ''
        self.instance_status = ''
        self.instance_upgrade_domain = 0
        self.ip_address = ''
        self.power_state = ''
        self.role_name = ''
class RoleInstanceList(WindowsAzureDataTypedList):
    """Typed list of RoleInstance entries."""
    _repr_attributes = ['items']
    list_type = RoleInstance
class InstanceEndpoint(WindowsAzureData):
    """An endpoint exposed by a role instance."""

    def __init__(self):
        self.protocol = ''
        self.local_port = ''
        self.public_port = ''
        self.vip = ''
        self.name = ''
class InstanceEndpoints(WindowsAzureDataTypedList):
    """Typed list of InstanceEndpoint entries."""
    _repr_attributes = ['items']
    list_type = InstanceEndpoint
class InputEndpoint(WindowsAzureData):
    """An input endpoint of a deployment (role name, VIP, port)."""

    def __init__(self):
        self.port = ''
        self.vip = ''
        self.role_name = ''
class InputEndpoints(WindowsAzureDataTypedList):
    """Typed list of InputEndpoint entries."""
    _repr_attributes = ['items']
    list_type = InputEndpoint
class Role(WindowsAzureData):
    """A role entry in a deployment's role list."""

    def __init__(self):
        self.os_version = ''
        self.role_name = ''
class RoleList(WindowsAzureDataTypedList):
    """Typed list of Role entries."""
    _repr_attributes = ['items']
    list_type = Role
class PersistentVMDowntimeInfo(WindowsAzureData):
    """Downtime window information for a persistent VM."""

    def __init__(self):
        self.status = ''
        self.end_time = ''
        self.start_time = ''
class AsynchronousOperationResult(WindowsAzureData):
    """Holds the request id returned by an asynchronous operation."""
    def __init__(self, request_id=None):
        self.request_id = request_id
class Disk(WindowsAzureData):
    """A data or OS disk in the disk repository."""

    def __init__(self):
        self.affinity_group = ''
        self.attached_to = AttachedTo()
        self.has_operating_system = ''
        self.is_corrupted = ''
        self.label = ''
        self.location = ''
        self.logical_disk_size_in_gb = 0
        self.media_link = ''
        self.name = ''
        self.os = ''
        self.source_image_name = ''
class Disks(WindowsAzureDataTypedList):
    """Typed list of Disk entries."""
    _repr_attributes = ['items']
    list_type = Disk
class AttachedTo(WindowsAzureData):
    """Identifies the service/deployment/role a disk is attached to."""

    def __init__(self):
        self.role_name = ''
        self.deployment_name = ''
        self.hosted_service_name = ''
class OperationError(WindowsAzureData):
    """Error code and message attached to a failed operation."""

    def __init__(self):
        self.message = ''
        self.code = ''
class Operation(WindowsAzureData):
    """Status of a management operation, including any error details."""

    def __init__(self):
        self.error = OperationError()
        self.http_status_code = ''
        self.id = ''
        self.status = ''
class OperatingSystem(WindowsAzureData):
    """A guest operating system version entry."""

    def __init__(self):
        self.family = 0
        self.family_label = _Base64String()
        self.is_active = True
        self.is_default = True
        self.label = _Base64String()
        self.version = ''
class OSDiskConfiguration(WindowsAzureData):
    """OS disk configuration attached to a VM image."""

    def __init__(self):
        self.host_caching = ''
        self.logical_disk_size_in_gb = 0
        self.media_link = ''
        self.name = ''
        self.os = ''
        self.os_state = ''
class OperatingSystems(WindowsAzureDataTypedList):
    """Typed list of OperatingSystem entries."""
    _repr_attributes = ['items']
    list_type = OperatingSystem
class OperatingSystemFamily(WindowsAzureData):
    """An OS family grouping a list of operating systems."""

    def __init__(self):
        self.operating_systems = OperatingSystems()
        self.label = _Base64String()
        self.name = ''
class OperatingSystemFamilies(WindowsAzureDataTypedList):
    """Typed list of OperatingSystemFamily entries."""
    _repr_attributes = ['items']
    list_type = OperatingSystemFamily
class Subscription(WindowsAzureData):
    """Subscription identity, admin contacts, quotas and usage counts."""

    def __init__(self):
        self.account_admin_live_email_id = ''
        self.current_core_count = 0
        self.current_hosted_services = 0
        self.current_storage_accounts = 0
        self.max_core_count = 0
        self.max_dns_servers = 0
        self.max_hosted_services = 0
        self.max_local_network_sites = 0
        self.max_storage_accounts = 0
        self.max_virtual_network_sites = 0
        self.service_admin_live_email_id = ''
        self.subscription_id = ''
        self.subscription_name = ''
        self.subscription_status = ''
class AvailabilityResponse(WindowsAzureData):
    """Boolean result of a name-availability check."""
    def __init__(self):
        self.result = False
class SubscriptionCertificate(WindowsAzureData):
    """A management certificate registered on the subscription."""

    def __init__(self):
        self.created = ''
        self.subscription_certificate_data = ''
        self.subscription_certificate_thumbprint = ''
        self.subscription_certificate_public_key = ''
class SubscriptionCertificates(WindowsAzureDataTypedList):
    """Typed list of SubscriptionCertificate entries."""
    _repr_attributes = ['items']
    list_type = SubscriptionCertificate
class AzureHTTPRequest(object):
    """A management-API HTTP request being assembled before sending."""

    def __init__(self):
        self.body = ''
        self.headers = {}  # header name -> header value
        self.host = ''
        self.method = ''
        self.path = ''
        self.protocol_override = None
        self.query = []  # list of (name, value) pairs
class AzureHTTPResponse(object):
    """A management-API HTTP response (status, message, headers, body)."""

    def __init__(self, status, message, headers, body):
        self.body = body
        self.headers = headers
        self.message = message
        self.status = status
"""
Helper classes and functions.
"""
class _Base64String(str):
    # Marker subclass of str.  NOTE(review): presumably flags values that
    # are base64-encoded/decoded during (de)serialization -- confirm
    # against the driver's serialization helpers.
    pass
class _ListOf(list):
"""
A list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists
"""
def __init__(self, list_type, xml_element_name=None):
self.list_type = list_type
if xml_element_name is None:
self.xml_element_name = list_type.__name__
else:
self.xml_element_name = xml_element_name
super(_ListOf, self).__init__()
class ScalarListOf(list):
    """A list of scalar values that records its element type and the XML
    element name used during (de)serialization."""

    def __init__(self, list_type, xml_element_name):
        super(ScalarListOf, self).__init__()
        self.list_type = list_type
        self.xml_element_name = xml_element_name
class _DictOf(dict):
"""
A dict which carries with it the xml element names for key,val.
Used for deserializaion and construction of the lists
"""
def __init__(self,
pair_xml_element_name,
key_xml_element_name,
value_xml_element_name):
self.pair_xml_element_name = pair_xml_element_name
self.key_xml_element_name = key_xml_element_name
self.value_xml_element_name = value_xml_element_name
super(_DictOf, self).__init__()
class AzureNodeLocation(NodeLocation):
    """NodeLocation extended with Azure-specific capability lists:
    the services available and the VM role sizes offered there."""

    def __init__(self, id, name, country, driver, available_services,
                 virtual_machine_role_sizes):
        super(AzureNodeLocation, self).__init__(id, name, country, driver)
        self.available_services = available_services
        self.virtual_machine_role_sizes = virtual_machine_role_sizes

    def __repr__(self):
        template = ('<AzureNodeLocation: id=%s, name=%s, country=%s, '
                    'driver=%s services=%s virtualMachineRoleSizes=%s >')
        return template % (self.id,
                           self.name,
                           self.country,
                           self.driver.name,
                           ','.join(self.available_services),
                           ','.join(self.virtual_machine_role_sizes))
|
andrewsomething/libcloud
|
libcloud/compute/drivers/azure.py
|
Python
|
apache-2.0
| 113,227
|
[
"VisIt"
] |
4b2e2a383974b55da89f56c160c6a9676ae0cc9814c9de8d02c4e97c65d6b432
|
"""A base class for RPC services and proxies.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012. Brian Granger, Min Ragan-Kelley
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import zmq
from zmq.eventloop.ioloop import IOLoop
from .serializer import PickleSerializer
#-----------------------------------------------------------------------------
# RPC base class
#-----------------------------------------------------------------------------
class RPCBase(object):
    def __init__(self, loop=None, context=None, serializer=None):
        """Base class for RPC service and proxy.
        Parameters
        ==========
        loop : IOLoop
            An existing IOLoop instance, if not passed, then IOLoop.instance()
            will be used.
        context : Context
            An existing Context instance, if not passed, the Context.instance()
            will be used.
        serializer : Serializer
            An instance of a Serializer subclass that will be used to serialize
            and deserialize args, kwargs and the result.
        """
        self.loop = loop if loop is not None else IOLoop.instance()
        self.context = context if context is not None else zmq.Context.instance()
        # socket/stream are populated by reset() via _create_socket().
        self.socket = None
        self.stream = None
        self._serializer = serializer if serializer is not None else PickleSerializer()
        self.reset()
    #-------------------------------------------------------------------------
    # Public API
    #-------------------------------------------------------------------------
    def reset(self):
        """Reset the socket/stream."""
        # NOTE(review): _create_socket() is not defined on this base class;
        # presumably subclasses provide it -- confirm before instantiating
        # RPCBase directly.  Resetting also forgets all previously bound or
        # connected urls.
        if isinstance(self.socket, zmq.Socket):
            self.socket.close()
        self._create_socket()
        self.urls = []
    def bind(self, url):
        """Bind the service to a url of the form proto://ip:port."""
        self.urls.append(url)
        self.socket.bind(url)
    def connect(self, url):
        """Connect the service to a url of the form proto://ip:port."""
        self.urls.append(url)
        self.socket.connect(url)
|
ellisonbg/zpyrpc
|
zpyrpc/base.py
|
Python
|
bsd-3-clause
| 2,496
|
[
"Brian"
] |
b55d007ed023d29477f32d47e6a66e46c11530a53b80993b135b68cd49f92d82
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet-50 on ImageNet using SNGP.
Spectral-normalized neural GP (SNGP) [1] is a simple method to improve
a deterministic neural network's uncertainty by applying spectral
normalization to hidden weights, and then replace the dense output layer with
a Gaussian process.
## Note:
Different from the paper, this implementation computes the posterior using the
Laplace approximation based on the Gaussian likelihood (i.e., squared loss)
rather than that based on cross-entropy loss. As a result, the logits for all
classes share the same covariance. In the experiments, this approach is shown to
perform better and computationally more scalable when the number of output
classes are large.
## References:
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
_arXiv preprint arXiv:2006.10108_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
[3]: Felix Xinnan Yu et al. Orthogonal Random Features. In _Neural Information
Processing Systems_, 2016.
https://papers.nips.cc/paper/6246-orthogonal-random-features.pdf
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.imagenet
from tensorboard.plugins.hparams import api as hp
# Training hyperparameter flags.
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.1,
                   'Base learning rate when train batch size is 256.')
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('l2', 1e-4, 'L2 coefficient.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
                    'The directory where the model weights and '
                    'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 90, 'Number of training epochs.')
flags.DEFINE_integer('corruptions_interval', 90,
                     'Number of epochs between evaluating on the corrupted '
                     'test data. Use -1 to never evaluate.')
flags.DEFINE_integer(
    'checkpoint_interval', -1,
    'Number of epochs between saving checkpoints. Use -1 to '
    'only save the last checkpoints.')
flags.DEFINE_string('alexnet_errors_path', None,
                    'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
flags.DEFINE_float('train_proportion', default=1.0,
                   help='only use a proportion of training set and use the'
                   'rest for validation instead of the test set.')
flags.register_validator('train_proportion',
                         lambda tp: tp > 0.0 and tp <= 1.0,
                         message='--train_proportion must be in (0, 1].')
# Dropout flags.
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout during inference.')
flags.DEFINE_float('dropout_rate', 0., 'Dropout rate.')
flags.DEFINE_bool(
    'filterwise_dropout', True, 'Dropout whole convolutional'
    'filters instead of individual values in the feature map.')
flags.DEFINE_integer('num_dropout_samples', 1,
                     'Number of samples to use for MC Dropout prediction.')
# Spectral normalization flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')
# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 1.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', False,
    'Whether to normalize the input for GP layer using LayerNorm. This is '
    'similar to applying automatic relevance determination (ARD) in the '
    'classic GP literature.')
flags.DEFINE_string(
    'gp_random_feature_type', 'orf',
    'The type of random feature to use. One of "rff" (random Fourier feature), '
    '"orf" (orthogonal random feature) [3].')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1.,
    'The discount factor to compute the moving average of precision matrix.'
    'If -1 then instead compute the exact covariance at the lastest epoch.')
flags.DEFINE_float(
    'gp_mean_field_factor', 1.,
    'The tunable multiplicative factor used in the mean-field approximation '
    'for the posterior mean of softmax Gaussian process. If -1 then use '
    'posterior mode instead of posterior mean. See [2] for detail.')
flags.DEFINE_bool(
    'gp_output_imagenet_initializer', True,
    'Whether to initialize GP output layer using Gaussian with small '
    'standard deviation (sd=0.01).')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
# TODO(jereliu): Support use_bfloat16=True which currently raises error with
# spectral normalization.
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in ImageNet-1k train dataset.
# NOTE(review): FLAGS.train_proportion is read here at module import time,
# i.e. before absl parses command-line flags in app.run(); absl normally
# raises UnparsedFlagAccessError for this.  Confirm how this module is
# imported, or consider moving this computation into main().
APPROX_IMAGENET_TRAIN_IMAGES = int(1281167 * FLAGS.train_proportion)
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
# Number of images in eval dataset.
if FLAGS.train_proportion != 1.:
imagenet_validation_images = 1281167 - APPROX_IMAGENET_TRAIN_IMAGES
else:
imagenet_validation_images = 50000
tf.io.gfile.makedirs(FLAGS.output_dir)
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
tf.random.set_seed(FLAGS.seed)
batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = imagenet_validation_images // batch_size
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
train_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
validation_percent=1. - FLAGS.train_proportion,
data_dir=data_dir)
train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy)
if FLAGS.train_proportion != 1.:
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.VALIDATION,
use_bfloat16=FLAGS.use_bfloat16,
validation_percent=1. - FLAGS.train_proportion,
data_dir=data_dir)
else:
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST,
use_bfloat16=FLAGS.use_bfloat16,
data_dir=data_dir)
clean_test_dataset = test_builder.load(
batch_size=batch_size, strategy=strategy)
test_datasets = {
'clean': clean_test_dataset
}
if FLAGS.corruptions_interval > 0:
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
dataset = utils.load_corrupted_test_dataset(
batch_size=batch_size,
corruption_name=name,
corruption_intensity=intensity,
use_bfloat16=FLAGS.use_bfloat16)
test_datasets[dataset_name] = (
strategy.experimental_distribute_dataset(dataset))
if FLAGS.use_bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
with strategy.scope():
logging.info('Building Keras ResNet-50 model')
model = ub.models.resnet50_sngp(
input_shape=(224, 224, 3),
batch_size=None,
num_classes=NUM_CLASSES,
use_mc_dropout=FLAGS.use_mc_dropout,
dropout_rate=FLAGS.dropout_rate,
filterwise_dropout=FLAGS.filterwise_dropout,
use_gp_layer=FLAGS.use_gp_layer,
gp_hidden_dim=FLAGS.gp_hidden_dim,
gp_scale=FLAGS.gp_scale,
gp_bias=FLAGS.gp_bias,
gp_input_normalization=FLAGS.gp_input_normalization,
gp_random_feature_type=FLAGS.gp_random_feature_type,
gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
use_spec_norm=FLAGS.use_spec_norm,
spec_norm_iteration=FLAGS.spec_norm_iteration,
spec_norm_bound=FLAGS.spec_norm_bound)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Scale learning rate and decay epochs by vanilla settings.
base_lr = FLAGS.base_learning_rate * batch_size / 256
decay_epochs = [
(FLAGS.train_epochs * 30) // 90,
(FLAGS.train_epochs * 60) // 90,
(FLAGS.train_epochs * 80) // 90,
]
learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch=steps_per_epoch,
base_learning_rate=base_lr,
decay_ratio=0.1,
decay_epochs=decay_epochs,
warmup_epochs=5)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
momentum=1.0 - FLAGS.one_minus_momentum,
nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/loss': tf.keras.metrics.Mean(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/stddev': tf.keras.metrics.Mean(),
}
if FLAGS.corruptions_interval > 0:
corrupt_metrics = {}
for intensity in range(1, max_intensity + 1):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
logging.info('Finished building Keras ResNet-50 model')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs, step):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
        # Reset covariance estimator at the beginning of a new epoch.
if FLAGS.use_gp_layer:
model.layers[-1].reset_covariance_matrix()
with tf.GradientTape() as tape:
logits = model(images, training=True)
if isinstance(logits, (list, tuple)):
# If model returns a tuple of (logits, covmat), extract logits
logits, _ = logits
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels,
logits,
from_logits=True))
filtered_variables = []
for var in model.trainable_variables:
# Apply l2 on the weights. This excludes BN parameters and biases, but
# pay caution to their naming scheme.
if 'kernel' in var.name or 'bias' in var.name:
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
# Scale the loss given the TPUStrategy will reduce sum all gradients.
loss = negative_log_likelihood + l2_loss
scaled_loss = loss / strategy.num_replicas_in_sync
grads = tape.gradient(scaled_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
probs = tf.nn.softmax(logits)
metrics['train/ece'].add_batch(probs, label=labels)
metrics['train/loss'].update_state(loss)
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/accuracy'].update_state(labels, logits)
for step in tf.range(tf.cast(steps_per_epoch, tf.int32)):
strategy.run(step_fn, args=(next(iterator), step))
@tf.function
def test_step(iterator, dataset_name):
  """Evaluation StepFn: runs ``steps_per_eval`` batches of MC evaluation.

  Updates the clean-test metrics when ``dataset_name == 'clean'``, and the
  per-corruption metrics otherwise.  ``model``, ``metrics``,
  ``corrupt_metrics``, ``strategy``, ``steps_per_eval`` and ``FLAGS`` come
  from the enclosing scope.
  """

  def step_fn(inputs):
    """Per-Replica StepFn."""
    images = inputs['features']
    labels = inputs['labels']
    logits_list = []
    stddev_list = []
    # Monte-Carlo sampling over stochastic forward passes.
    for _ in range(FLAGS.num_dropout_samples):
      logits = model(images, training=False)
      if isinstance(logits, (list, tuple)):
        # If model returns a tuple of (logits, covmat), extract both.
        logits, covmat = logits
      else:
        # No covariance available: fall back to the identity matrix.
        covmat = tf.eye(FLAGS.per_core_batch_size)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      # Mean-field adjustment of the logits using the predictive covariance.
      logits = ed.layers.utils.mean_field_logits(
          logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
      # Per-example predictive stddev from the covariance diagonal.
      stddev = tf.sqrt(tf.linalg.diag_part(covmat))
      stddev_list.append(stddev)
      logits_list.append(logits)
    # Logits dimension is (num_samples, batch_size, num_classes).
    logits_list = tf.stack(logits_list, axis=0)
    stddev_list = tf.stack(stddev_list, axis=0)
    stddev = tf.reduce_mean(stddev_list, axis=0)
    probs_list = tf.nn.softmax(logits_list)
    probs = tf.reduce_mean(probs_list, axis=0)
    labels_broadcasted = tf.broadcast_to(
        labels, [FLAGS.num_dropout_samples, tf.shape(labels)[0]])
    log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
        labels_broadcasted, logits_list, from_logits=True)
    # NLL of the sample-averaged predictive:
    # -log(mean_s p_s) = -logsumexp_s(log p_s) + log(S).
    negative_log_likelihood = tf.reduce_mean(
        -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
        tf.math.log(float(FLAGS.num_dropout_samples)))
    if dataset_name == 'clean':
      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)
      metrics['test/ece'].add_batch(probs, label=labels)
      metrics['test/stddev'].update_state(stddev)
    else:
      corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
          negative_log_likelihood)
      corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
          labels, probs)
      corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
          probs, label=labels)
      corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
          stddev)

  for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
    strategy.run(step_fn, args=(next(iterator),))
metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
train_step(train_iterator)
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
test_iterator = iter(test_dataset)
logging.info('Testing on dataset %s', dataset_name)
logging.info('Starting to run eval at epoch: %s', epoch)
test_start_time = time.time()
test_step(test_iterator, dataset_name)
ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
metrics['test/ms_per_example'].update_state(ms_per_example)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(
corrupt_metrics, corruption_types, max_intensity,
FLAGS.alexnet_errors_path)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
# Save final checkpoint.
final_checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved last checkpoint to %s', final_checkpoint_name)
# Export final model as SavedModel.
final_save_name = os.path.join(FLAGS.output_dir, 'model')
model.save(final_save_name)
logging.info('Saved model to %s', final_save_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
'gp_mean_field_factor': FLAGS.gp_mean_field_factor,
})
if __name__ == '__main__':
app.run(main)
|
google/uncertainty-baselines
|
baselines/imagenet/sngp.py
|
Python
|
apache-2.0
| 21,753
|
[
"Gaussian"
] |
80f5a3545846994894917a6ff14bf4e1790c91b80bf72b7613c8d2657840e437
|
""" Main module
"""
import os
import re
import json
import pprint
import datetime
import functools
import traceback
from hashlib import md5
from concurrent.futures import ThreadPoolExecutor
import tornado.web
import tornado.websocket
from tornado import gen
from tornado.web import HTTPError
from tornado.ioloop import IOLoop
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.JEncode import DATETIME_DEFAULT_FORMAT
from DIRAC.Core.Utilities.Decorators import deprecated
from DIRAC.Core.DISET.AuthManager import AuthManager
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
from DIRAC.Core.Tornado.Server.TornadoREST import (
TornadoREST,
authorization,
TornadoResponse,
) # pylint: disable=no-name-in-module
from DIRAC.FrameworkSystem.private.authorization.utils.Tokens import OAuth2Token
from WebAppDIRAC.Lib import Conf
from WebAppDIRAC.Lib.SessionData import SessionData
# Shared executor used by the legacy ``WebHandler.threadTask`` API to run
# blocking DISET calls off the IO loop.
# NOTE: the original ``global gThreadPool`` statement was dropped — ``global``
# is a no-op at module scope.
gThreadPool = ThreadPoolExecutor(100)

# Module-level logger for all web handlers.
sLog = gLogger.getSubLogger(__name__)
class UserCredentials:
    """Context manager that scopes RPC calls to a user's credentials.

    Usage::

        def post_job(self):
            with UserCredentials(**self.credDict):
                # Do stuff
    """

    # Single ThreadConfig instance shared by every UserCredentials object.
    __disetConfig = ThreadConfig()

    def __init__(self, **kwargs):
        self.dn, self.group, self.setup = kwargs["DN"], kwargs["group"], kwargs["setup"]

    def __enter__(self):
        credentials = (self.dn, self.group, self.setup)
        self.__disetConfig.load(credentials)
        return self.__disetConfig

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always drop the loaded credentials, even on error.
        self.__disetConfig.reset()
class FileResponse(TornadoResponse):
    """Response that serves a payload as a downloadable CSV/PNG file.

    Usage example::

        def web_myMethod(self):
            # Generate CSV data
            ...
            return FileResponse(data, 'filename', 'csv')
    """

    def __init__(self, payload, fileName: str, ext: str = "", cache: bool = True):
        """Constructor.

        :param payload: response body
        :param fileName: file name; its extension is used when *ext* is empty
        :param ext: file type ("csv", "png", ...)
        :param cache: whether the client may cache the response
        """
        name, _ext = os.path.splitext(fileName)
        # An explicit *ext* wins over the extension embedded in *fileName*.
        # BUGFIX: splitext() keeps the leading dot (".csv"), so the derived
        # extension never matched "csv"/"png" below — strip it.
        self.ext = (ext or _ext.lstrip(".")).lower()
        # Served file name is an MD5 of the base name (md5 takes bytes).
        self.fileHash = md5(name.encode()).hexdigest()
        self.cache = cache
        super().__init__(payload, 200)

    def _runActions(self, reqObj):
        """Set download/cache headers, then run the registered actions.

        :param reqObj: RequestHandler instance
        """
        # Set content type
        if self.ext == "csv":
            reqObj.set_header("Content-type", "text/csv")
        elif self.ext == "png":
            reqObj.set_header("Content-Transfer-Encoding", "Binary")
            reqObj.set_header("Content-type", "image/png")
        else:
            reqObj.set_header("Content-type", "text/plain")
        reqObj.set_header("Content-Disposition", f'attachment; filename="{self.fileHash}.{self.ext}"')
        reqObj.set_header("Content-Length", len(self.payload))
        if not self.cache:
            # Disable cache: Expires must lie in the PAST.
            # BUGFIX: the original subtracted timedelta(minutes=-10), i.e. set
            # Expires 10 minutes in the future, contradicting Cache-Control.
            reqObj.set_header("Cache-Control", "no-cache, no-store, must-revalidate, max-age=0")
            reqObj.set_header("Pragma", "no-cache")
            reqObj.set_header(
                "Expires",
                (datetime.datetime.utcnow() - datetime.timedelta(minutes=10)).strftime("%d %b %Y %H:%M:%S GMT"),
            )
        super()._runActions(reqObj)
class WErr(HTTPError):
    """HTTP error that keeps the original message and extra attributes."""

    def __init__(self, code, msg="", **kwargs):
        super().__init__(code, str(msg) or None)
        # Expose every extra keyword as an attribute of the error object.
        for attrName, attrValue in kwargs.items():
            setattr(self, attrName, attrValue)
        self.msg = msg
        self.kwargs = kwargs

    @classmethod
    def fromSERROR(cls, result):
        """Prevent major problem with % in the message"""
        strippedMessage = result["Message"].replace("%", "")
        return cls(500, strippedMessage)
def asyncWithCallback(method):
    # Backward-compatibility shim for old callback-style handlers.
    # NOTE(review): ``tornado.web.asynchronous`` was removed in Tornado 6 —
    # confirm this wrapper is only reachable with Tornado < 6 (or is dead code).
    return tornado.web.asynchronous(method)
def asyncGen(method):
    # Backward-compatibility shim around ``tornado.gen.coroutine`` for legacy
    # generator-based handlers; new code should use native ``async def``.
    return gen.coroutine(method)
def defaultEncoder(data):
    """JSON fallback encoder for types ``json`` cannot serialize natively.

    Converts:
      - ``set`` to ``list``
      - ``datetime.date`` / ``datetime.datetime`` to a formatted string

    :param data: value to encode
    :return: encoded value
    :raises TypeError: for any other unsupported type
    """
    if isinstance(data, set):
        return list(data)
    if isinstance(data, (datetime.date, datetime.datetime)):
        return data.strftime(DATETIME_DEFAULT_FORMAT)
    raise TypeError("Object of type {} is not JSON serializable".format(data.__class__.__name__))
class _WebHandler(TornadoREST):
    """Base class for WebApp request handlers on top of TornadoREST.

    Adds DIRAC-specific request preparation: it derives setup/group from the
    request path, configures per-thread DISET credentials, supports
    session-cookie authentication and JSON/plain-text error reporting.
    """

    # Cached session object / session data (lazily created per request).
    __session = None
    __sessionData = None
    # Thread-local DISET configuration shared by all handler instances.
    __disetConfig = ThreadConfig()
    DEFAULT_AUTHENTICATION = ["SSL", "SESSION", "VISITOR"]
    # Auth requirements DEFAULT_AUTHORIZATION
    DEFAULT_AUTHORIZATION = None
    # Base URL prefix
    BASE_URL = None
    # Location of the handler in the URL
    DEFAULT_LOCATION = ""
    # RE to extract group and setup
    PATH_RE = None
    # Prefix of methods names
    METHOD_PREFIX = "web_"
    SUPPORTED_METHODS = (
        "POST",
        "GET",
    )
    # for backward compatibility
    LOCATION = ""
    AUTH_PROPS = None
    # pylint: disable=no-member

    @classmethod
    def _pre_initialize(cls):
        """Normalize legacy/new class attributes and register tornado URLs."""
        # For backward compatibility: old handlers set LOCATION/AUTH_PROPS,
        # new ones set DEFAULT_LOCATION/DEFAULT_AUTHORIZATION — mirror both.
        cls.LOCATION = cls.LOCATION or cls.DEFAULT_LOCATION
        cls.AUTH_PROPS = cls.AUTH_PROPS or cls.DEFAULT_AUTHORIZATION
        cls.DEFAULT_LOCATION = cls.DEFAULT_LOCATION or cls.LOCATION
        cls.DEFAULT_AUTHORIZATION = cls.DEFAULT_AUTHORIZATION or cls.AUTH_PROPS
        # Get tornado URLs
        urls = super()._pre_initialize()
        # Define base path regex to know setup/group
        cls.PATH_RE = re.compile(f"{cls.BASE_URL}(.*)")
        return urls

    def finish(self, data=None, *args, **kwargs):
        """Finishes this response, ending the HTTP request. More details:
        https://www.tornadoweb.org/en/stable/_modules/tornado/web.html#RequestHandler.finish
        """
        # Dict payloads are serialized to JSON with the DIRAC fallback encoder.
        if data and isinstance(data, dict):
            self.set_header("Content-Type", "application/json")
            data = json.dumps(data, default=defaultEncoder)
        return super().finish(data, *args, **kwargs)

    @classmethod
    def _getCSAuthorizarionSection(cls, handler):
        """Search endpoint auth section.

        :param str handler: API name, see :py:meth:`_getFullComponentName`

        :return: str
        """
        return Conf.getAuthSectionForHandler(handler)

    def _getMethodArgs(self, args: tuple, kwargs: dict):
        """Decode args.

        NOTE(review): the first three routing groups are skipped here —
        presumably setup/group/method captured by the URL pattern; confirm
        against the TornadoREST routing convention.

        :return: tuple(list, dict)
        """
        return super()._getMethodArgs(args=args[3:], kwargs=kwargs)

    async def prepare(self):
        """Prepare the request. It reads certificates and check authorizations.

        We make the assumption that there is always going to be a ``method`` argument
        regardless of the HTTP method used
        """
        # Parse request URI
        match = self.PATH_RE.match(self.request.path)
        groups = match.groups()
        # NOTE(review): using groups[0]/groups[1] implies BASE_URL itself
        # contributes capture groups for setup and group — confirm.
        self.__setup = groups[0] or Conf.setup()
        self.__group = groups[1]
        # Reset DISET settings
        self.__disetConfig.reset()
        # Forbid DISET use on the IO-loop thread (see __disetBlockDecor).
        self.__disetConfig.setDecorator(self.__disetBlockDecor)
        self.__disetDump = self.__disetConfig.dump()
        try:
            await super().prepare()
        except HTTPError as e:
            # Re-raise as WErr so write_error can render it consistently.
            raise WErr(e.status_code, e.log_message)
        # Configure DISET with user creds
        if self.getUserDN():
            self.__disetConfig.setDN(self.getUserDN())
        if self.getUserGroup():  # pylint: disable=no-value-for-parameter
            self.__disetConfig.setGroup(self.getUserGroup())  # pylint: disable=no-value-for-parameter
        self.__disetConfig.setSetup(self.__setup)
        # Snapshot the configured credentials for threadTask() workers.
        self.__disetDump = self.__disetConfig.dump()
        self.__forceRefreshCS()

    def __disetBlockDecor(self, func):
        """Decorator installed on the ThreadConfig so any direct DISET call on
        the IO loop raises instead of blocking the event loop."""
        def wrapper(*args, **kwargs):
            raise RuntimeError("All DISET calls must be made from inside a Threaded Task!")

        return wrapper

    def __forceRefreshCS(self):
        """Force refresh configuration from master configuration server"""
        # Only honoured when the client explicitly asks for it via header.
        if self.request.headers.get("X-RefreshConfiguration") == "True":
            self.log.debug("Initialize force refresh..")
            if not AuthManager("").authQuery("", dict(self.credDict), "CSAdministrator"):
                raise WErr(401, "Cannot initialize force refresh, request not authenticated")
            result = gConfig.forceRefresh()
            if not result["OK"]:
                raise WErr(501, result["Message"])

    def _gatherPeerCredentials(self):
        """
        Load client certchain in DIRAC and extract informations.

        The dictionary returned is designed to work with the AuthManager,
        already written for DISET and re-used for HTTPS.

        :returns: a dict containing the return of :py:meth:`DIRAC.Core.Security.X509Chain.X509Chain.getCredentials`
                  (not a DIRAC structure !)
        """
        # Authorization type
        self.__authGrant = ["VISITOR"]
        if self.request.protocol == "https":
            # First of all we try to authZ with what is specified in cookies, and if attempt is unsuccessful authZ as visitor
            self.__authGrant.insert(0, self.get_cookie("authGrant", "SSL").replace("Certificate", "SSL"))
        credDict = super()._gatherPeerCredentials(grants=self.__authGrant)
        # Add a group if it present in the request path
        if credDict and self.__group:
            # The URL-supplied group still has to be validated downstream.
            credDict["validGroup"] = False
            credDict["group"] = self.__group
        return credDict

    def _authzSESSION(self):
        """Fill credentials from the secure session cookie.

        :return: S_OK(dict)
        """
        credDict = {}
        # Session
        sessionID = self.get_secure_cookie("session_id")
        if not sessionID:
            self.clear_cookie("authGrant")
            return S_OK(credDict)
        # Each session depends on the tokens
        try:
            sLog.debug("Load session tokens..")
            token = OAuth2Token(sessionID.decode())
            sLog.debug("Found session tokens:\n", pprint.pformat(token))
            try:
                # Fast path: the stored access token is still valid.
                return self._authzJWT(token["access_token"])
            except Exception as e:
                sLog.debug("Cannot check access token %s, try to fetch.." % repr(e))
                # Try to refresh access_token and refresh_token
                result = self._idps.getIdProvider("DIRACWeb")
                if not result["OK"]:
                    return result
                cli = result["Value"]
                token = cli.refreshToken(token["refresh_token"])
                # store it to the secure cookie
                self.set_secure_cookie("session_id", json.dumps(token), secure=True, httponly=True)
                return self._authzJWT(token["access_token"])
        except Exception as e:
            sLog.debug(repr(e))
            # if attempt is unsuccessful expire session
            self.clear_cookie("session_id")
            self.set_cookie("session_id", "expired")
            self.set_cookie("authGrant", "Visitor")
            return S_ERROR(repr(e))

    @property
    def log(self):
        # Handler-local access to the module logger.
        return sLog

    @classmethod
    def getLog(cls):
        # Class-level access to the module logger.
        return sLog

    def getCurrentSession(self):
        # Returns the cached session object (None unless set elsewhere).
        return self.__session

    def getUserSetup(self):
        # DIRAC setup parsed from the request path in prepare().
        return self.__setup

    def getSessionData(self):
        # Lazily build the SessionData for this request, then return its data.
        if not self.__sessionData:
            self.__sessionData = SessionData(self.credDict, self.__setup)
        return self.__sessionData.getData()

    def getAppSettings(self, app=None):
        # CS settings for *app*; defaults to the handler name minus "Handler".
        return Conf.getAppSettings(app or self.__class__.__name__.replace("Handler", "")).get("Value") or {}

    def write_error(self, status_code, **kwargs):
        """Render an error response as plain text or, for WErr dict messages,
        as JSON."""
        self.set_status(status_code)
        cType = "text/plain"
        data = self._reason
        if "exc_info" in kwargs:
            ex = kwargs["exc_info"][1]
            trace = traceback.format_exception(*kwargs["exc_info"])
            if not isinstance(ex, WErr):
                # Unknown exception: append the full traceback to the reason.
                data += "\n".join(trace)
            else:
                if self.settings.get("debug"):
                    self.log.error("Request ended in error:\n %s" % "\n ".join(trace))
                data = ex.msg
                if isinstance(data, dict):
                    cType = "application/json"
                    data = json.dumps(data)
        self.set_header("Content-Type", cType)
        self.finish(data)

    @deprecated("Should be deprecated for v5+, use FileResponse class instead")
    def finishWithImage(self, data, plotImageFile, disableCaching=False):
        """Send *data* as a PNG attachment named after an MD5 of *plotImageFile*."""
        # Set headers
        self.set_header("Content-Type", "image/png")
        self.set_header(
            "Content-Disposition", 'attachment; filename="%s.png"' % md5(plotImageFile.encode()).hexdigest()
        )
        self.set_header("Content-Length", len(data))
        self.set_header("Content-Transfer-Encoding", "Binary")
        if disableCaching:
            self.set_header("Cache-Control", "no-cache, no-store, must-revalidate, max-age=0")
            self.set_header("Pragma", "no-cache")
            # NOTE(review): subtracting timedelta(minutes=-10) puts Expires 10
            # minutes in the FUTURE; a past date was almost certainly intended.
            self.set_header(
                "Expires",
                (datetime.datetime.utcnow() - datetime.timedelta(minutes=-10)).strftime("%d %b %Y %H:%M:%S GMT"),
            )
        # Return the data
        self.finish(data)
class WebHandler(_WebHandler):
    """Old WebHandler, kept for backward compatibility.

    Routes requests of the form ``<BASE_URL><LOCATION>/<method>`` and runs
    blocking work through :py:meth:`threadTask`.
    """

    @classmethod
    def _pre_initialize(cls):
        """Register URLs, adding a pattern that points at the target method."""
        # Get tornado URLs
        urls = super()._pre_initialize()
        # Add a pattern that points to the target method.
        # Note that there are handlers with an index method.
        # It responds to the request without specifying a method.
        # The special characters "*" helps to take into account such a case,
        # see https://docs.python.org/3/library/re.html#regular-expression-syntax.
        # E.g .: /DIRAC/ -> RootHandler.web_index
        # NOTE(review): "[A-z]" also matches the characters between 'Z' and
        # 'a' ([, \, ], ^, _, `); "[A-Za-z]" was probably intended.
        cls.PATH_RE = re.compile(f"{cls.BASE_URL}({cls.LOCATION}/[A-z0-9_]*)")
        return urls

    def get(self, setup, group, *pathArgs):
        """Dispatch a GET request to the resolved ``web_*`` method.

        ``setup`` and ``group`` come from the routing regex but are unused
        here; the path is re-parsed in ``_WebHandler.prepare``.
        """
        self.initializeRequest()
        return self._getMethod()(*pathArgs)

    def post(self, *args, **kwargs):
        # POST is handled exactly like GET.
        return self.get(*args, **kwargs)

    def threadTask(self, method, *args, **kwargs):
        """Run *method* in the shared thread pool with this request's DISET
        configuration loaded; returns an awaitable future.

        NOTE(review): ``self.__disetConfig`` mangles to
        ``_WebHandler__disetConfig`` in this class too (leading underscores of
        the class name are stripped during mangling), which is exactly the
        attribute defined on the parent ``_WebHandler`` — it works, but only
        by that naming coincidence.
        """

        def threadJob(*targs, **tkwargs):
            # Rebind the snapshot of the request credentials in this worker
            # thread before making any DISET call.
            args = targs[0]
            disetConf = targs[1]
            self.__disetConfig.reset()
            self.__disetConfig.load(disetConf)
            return method(*args, **tkwargs)

        targs = (args, self.__disetDump)
        return IOLoop.current().run_in_executor(gThreadPool, functools.partial(threadJob, *targs, **kwargs))
class WebSocketHandler(tornado.websocket.WebSocketHandler, WebHandler):
    """WebSocket flavour of the old WebHandler."""

    def __init__(self, *args, **kwargs):
        # Initialize both bases explicitly (cooperative super() is not used
        # here because the two tornado bases share a common ancestor).
        WebHandler.__init__(self, *args, **kwargs)
        tornado.websocket.WebSocketHandler.__init__(self, *args, **kwargs)

    @classmethod
    def _pre_initialize(cls):
        """Register a single URL for the socket location."""
        # For backward compatibility
        cls.LOCATION = cls.LOCATION or cls.DEFAULT_LOCATION
        cls.AUTH_PROPS = cls.AUTH_PROPS or cls.DEFAULT_AUTHORIZATION
        cls.DEFAULT_LOCATION = cls.DEFAULT_LOCATION or cls.LOCATION
        cls.DEFAULT_AUTHORIZATION = cls.DEFAULT_AUTHORIZATION or cls.AUTH_PROPS
        # Define base path regex to know setup/group
        cls.PATH_RE = re.compile(url := f"{cls.BASE_URL}({cls.LOCATION})")
        sLog.verbose(f" - WebSocket {cls.LOCATION} -> {cls.__name__}")
        sLog.debug(f" * {url}")
        return [(url, cls)]

    def open(self, *args, **kwargs):
        """Invoked when a new WebSocket is opened, read more in tornado `docs.\
        <https://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.open>`_
        """
        return self.on_open()

    def on_open(self):
        """Developer should implement this method"""
        raise NotImplementedError('"on_open" method is not implemented')

    def _getMethod(self):
        """Get method function to call."""
        return self.on_open
|
DIRACGrid/WebAppDIRAC
|
src/WebAppDIRAC/Lib/WebHandler.py
|
Python
|
gpl-3.0
| 16,064
|
[
"DIRAC"
] |
72c785c6630ccba7ce32e20684594c233773635acd4a64b9683b64780f5ec788
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenType-related data."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
from nototools import unicode_data
# OMPL maps each mirrored character to its bidi-mirroring glyph (filled lazily
# by _set_ompl()).
OMPL = {}


def _set_ompl():
    """Set up OMPL.

    OMPL is defined to be the list of mirrored pairs in Unicode 5.1:
    http://www.microsoft.com/typography/otspec/ttochap1.htm#ltrrtl
    """
    global OMPL
    unicode_data.load_data()
    # NOTE(review): reaches into a private attribute of nototools.unicode_data;
    # prefer a public accessor if one exists.
    bmg_data = unicode_data._bidi_mirroring_glyph_data
    # Keep only pairs whose characters already existed in Unicode 5.1.
    OMPL = {char: bmg for (char, bmg) in bmg_data.items()
            if float(unicode_data.age(char)) <= 5.1}
# Commonly required control/format characters, as lists of codepoints.
ZWSP = [0x200B]                 # ZERO WIDTH SPACE
JOINERS = [0x200C, 0x200D]      # ZERO WIDTH NON-JOINER, ZERO WIDTH JOINER
BIDI_MARKS = [0x200E, 0x200F]   # LEFT-TO-RIGHT MARK, RIGHT-TO-LEFT MARK
DOTTED_CIRCLE = [0x25CC]        # base for displaying isolated combining marks

# Per-script lists of special characters a font must support.
# From the various script-specific specs at
# http://www.microsoft.com/typography/SpecificationsOverview.mspx
SPECIAL_CHARACTERS_NEEDED = {
    'Arab': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
    'Beng': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Bugi': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Deva': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Gujr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Guru': ZWSP + JOINERS + DOTTED_CIRCLE,
    # Hangul may not need the special characters:
    # https://code.google.com/p/noto/issues/detail?id=147#c2
    # 'Hang': ZWSP + JOINERS,
    'Hebr': BIDI_MARKS + DOTTED_CIRCLE,
    'Java': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Khmr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Knda': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Laoo': ZWSP + DOTTED_CIRCLE,
    'Mlym': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Mymr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Orya': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Sinh': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Syrc': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
    'Taml': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Telu': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Thaa': BIDI_MARKS + DOTTED_CIRCLE,
    'Thai': ZWSP + DOTTED_CIRCLE,
    'Tibt': ZWSP + JOINERS + DOTTED_CIRCLE,
}
# www.microsoft.com/typography/otspec/os2.html#ur
# bit, block name, block range
_unicoderange_data = """0\tBasic Latin\t0000-007F
1\tLatin-1 Supplement\t0080-00FF
2\tLatin Extended-A\t0100-017F
3\tLatin Extended-B\t0180-024F
4\tIPA Extensions\t0250-02AF
\tPhonetic Extensions\t1D00-1D7F
\tPhonetic Extensions Supplement\t1D80-1DBF
5\tSpacing Modifier Letters\t02B0-02FF
\tModifier Tone Letters\tA700-A71F
6\tCombining Diacritical Marks\t0300-036F
\tCombining Diacritical Marks Supplement\t1DC0-1DFF
7\tGreek and Coptic\t0370-03FF
8\tCoptic\t2C80-2CFF
9\tCyrillic\t0400-04FF
\tCyrillic Supplement\t0500-052F
\tCyrillic Extended-A\t2DE0-2DFF
\tCyrillic Extended-B\tA640-A69F
10\tArmenian\t0530-058F
11\tHebrew\t0590-05FF
12\tVai\tA500-A63F
13\tArabic\t0600-06FF
\tArabic Supplement\t0750-077F
14\tNKo\t07C0-07FF
15\tDevanagari\t0900-097F
16\tBengali\t0980-09FF
17\tGurmukhi\t0A00-0A7F
18\tGujarati\t0A80-0AFF
19\tOriya\t0B00-0B7F
20\tTamil\t0B80-0BFF
21\tTelugu\t0C00-0C7F
22\tKannada\t0C80-0CFF
23\tMalayalam\t0D00-0D7F
24\tThai\t0E00-0E7F
25\tLao\t0E80-0EFF
26\tGeorgian\t10A0-10FF
\tGeorgian Supplement\t2D00-2D2F
27\tBalinese\t1B00-1B7F
28\tHangul Jamo\t1100-11FF
29\tLatin Extended Additional\t1E00-1EFF
\tLatin Extended-C\t2C60-2C7F
\tLatin Extended-D\tA720-A7FF
30\tGreek Extended\t1F00-1FFF
31\tGeneral Punctuation\t2000-206F
\tSupplemental Punctuation\t2E00-2E7F
32\tSuperscripts And Subscripts\t2070-209F
33\tCurrency Symbols\t20A0-20CF
34\tCombining Diacritical Marks For Symbols\t20D0-20FF
35\tLetterlike Symbols\t2100-214F
36\tNumber Forms\t2150-218F
37\tArrows\t2190-21FF
\tSupplemental Arrows-A\t27F0-27FF
\tSupplemental Arrows-B\t2900-297F
\tMiscellaneous Symbols and Arrows\t2B00-2BFF
38\tMathematical Operators\t2200-22FF
\tSupplemental Mathematical Operators\t2A00-2AFF
\tMiscellaneous Mathematical Symbols-A\t27C0-27EF
\tMiscellaneous Mathematical Symbols-B\t2980-29FF
39\tMiscellaneous Technical\t2300-23FF
40\tControl Pictures\t2400-243F
41\tOptical Character Recognition\t2440-245F
42\tEnclosed Alphanumerics\t2460-24FF
43\tBox Drawing\t2500-257F
44\tBlock Elements\t2580-259F
45\tGeometric Shapes\t25A0-25FF
46\tMiscellaneous Symbols\t2600-26FF
47\tDingbats\t2700-27BF
48\tCJK Symbols And Punctuation\t3000-303F
49\tHiragana\t3040-309F
50\tKatakana\t30A0-30FF
\tKatakana Phonetic Extensions\t31F0-31FF
51\tBopomofo\t3100-312F
\tBopomofo Extended\t31A0-31BF
52\tHangul Compatibility Jamo\t3130-318F
53\tPhags-pa\tA840-A87F
54\tEnclosed CJK Letters And Months\t3200-32FF
55\tCJK Compatibility\t3300-33FF
56\tHangul Syllables\tAC00-D7AF
57\tNon-Plane 0 *\tD800-DFFF
58\tPhoenician\t10900-1091F
59\tCJK Unified Ideographs\t4E00-9FFF
\tCJK Radicals Supplement\t2E80-2EFF
\tKangxi Radicals\t2F00-2FDF
\tIdeographic Description Characters\t2FF0-2FFF
\tCJK Unified Ideographs Extension A\t3400-4DBF
\tCJK Unified Ideographs Extension B\t20000-2A6DF
\tKanbun\t3190-319F
60\tPrivate Use Area (plane 0)\tE000-F8FF
61\tCJK Strokes\t31C0-31EF
\tCJK Compatibility Ideographs\tF900-FAFF
\tCJK Compatibility Ideographs Supplement\t2F800-2FA1F
62\tAlphabetic Presentation Forms\tFB00-FB4F
63\tArabic Presentation Forms-A\tFB50-FDFF
64\tCombining Half Marks\tFE20-FE2F
65\tVertical Forms\tFE10-FE1F
\tCJK Compatibility Forms\tFE30-FE4F
66\tSmall Form Variants\tFE50-FE6F
67\tArabic Presentation Forms-B\tFE70-FEFF
68\tHalfwidth And Fullwidth Forms\tFF00-FFEF
69\tSpecials\tFFF0-FFFF
70\tTibetan\t0F00-0FFF
71\tSyriac\t0700-074F
72\tThaana\t0780-07BF
73\tSinhala\t0D80-0DFF
74\tMyanmar\t1000-109F
75\tEthiopic\t1200-137F
\tEthiopic Supplement\t1380-139F
\tEthiopic Extended\t2D80-2DDF
76\tCherokee\t13A0-13FF
77\tUnified Canadian Aboriginal Syllabics\t1400-167F
78\tOgham\t1680-169F
79\tRunic\t16A0-16FF
80\tKhmer\t1780-17FF
\tKhmer Symbols\t19E0-19FF
81\tMongolian\t1800-18AF
82\tBraille Patterns\t2800-28FF
83\tYi Syllables\tA000-A48F
\tYi Radicals\tA490-A4CF
84\tTagalog\t1700-171F
\tHanunoo\t1720-173F
\tBuhid\t1740-175F
\tTagbanwa\t1760-177F
85\tOld Italic\t10300-1032F
86\tGothic\t10330-1034F
87\tDeseret\t10400-1044F
88\tByzantine Musical Symbols\t1D000-1D0FF
\tMusical Symbols\t1D100-1D1FF
\tAncient Greek Musical Notation\t1D200-1D24F
89\tMathematical Alphanumeric Symbols\t1D400-1D7FF
90\tPrivate Use (plane 15)\tFF000-FFFFD
\tPrivate Use (plane 16)\t100000-10FFFD
91\tVariation Selectors\tFE00-FE0F
\tVariation Selectors Supplement\tE0100-E01EF
92\tTags\tE0000-E007F
93\tLimbu\t1900-194F
94\tTai Le\t1950-197F
95\tNew Tai Lue\t1980-19DF
96\tBuginese\t1A00-1A1F
97\tGlagolitic\t2C00-2C5F
98\tTifinagh\t2D30-2D7F
99\tYijing Hexagram Symbols\t4DC0-4DFF
100\tSyloti Nagri\tA800-A82F
101\tLinear B Syllabary\t10000-1007F
\tLinear B Ideograms\t10080-100FF
\tAegean Numbers\t10100-1013F
102\tAncient Greek Numbers\t10140-1018F
103\tUgaritic\t10380-1039F
104\tOld Persian\t103A0-103DF
105\tShavian\t10450-1047F
106\tOsmanya\t10480-104AF
107\tCypriot Syllabary\t10800-1083F
108\tKharoshthi\t10A00-10A5F
109\tTai Xuan Jing Symbols\t1D300-1D35F
110\tCuneiform\t12000-123FF
\tCuneiform Numbers and Punctuation\t12400-1247F
111\tCounting Rod Numerals\t1D360-1D37F
112\tSundanese\t1B80-1BBF
113\tLepcha\t1C00-1C4F
114\tOl Chiki\t1C50-1C7F
115\tSaurashtra\tA880-A8DF
116\tKayah Li\tA900-A92F
117\tRejang\tA930-A95F
118\tCham\tAA00-AA5F
119\tAncient Symbols\t10190-101CF
120\tPhaistos Disc\t101D0-101FF
121\tCarian\t102A0-102DF
\tLycian\t10280-1029F
\tLydian\t10920-1093F
122\tDomino Tiles\t1F030-1F09F
\tMahjong Tiles\t1F000-1F02F
"""
# Flat list of (start, end, bucket_index, name) tuples, sorted by range start;
# filled lazily by _setup_unicoderange_data().
ur_data = []
# ur_bucket_info[i] lists the ur_data tuples belonging to os/2 bucket i.
ur_bucket_info = [[] for i in range(128)]
def _setup_unicoderange_data():
    """Populate ur_data and ur_bucket_info from _unicoderange_data.

    The os/2 unicoderange data consists of slightly under 128 'buckets',
    each holding one or more codepoint ranges with a name, start, and end.
    Bucket 57 is special: it corresponds to the UTF-16 surrogate code units
    and thereby stands in for all non-BMP codepoints; the other ranges are
    all disjoint.

    ur_data becomes the flat list of (start, end, bucket_index, name)
    tuples sorted by range start; ur_bucket_info groups the same tuples by
    bucket index.  Idempotent: the tables are built on first call and left
    unchanged afterwards.
    """
    if ur_data:
        return  # already initialized

    bucket = 0
    for line in _unicoderange_data.splitlines():
        bucket_str, block_name, span = line.split('\t')
        start_str, end_str = span.split('-')
        # A continuation line has an empty bucket column and inherits the
        # bucket index of the previous line.
        if bucket_str:
            bucket = int(bucket_str)
        entry = (int(start_str, 16), int(end_str, 16), bucket, block_name)
        ur_data.append(entry)
        ur_bucket_info[bucket].append(entry)
    ur_data.sort()
def collect_unicoderange_info(cmap):
    """Return a list of 2-tuples, the first element a count of the characters in a
    range, the second element the 4-tuple of information about that range: start,
    end, bucket number, and name. Only ranges for which the cmap has a character
    are included."""
    _setup_unicoderange_data()
    range_count = 0  # characters of cmap seen so far in ur_data[index]
    index = 0        # current position in the sorted range list
    limit = len(ur_data)
    result = []
    # Single merge pass: both sorted(cmap) and ur_data ascend, so each range
    # is visited at most once.
    for cp in sorted(cmap):
        while index < limit:
            tup = ur_data[index]
            if cp <= tup[1]:
                # the ranges are disjoint and some characters fall into no
                # range, e.g. Javanese.
                if cp >= tup[0]:
                    range_count += 1
                break
            # Leaving this range: flush its count before advancing.
            if range_count:
                result.append((range_count, ur_data[index]))
                range_count = 0
            index += 1
    # Flush the count accumulated for the final range, if any.
    if range_count:
        result.append((range_count, ur_data[index]))
    return result
def unicoderange_bucket_info_name(bucket_info):
    """Return the comma-separated names of the ranges in bucket_info."""
    names = [range_tuple[3] for range_tuple in bucket_info]
    return ', '.join(names)
def unicoderange_bucket_info_size(bucket_info):
    """Return the total number of codepoints covered by bucket_info."""
    total = 0
    for start, end, _bucket, _name in bucket_info:
        total += end - start + 1
    return total
def unicoderange_bucket_index_to_info(bucket_index):
    """Return the list of range tuples for the given os/2 bucket index."""
    if not 0 <= bucket_index < 128:
        raise ValueError('bucket_index %s out of range' % bucket_index)
    _setup_unicoderange_data()
    return ur_bucket_info[bucket_index]
def unicoderange_bucket_index_to_name(bucket_index):
    """Return the comma-separated range names of the given os/2 bucket."""
    bucket_info = unicoderange_bucket_index_to_info(bucket_index)
    return unicoderange_bucket_info_name(bucket_info)
# Build the OMPL table once at import time.
if not OMPL:
    _set_ompl()
|
anthrotype/nototools
|
nototools/opentype_data.py
|
Python
|
apache-2.0
| 10,931
|
[
"FEFF"
] |
3480eebe8d4b651ad370038b30367e15f7c3e0413e9e20d40fef2945b5b2ccc9
|
# -*- coding: utf-8 -*-
#
# biseqt documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 8 14:22:42 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
# Mock dependencies that require arbitrary C binaries.
from mock import Mock as MagicMock
# Since Python 3.3 this can be done via:
# from unittest.mock import MagicMock
def setup(app):
    """Sphinx extension hook: register theme CSS/JS overrides.

    NOTE(review): ``app.add_stylesheet``/``app.add_javascript`` were
    deprecated in Sphinx 1.8 and removed in 4.0 in favour of
    ``add_css_file``/``add_js_file`` — confirm the pinned Sphinx version
    before upgrading.
    """
    app.add_stylesheet('theme_hacks.css')
    app.add_javascript('js_hacks.js')
    app.add_stylesheet('lightbox.min.css')
    app.add_javascript('lightbox.min.js')
# Hacks to make it work on readthedocs.org
if os.environ.get('READTHEDOCS', None) == 'True':
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['cffi', 'numpy', 'apsw', 'pysam', 'termcolor', 'Bio',
'matplotlib', 'matplotlib.figure', 'matplotlib.backends.backend_agg',
'mpl_toolkits.axes_grid1', 'mpl_toolkits.mplot3d',
'scipy', 'scipy.special', 'scipy.stats', 'scipy.spatial', 'igraph',
'scipy.ndimage',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
autodoc_mock_imports = MOCK_MODULES
# Call doxygen
root = os.path.dirname(os.path.dirname(__file__))
cmd = 'cd %s && rm -rf docs/_build && make docs/doxygen' % root
subprocess.call(cmd, shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxcontrib.wiki',
'breathe',
]
# version requirements
needs_extensions = {
'sphinxcontrib.wiki': '0.5.0',
}
autodoc_member_order = 'bysource'
autoclass_content = 'both' # appends `__init__.__doc__` to class docstring.
# wiki configuration
wiki_enabled = True
# Breathe configuration
breathe_projects = {"biseqt": "doxygen/xml"}
breathe_default_project = "biseqt"
breathe_domain_by_extension = {"h": "c", "c": "c"}
breathe_default_members = ('members', 'undoc-members')
# Add any paths that contain templates here, relative to this directory.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'biseqt'
copyright = u'2018, Amir Kadivar'
author = u'Amir Kadivar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags; kept in sync with `version`.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# The language for content autogenerated by Sphinx (None = English default).
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Custom static files (style sheets etc.), copied after the builtin static
# files, so a file named "default.css" will overwrite the builtin one.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# NOTE(review): the '*' glob matches only top-level documents; '**' would
# match nested ones too -- confirm whether nested docs exist.
html_sidebars = {'*': ['searchbox.html', 'navigation.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Output file base name for HTML help builder.
htmlhelp_basename = 'pydoc'
# -- Options for LaTeX output ---------------------------------------------
# All defaults; the commented keys document the available overrides.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'biseqt.tex', u'biseqt Documentation',
     u'Amir Kadivar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'biseqt', u'biseqt Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'biseqt', u'biseqt Documentation',
     author, 'Amir Kadivar', 'BiSeqt is a biological sequence alignment tool.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
amirkdv/biseqt
|
docs/conf.py
|
Python
|
bsd-3-clause
| 11,019
|
[
"pysam"
] |
75357687f5d86a3353f0d143878571c786e484c6162383d098e0db109e120cc0
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import Union
from ORCA.scripts.BaseScript import cBaseScript
import ORCA.Globals as Globals
class cKeyhandlerTemplate(cBaseScript):
    """Template class for key handler scripts.

    Subscribes itself to the global "on_key" notification and receives the
    forwarded key events in :meth:`HandleKey`.
    """

    def __init__(self):
        cBaseScript.__init__(self)
        self.uType: str = u'KEYHANDLER'  # script category identifier
        self.iHash: int = 0              # handle of the registered notification

    def RunScript(self, *args, **kwargs) -> Union[Dict, None]:
        """Main entry point: dispatches to Register / UnRegister."""
        wants_register = 'register' in args or kwargs.get("caller") == "appstart"
        if wants_register:
            return self.Register(*args, **kwargs)
        if "unregister" in args:
            return self.UnRegister(*args, **kwargs)
        return None

    # noinspection PyUnusedLocal
    def Register(self, *args, **kwargs) -> None:
        """Subscribe HandleKey to the "on_key" notification, keeping the hash
        so the subscription can be removed later."""
        self.iHash = Globals.oNotifications.RegisterNotification(
            uNotification="on_key",
            fNotifyFunction=self.HandleKey,
            uDescription=self.uObjectName,
            aValueLinks=[{"in": "key", "out": "key"}])
        return None

    # noinspection PyUnusedLocal
    def UnRegister(self, *args, **kwargs) -> None:
        """Remove the previously registered notification handler."""
        Globals.oNotifications.UnRegisterNotification_ByHash(iHash=self.iHash)
        return None

    def HandleKey(self, **kwargs) -> Dict[str, str]:
        """Key event callback; the template consumes nothing and returns {}."""
        return {}
|
thica/ORCA-Remote
|
src/ORCA/scripttemplates/Template_Keyhandler.py
|
Python
|
gpl-3.0
| 2,115
|
[
"ORCA"
] |
40472989ef6300896d554933b2cef04c3f15cad7e3329c54ca465b9dcf72ca6f
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.starformation Post processing data from EAGLE simulation results.
#
# This module incorporates post processing functions to handle EAGLE output, for use in PTS.
#
# James Trayford - 26/01/2015
# -----------------------------------------------------------------
# Import standard modules
import numpy as np
# -----------------------------------------------------------------
## Reads a hdf5 field's attributes into a python dictionary.
def readAttrs(hdf, field='Header'):
    """Return the attributes of ``hdf[field]`` as a plain dictionary.

    Attribute names are assumed to be strings (HDF5 convention), matching the
    original implementation's ``str(key)`` lookup.
    """
    attrs = hdf[field].attrs
    return {key: attrs[str(key)] for key in list(attrs)}
# -----------------------------------------------------------------
## Reads the Schmidt parameters into a python structure.
def schmidtParameters(consts, runpars):
    """Derive the Kennicutt-Schmidt star-formation-law parameters (cgs) from
    an EAGLE snapshot's physical constants and runtime parameters.

    Returns a 3-tuple:
      sfparams -- (RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff)
      sf_lo    -- (normalisation, GAMMA/GRAV, exponent) below the density break
      sf_hi    -- (normalisation, GAMMA/GRAV, exponent) above the density break
    """
    # extract relevant unit conversions
    CM_PER_MPC = consts['CM_PER_MPC']
    GAMMA = consts['GAMMA']
    GRAV = consts['GRAVITY']
    K_B = consts['BOLTZMANN']
    M_PROTON = consts['PROTONMASS']
    M_SUN = consts['SOLAR_MASS']
    SEC_PER_YEAR = consts['SEC_PER_YEAR']
    # extract relevant runtime parameters used to create EAGLE snapshot
    GammaEff = runpars['EOS_Jeans_GammaEffective']
    InitH = runpars['InitAbundance_Hydrogen']
    RhoHi = runpars['SF_SchmidtLawHighDensThresh_HpCM3']
    RhoNorm = runpars['EOS_NormPhysDens_HpCM3']
    SchmidtCoeff = runpars['SF_SchmidtLawCoeff_MSUNpYRpKPC2']
    SchmidtExp = runpars['SF_SchmidtLawExponent']
    SchmidtExpHi = runpars['SF_SchmidtLawHighDensExponent']
    T_JeansNorm = runpars['EOS_Jeans_TempNorm_K']
    # Normalisation in cgs units (coefficient is given per kpc^2 per yr)
    Norm_cgs = SchmidtCoeff * pow(pow(CM_PER_MPC / 1.e6, 2) / M_SUN , SchmidtExp - 1) / (1.e6 * SEC_PER_YEAR)
    # High density threshold converted from H atoms/cm^3 to g/cm^3
    RhoHi_cgs = RhoHi * M_PROTON / InitH
    # Density normalisation in cgs
    RhoNorm_cgs = RhoNorm * M_PROTON / InitH
    # Min total Pressure
    # NOTE(review): 1.22 appears to be a mean molecular weight -- confirm
    P_totc = RhoNorm * T_JeansNorm * K_B / (InitH * 1.22)
    # Pressure at high density Schmidt law break (polytropic EoS)
    PBreak_cgs = P_totc * (RhoHi/RhoNorm) ** GammaEff
    # Assume f_g = 1: high-density normalisation chosen so the two branches
    # of the Schmidt law are continuous at the break pressure
    NormHi_cgs = Norm_cgs * (GAMMA * PBreak_cgs / GRAV) ** ((SchmidtExp - SchmidtExpHi) * 0.5)
    # tuple of universal SF parameters
    sfparams = RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff
    # tuples of high and low pressure SF parameters
    sf_lo = Norm_cgs, GAMMA/GRAV, SchmidtExp
    sf_hi = NormHi_cgs, GAMMA/GRAV, SchmidtExpHi
    return sfparams, sf_lo, sf_hi
# -----------------------------------------------------------------
## Function to obtain SFR of gas from which star particles formed.
#
# Inputs:
# - rho_form: gas density at formation of star particle
# - mass: mass of star particle
# - schmidtpars: parameters for implementing Schmidt law from schmidtParameters()
#
# Outputs:
# - SFR = Star formation rate for gas particle in input mass units per year
#
def getSFR(rho_form, mass, schmidtpars):
    """Return the star formation rate of the gas a star particle formed from.

    Parameters:
      rho_form    -- gas density at formation (cgs); scalar or numpy array
      mass        -- star particle mass; scalar or array matching rho_form
      schmidtpars -- (universal, low-pressure, high-pressure) parameter
                     tuples as returned by schmidtParameters()

    Returns the SFR in input mass units per year (always wrapped by
    np.array, matching the original behavior).
    """
    # unpack universal SF law parameters
    RhoNorm_cgs, RhoHi_cgs, P_totc, PBreak_cgs, GammaEff = schmidtpars[0]
    # Pressure at star formation from the polytropic effective EoS
    P_form = P_totc * (rho_form / RhoNorm_cgs) ** GammaEff
    # unpack high and low pressure SF law parameters
    sf_lo, sf_hi = schmidtpars[1:]
    # The Schmidt law has a break at RhoHi_cgs, with separate
    # normalisation/exponent on each side of the break.
    if isinstance(rho_form, np.ndarray):
        hidx = rho_form > RhoHi_cgs
        SFR = np.zeros(rho_form.size)
        if np.any(hidx):
            SFR[hidx] = mass[hidx] * sf_hi[0] * (sf_hi[1] * P_form[hidx]) ** ((sf_hi[2] - 1) * 0.5)
        # fix: invert the boolean mask with ~ ; the original used unary
        # minus (-hidx), which raises a TypeError in modern numpy
        if np.any(~hidx):
            SFR[~hidx] = mass[~hidx] * sf_lo[0] * (sf_lo[1] * P_form[~hidx]) ** ((sf_lo[2] - 1) * 0.5)
    else:
        if rho_form > RhoHi_cgs:
            SFR = mass * sf_hi[0] * (sf_hi[1] * P_form) ** ((sf_hi[2] - 1) * 0.5)
        else:
            SFR = mass * sf_lo[0] * (sf_lo[1] * P_form) ** ((sf_lo[2] - 1) * 0.5)
    # return SFR converted to input mass units per year from per second
    return np.array(SFR) * 3.15569e7
# -----------------------------------------------------------------
## Function to obtain ambient pressure of gas from which star particles formed.
#
# Inputs:
# - rho: gas density of star forming particle
# - schmidtpars: parameters for implementing Schmidt law from schmidtParameters()
#
# Outputs:
# - P_tot: Ambient pressure from polytropic effective EoS (Schaye & Dalla Vecchia (2004))
#
def getPtot(rho, schmidtpars):
    """Return the ambient pressure for gas of density ``rho`` from the
    polytropic effective EoS (Schaye & Dalla Vecchia 2004).

    ``schmidtpars`` is the tuple returned by schmidtParameters(); only its
    first (universal) element is used here.
    """
    rho_norm, _rho_hi, p_norm, _p_break, gamma_eff = schmidtpars[0]
    return p_norm * (rho / rho_norm) ** gamma_eff
# -----------------------------------------------------------------
## Function to sample a star forming gas particles into a number of sub-particles.
#
# Inputs:
# - sfr: star formation rate in solar masses per yr
# - m_gas: particle mass in solar masses
#
# Outputs:
# - nested arrays with a list of subparticles for each parent input particle:
# - ms: sub-particle stellar masses in solar masses
# - ts: lookback times of sub-particle formation
# - idxs: index of the sub-particle's parent particle in input array
# - mdiffs: mass of parent particles locked up in new stars; this can be subtracted from the parent gas
# particles for mass conservation
#
def stochResamp(sfr, m_gas):
    """Stochastically resample star-forming gas particles into stellar
    sub-particles.

    Parameters:
      sfr   -- star formation rates, one per parent particle (numpy array)
      m_gas -- parent particle masses in M_solar (numpy array, same length)

    Returns (ms, ts, idxs, mdiffs): sub-particle masses, formation lookback
    times, parent indices, and per-parent mass locked into stars.
    Uses np.random, so results depend on the global RNG state.
    """
    # mass resampling parameters (see Kennicutt & Evans 2012 section 2.5)
    m_min = 700  # minimum mass of sub-particle in M_solar
    m_max = 1e6  # maximum mass of sub-particle in M_solar
    alpha = 1.8  # exponent of power-law mass function
    alpha1 = 1. - alpha
    # age resampling parameters
    thresh_age = 1e8  # period over which to resample in yr (100 Myr)
    # initialise lists for output; the leading empty list keeps np.hstack
    # well-defined even when no sub-particles are produced at all
    ms = [[]]
    ts = [[]]
    idxs = [[]]
    mdiffs = []
    # for each parent particle, determine the star-forming sub-particles
    for i in range(sfr.size):
        # NOTE(review): sfri == 0 yields t = -inf below, which the t > 0
        # mask then filters out -- confirm this is the intended handling
        sfri = sfr[i]
        mi = m_gas[i]
        # determine the maximum number of sub-particles based on the minimum sub-particle mass
        N = int(max(1,np.ceil(mi/m_min)))
        # generate random sub-particle masses from a power-law distribution
        # between min and max values (inverse-CDF sampling)
        X = np.random.random(N)
        m = (m_min**alpha1 + X*(m_max**alpha1-m_min**alpha1))**(1./alpha1)
        # limit and normalize the list of sub-particles to the total mass of
        # the parent; keep at least one sub-particle
        mlim = m[np.cumsum(m)<=mi]
        if len(mlim)<1: mlim = m[:1]
        m = mi/mlim.sum() * mlim
        N = len(m)
        # generate random decay lookback time for each sub-particle;
        # np.random.random is in [0,1), so 1-X is in (0,1] and the log is finite
        X = np.random.random(N)
        t = thresh_age + mi/sfri * np.log(1-X)
        # determine mask for sub-particles that form stars by present day
        issf = t > 0.
        # add star-forming sub-particles to the output lists
        ms.append(m[issf])
        ts.append(t[issf])
        idxs.append([i]*np.count_nonzero(issf))
        mdiffs.append(m[issf].sum())
    # convert sub-particle lists into numpy arrays
    ms = np.hstack(ms)
    ts = np.hstack(ts)
    idxs = np.hstack(idxs).astype(int)
    mdiffs = np.array(mdiffs)
    return ms, ts, idxs, mdiffs
# -----------------------------------------------------------------
## Function to randomly shift the positions of HII region sub-particles within the smoothing sphere of their parent
#
# Arguments:
# - r: parent positions; updated by this function to the shifted positions
# - h: the smoothing lengths of the parents
# - h_mapp: the smoothing lengths of the sub-particles
#
def stochShiftPos(r, h, h_mapp):
    """Randomly shift HII-region sub-particle positions within the smoothing
    sphere of their parent particle, in place.

    The sampling smoothing length is chosen so that, in the limit of many
    sub-particles, the light distribution matches the parent kernel:
    assuming Gaussian kernels, h_sampling**2 + h_mapp**2 = h**2.
    The scale 0.29 makes the Gaussian resemble a cubic spline kernel
    (see the SPHDustDistribution class in SKIRT).
    """
    h_sampling = np.sqrt(np.maximum(0, h * h - h_mapp * h_mapp))
    # one independent Gaussian offset per coordinate axis
    for axis in range(3):
        r[:, axis] += h_sampling * np.random.normal(scale=0.29,
                                                    size=h_sampling.shape)
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/eagle/starformation.py
|
Python
|
mit
| 8,784
|
[
"Gaussian"
] |
bc08a7efb117da64ec4dacbc36cc941790814da16bc5bb2d2a5d3569e57d2e49
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import tests_common
import espressomd
import espressomd.interactions
class Non_bonded_interactionsTests(ut.TestCase):
    """Verify that non-bonded interaction parameters set through the Python
    interface are stored in the core and returned unchanged, and that the
    interaction classes expose consistent key sets."""
    # Simulation box shared by all generated tests in this class.
    system = espressomd.System(box_l=[20.0, 20.0, 20.0])

    def intersMatch(self, inType, outInter, inParams, outParams, msg_long):
        """Check, if the interaction type set and gotten back as well as the
        bond parameters set and gotten back match. Only check keys present in
        ``inParams``.
        """
        self.assertIsInstance(outInter, inType)
        tests_common.assert_params_match(self, inParams, outParams, msg_long)

    def parameterKeys(self, interObject):
        """
        Check :meth:`~espressomd.interactions.NonBondedInteraction.valid_keys`
        and :meth:`~espressomd.interactions.NonBondedInteraction.required_keys`
        return sets, and that
        :meth:`~espressomd.interactions.NonBondedInteraction.default_params`
        returns a dictionary with the correct keys.

        Parameters
        ----------
        interObject: instance of a class derived from :class:`espressomd.interactions.NonBondedInteraction`
            Object of the interaction to test, e.g.
            :class:`~espressomd.interactions.LennardJonesInteraction`
        """
        classname = interObject.__class__.__name__
        valid_keys = interObject.valid_keys()
        required_keys = interObject.required_keys()
        default_keys = set(interObject.default_params().keys())
        self.assertIsInstance(valid_keys, set,
                              "{}.valid_keys() must return a set".format(
                                  classname))
        self.assertIsInstance(required_keys, set,
                              "{}.required_keys() must return a set".format(
                                  classname))
        self.assertTrue(default_keys.issubset(valid_keys),
                        "{}.default_params() has unknown parameters: {}".format(
                            classname, default_keys.difference(valid_keys)))
        self.assertTrue(default_keys.isdisjoint(required_keys),
                        "{}.default_params() has extra parameters: {}".format(
                            classname, default_keys.intersection(required_keys)))
        self.assertSetEqual(default_keys, valid_keys - required_keys,
                            "{}.default_params() should have keys: {}, got: {}".format(
                                classname, valid_keys - required_keys, default_keys))

    # NOTE: this is a test *factory*, evaluated at class-definition time
    # (hence no ``self``); the returned closure becomes a bound test method.
    def generateTestForNon_bonded_interaction(
            _partType1, _partType2, _interClass, _params, _interName):
        """Generates test cases for checking interaction parameters set and
        gotten back from the espresso core actually match those in the Python
        classes. Only keys which are present in ``_params`` are checked.

        Parameters
        ----------
        _partType1, _partType2: :obj:`int`
            Particle type ids to check on
        _interClass: class derived from :class:`espressomd.interactions.NonBondedInteraction`
            Class of the interaction to test, e.g.
            :class:`~espressomd.interactions.LennardJonesInteraction`
        _params: :obj:`dict`
            Interaction parameters, e.g. ``{"k": 1., "r_0": 0}``
        _interName: :obj:`str`
            Name of the interaction property to set (e.g. ``"lennard_jones"``)
        """
        partType1 = _partType1
        partType2 = _partType2
        interClass = _interClass
        params = _params
        interName = _interName

        def func(self):
            # This code is run at the execution of the generated function.
            # It will use the state of the variables in the outer function,
            # which was there, when the outer function was called
            # Set parameters
            getattr(self.system.non_bonded_inter[partType1, partType2],
                    interName).set_params(**params)
            # Read them out again
            outInter = getattr(
                self.system.non_bonded_inter[partType1, partType2], interName)
            outParams = outInter.get_params()
            self.intersMatch(
                interClass, outInter, params, outParams,
                "{}: value set and value gotten back differ for particle types {} and {}: {} vs. {}"
                .format(interClass(**params).type_name(), partType1, partType2,
                        params, outParams))
            self.parameterKeys(outInter)
        return func

    # Generated round-trip tests, only bound when the core was compiled with
    # the corresponding feature.
    if espressomd.has_features(["LENNARD_JONES"]):
        test_lj1 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.LennardJonesInteraction,
            {"epsilon": 1., "sigma": 2., "cutoff": 3.,
             "shift": 4., "offset": 5., "min": 7.},
            "lennard_jones")
        test_lj2 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.LennardJonesInteraction,
            {"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
             "shift": 4.1, "offset": 5.1, "min": 7.1},
            "lennard_jones")
        test_lj3 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.LennardJonesInteraction,
            {"epsilon": 1.3, "sigma": 2.2, "cutoff": 3.4,
             "shift": 4.1, "offset": 5.1, "min": 7.1},
            "lennard_jones")
    if espressomd.has_features(["LENNARD_JONES_GENERIC"]):
        test_ljgen1 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.GenericLennardJonesInteraction,
            {"epsilon": 1., "sigma": 2., "cutoff": 3., "shift": 4.,
             "offset": 5., "e1": 7, "e2": 8, "b1": 9., "b2": 10.},
            "generic_lennard_jones")
        test_ljgen2 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.GenericLennardJonesInteraction,
            {"epsilon": 1.1, "sigma": 2.1, "cutoff": 3.1, "shift": 4.1,
             "offset": 5.1, "e1": 71, "e2": 81, "b1": 9.1, "b2": 10.1},
            "generic_lennard_jones")
        test_ljgen3 = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.GenericLennardJonesInteraction,
            {"epsilon": 1.2, "sigma": 2.2, "cutoff": 3.2, "shift": 4.2,
             "offset": 5.2, "e1": 72, "e2": 82, "b1": 9.2, "b2": 10.2},
            "generic_lennard_jones")
    if espressomd.has_features(["GAY_BERNE"]):
        test_gb = generateTestForNon_bonded_interaction(
            0, 0, espressomd.interactions.GayBerneInteraction,
            {"eps": 1.0, "sig": 1.0, "cut": 4.0, "k1": 3.0,
             "k2": 5.0, "mu": 2.0, "nu": 1.0},
            "gay_berne")

    @utx.skipIfMissingFeatures("LENNARD_JONES")
    def test_exceptions(self):
        # Error messages are regex patterns because assertRaisesRegex
        # matches them against the raised exception's text.
        err_msg_required = (r"The following keys have to be given as keyword arguments: "
                            r"\['cutoff', 'epsilon', 'shift', 'sigma'\], got "
                            r"\['epsilon', 'sigma'\] \(missing \['cutoff', 'shift'\]\)")
        err_msg_valid = (r"Only the following keys can be given as keyword arguments: "
                         r"\['cutoff', 'epsilon', 'min', 'offset', 'shift', 'sigma'\], got "
                         r"\['cutoff', 'epsilon', 'shift', 'sigma', 'unknown'\] \(unknown \['unknown'\]\)")
        with self.assertRaisesRegex(ValueError, err_msg_required):
            espressomd.interactions.LennardJonesInteraction(
                epsilon=1., sigma=2.)
        with self.assertRaisesRegex(ValueError, err_msg_required):
            self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
                epsilon=1., sigma=2.)
        with self.assertRaisesRegex(ValueError, err_msg_valid):
            espressomd.interactions.LennardJonesInteraction(
                epsilon=1., sigma=2., cutoff=3., shift=4., unknown=5.)
        with self.assertRaisesRegex(ValueError, err_msg_valid):
            self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
                epsilon=1., sigma=2., cutoff=3., shift=4., unknown=5.)
# Allow running this test module directly.
if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/python/interactions_non-bonded_interface.py
|
Python
|
gpl-3.0
| 8,780
|
[
"ESPResSo"
] |
f58f80c38f40a2719b5b17e76b8b1e07b67e0d0db5b4e48e58a982a72ca9cd73
|
# -*- coding: utf-8 -*-
import os
class AdoptionCenter:
    """
    The AdoptionCenter class stores the important information that a
    client would need to know about, such as the different numbers of
    species stored, the location, and the name. It also has a method to adopt a pet.
    """

    def __init__(self, name, species_types, location):
        self.name = name
        self.location = location
        self.species_types = species_types

    def get_number_of_species(self, animal):
        # 0 for species the center has never stocked
        return self.species_types.get(animal, 0)

    def get_location(self):
        x, y = self.location
        return (float(x), float(y))

    def get_species_count(self):
        # Only species with at least one animal in stock
        return {animal: count
                for animal, count in self.species_types.items() if count > 0}

    def get_name(self):
        return self.name

    def adopt_pet(self, species):
        # Silently ignored for species the center does not carry
        if species in self.species_types:
            self.species_types[species] -= 1
class Adopter:
    """
    Adopters represent people interested in adopting a species.
    They have a desired species type that they want, and their score is
    simply the number of species that the shelter has of that species.
    """

    def __init__(self, name, desired_species):
        self.name = name
        self.desired_species = desired_species

    def get_name(self):
        return self.name

    def get_desired_species(self):
        return self.desired_species

    def get_score(self, adoption_center):
        # Score is a float: how many of the desired species are in stock
        available = adoption_center.get_species_count()
        return float(available.get(self.desired_species, 0))
class FlexibleAdopter(Adopter):
    """
    A FlexibleAdopter still has one type of species that they desire, but is
    also open to other species.  ``considered_species`` is a list of the
    additional species; each counts at 0.3 weight on top of the 1x weight
    of the desired species.
    """

    def __init__(self, name, desired_species, considered_species):
        Adopter.__init__(self, name, desired_species)
        self.considered_species = considered_species

    def get_score(self, adoption_center):
        counts = adoption_center.get_species_count()
        bonus = 0.3 * sum(counts.get(species, 0)
                          for species in self.considered_species)
        return Adopter.get_score(self, adoption_center) + bonus
class FearfulAdopter(Adopter):
    """
    A FearfulAdopter is afraid of a particular species.  Score is 1x the
    number of desired animals minus 0.3x the number of feared animals,
    clamped at zero.
    """

    def __init__(self, name, desired_species, feared_species):
        Adopter.__init__(self, name, desired_species)
        self.feared_species = feared_species

    def get_score(self, adoption_center):
        penalty = 0.3 * adoption_center.get_species_count().get(
            self.feared_species, 0)
        return max(0.0, Adopter.get_score(self, adoption_center) - penalty)
class AllergicAdopter(Adopter):
    """
    An AllergicAdopter is extremely allergic to one or more species and will
    not visit a center that carries any of them: score is 0.0 in that case,
    otherwise the plain Adopter score.
    """

    def __init__(self, name, desired_species, allergic_species):
        Adopter.__init__(self, name, desired_species)
        self.allergic_species = allergic_species

    def get_score(self, adoption_center):
        counts = adoption_center.get_species_count()
        if any(counts.get(species, 0) > 0 for species in self.allergic_species):
            return 0.0
        return Adopter.get_score(self, adoption_center)
class MedicatedAllergicAdopter(AllergicAdopter):
    """
    A MedicatedAllergicAdopter is allergic but medicated.  Among the allergic
    species the center actually carries, the lowest medicine effectiveness
    (default 0 for unlisted species) scales the plain Adopter score.
    A center with none of the allergens keeps the full score (factor 1.0).
    """

    def __init__(self, name, desired_species, allergic_species, medicine_effectiveness):
        AllergicAdopter.__init__(self, name, desired_species, allergic_species)
        self.medicine_effectiveness = medicine_effectiveness

    def get_score(self, adoption_center):
        available = adoption_center.get_species_count()
        worst_effect = 1.0
        for species in self.allergic_species:
            if species in available:
                worst_effect = min(worst_effect,
                                   self.medicine_effectiveness.get(species, 0))
        return Adopter.get_score(self, adoption_center) * worst_effect
class SluggishAdopter(Adopter):
    """
    A SluggishAdopter dislikes travelling: the further away the center, the
    smaller a random fraction of the base score they give it.

    Score:
      distance < 1 -> 1x base score (deterministic)
      distance < 3 -> uniform(0.7, 0.9) x base score
      distance < 5 -> uniform(0.5, 0.7) x base score
      otherwise    -> uniform(0.1, 0.5) x base score
    """

    def __init__(self, name, desired_species, location):
        Adopter.__init__(self, name, desired_species)
        self.location = location

    def get_linear_distance(self, to_location):
        import math
        dx = to_location[0] - self.location[0]
        dy = to_location[1] - self.location[1]
        return math.sqrt(dx * dx + dy * dy)

    def get_score(self, adoption_center):
        import random
        base = Adopter.get_score(self, adoption_center)
        distance = self.get_linear_distance(adoption_center.get_location())
        if distance < 1:
            return base
        if distance < 3:
            return random.uniform(0.7, 0.9) * base
        if distance < 5:
            return random.uniform(0.5, 0.7) * base
        return random.uniform(0.1, 0.5) * base
def get_ordered_adoption_center_list(adopter, list_of_adoption_centers):
    """Return the adoption centers ordered by the adopter's score, highest
    first; centers with equal scores are ordered alphabetically by name
    (Python's sort is stable, so the name pre-sort survives the score sort).
    """
    by_name = sorted(list_of_adoption_centers, key=lambda center: center.get_name())
    return sorted(by_name, key=lambda center: adopter.get_score(center),
                  reverse=True)
def get_adopters_for_advertisement(adoption_center, list_of_adopters, n):
    """
    Return the top n scoring Adopters from list_of_adopters, ordered from
    highest to lowest score for *adoption_center*; ties are broken
    alphabetically by adopter name (stable sort by name, then by score).

    Bug fix: the original returned s2[:n + 1] — i.e. n + 1 adopters —
    contradicting both its docstring and its explicit n > len(s2) guard.
    Slicing with [:n] also handles n >= len naturally, so no guard is needed.
    """
    def score_key(a):
        return a.get_score(adoption_center)

    def name_key(a):
        return a.get_name()

    ranked = sorted(sorted(list_of_adopters, key=name_key),
                    key=score_key, reverse=True)
    return ranked[:n]
# --- Ad-hoc smoke test: exercise the Adopter subclasses and AdoptionCenters ---
adopter = MedicatedAllergicAdopter("One", "Cat", ['Dog', 'Horse'], {"Dog": .5, "Horse": 0.2})
adopter2 = Adopter("Two", "Cat")
adopter3 = FlexibleAdopter("Three", "Horse", ["Lizard", "Cat"])
adopter4 = FearfulAdopter("Four","Cat","Dog")
adopter5 = SluggishAdopter("Five","Cat", (1,2))
adopter6 = AllergicAdopter("Six", "Cat", "Dog")
ac = AdoptionCenter("Place1", {"Mouse": 12, "Dog": 2}, (1,1))
ac2 = AdoptionCenter("Place2", {"Cat": 12, "Lizard": 2}, (3,5))
ac3 = AdoptionCenter("Place3", {"Horse": 25, "Dog": 9}, (-2,10))
# how to test get_adopters_for_advertisement
o = get_adopters_for_advertisement(ac3, [adopter, adopter2, adopter3, adopter4, adopter5, adopter6], 10)
# you can print the name and score of each item in the list returned
for i in o:
    print (i.get_score(ac3), i.get_name())
# NOTE: the assignments below rebind adopter4-6 and ac-ac3 created above,
# replacing them with a fresh fixture set for the second test.
adopter4 = FearfulAdopter("Four","Cat","Dog")
adopter5 = SluggishAdopter("Five","Cat", (1,2))
adopter6 = AllergicAdopter("Six", "Lizard", "Cat")
ac = AdoptionCenter("Place1", {"Cat": 12, "Dog": 2}, (1,1))
ac2 = AdoptionCenter("Place2", {"Cat": 12, "Lizard": 2}, (3,5))
ac3 = AdoptionCenter("Place3", {"Cat": 40, "Dog": 4}, (-2,10))
ac4 = AdoptionCenter("Place4", {"Cat": 33, "Horse": 5}, (-3,0))
ac5 = AdoptionCenter("Place5", {"Cat": 45, "Lizard": 2}, (8,-2))
ac6 = AdoptionCenter("Place6", {"Cat": 23, "Dog": 7, "Horse": 5}, (-10,10))
ac7 = AdoptionCenter("Place7", {"Cat": 12, "Dog": 2}, (1,1))
# how to test get_ordered_adoption_center_list
p = get_ordered_adoption_center_list(adopter4, [ac,ac2,ac3,ac4,ac5,ac6,ac7])
# you can print the name and score of each item in the list returned
# (note: the list is ordered for adopter4, but scores printed are adopter2's)
for i in p:
    print (i.get_name(), adopter2.get_score(i))
# print (i.get_name(), adopter5.get_score(i))
# print (i.get_name(), adopter6.get_score(i))
os.system("pause")  # Windows-only: keep the console window open until a key is pressed
|
NicovincX2/Python-3.5
|
Divers/pets_adoption.py
|
Python
|
gpl-3.0
| 9,562
|
[
"VisIt"
] |
998f7b14da19e11412414e0f3091390b11bb2e3210a643cc8b2285e8fbfb7b7e
|
from collections import deque
from rdkit import Chem
import sys
import tensorflow as tf
import pickle
import os
import fnmatch
import numpy as np
from scipy.spatial.distance import pdist, squareform
import pandas as pd
from deepchem.feat.base_classes import Featurizer
from deepchem.feat.graph_features import atom_features
from scipy.sparse import csr_matrix
def get_atom_type(atom):
    """Map an RDKit atom to an integer atom-type index in [0, 22].

    H, Be, B and the halogens map directly from atomic number; C, N, O, P
    and S are further split by hybridization (sp2 / sp3 / other); anything
    else maps to 22.

    Bug fix: the original called ``str(atom.GetHybridization).lower()``
    (missing call parentheses), which stringifies the bound method object,
    so "sp2"/"sp3" never matched and every hybridized element fell through
    to its "other" slot.
    """
    elem = atom.GetAtomicNum()
    hyb = str(atom.GetHybridization()).lower()
    # Elements whose type index does not depend on hybridization.
    direct = {1: 0, 4: 1, 5: 2, 9: 12, 17: 19, 35: 20, 53: 21}
    if elem in direct:
        return direct[elem]
    # (sp2 index, sp3 index, other index) per hybridization-sensitive element.
    by_hyb = {
        6: (3, 4, 5),      # C
        7: (6, 7, 8),      # N
        8: (9, 10, 11),    # O
        15: (13, 14, 15),  # P
        16: (16, 17, 18),  # S
    }
    if elem in by_hyb:
        sp2_idx, sp3_idx, other_idx = by_hyb[elem]
        if "sp2" in hyb:
            return sp2_idx
        elif "sp3" in hyb:
            return sp3_idx
        else:
            return other_idx
    return 22
def get_atom_adj_matrices(mol,
                          n_atom_types,
                          max_n_atoms=200,
                          max_valence=4,
                          graph_conv_features=True,
                          nxn=True):
  """Build (adjacency, atom-feature) matrices for one RDKit molecule.

  Parameters
  ----------
  mol: RDKit Mol to featurize.
  n_atom_types: number of atom-type slots used when one-hot encoding
      (only used when graph_conv_features is False).
  max_n_atoms: fixed row count of the returned matrices (zero-padded).
  max_valence: max neighbors encoded per atom in neighbor-list mode.
  graph_conv_features: if True, per-atom features come from
      deepchem's atom_features(); otherwise a one-hot of get_atom_type()
      plus bond-order bits is built by hand.
  nxn: if True the adjacency matrix is (max_n_atoms, max_n_atoms) 0/1;
      otherwise it is a (max_n_atoms, max_valence) neighbor-index list.

  Returns
  -------
  (adj_matrix, atom_matrix), both uint8 numpy arrays.
  """
  if not graph_conv_features:
    # 4 bond-order bits (single/double/triple/aromatic) per neighbor slot.
    bond_matrix = np.zeros((max_n_atoms, 4 * max_valence)).astype(np.uint8)
  if nxn:
    adj_matrix = np.zeros((max_n_atoms, max_n_atoms)).astype(np.uint8)
  else:
    adj_matrix = np.zeros((max_n_atoms, max_valence)).astype(np.uint8)
    # Pad the neighbor list with the last atom-slot index
    # (presumably a sentinel "no neighbor" row — TODO confirm).
    adj_matrix += (adj_matrix.shape[0] - 1)
  if not graph_conv_features:
    # One-hot atom-type matrix; last column starts as 1 and is cleared for
    # real atoms, so it marks padding rows.
    atom_matrix = np.zeros((max_n_atoms, n_atom_types + 3)).astype(np.uint8)
    atom_matrix[:, atom_matrix.shape[1] - 1] = 1
  atom_arrays = []
  for a_idx in range(0, mol.GetNumAtoms()):
    atom = mol.GetAtomWithIdx(a_idx)
    if graph_conv_features:
      atom_arrays.append(atom_features(atom))
    else:
      atom_type = get_atom_type(atom)
      atom_matrix[a_idx][-1] = 0
      atom_matrix[a_idx][atom_type] = 1
    for n_idx, neighbor in enumerate(atom.GetNeighbors()):
      if nxn:
        adj_matrix[a_idx][neighbor.GetIdx()] = 1
        # Self-loop on the diagonal.
        adj_matrix[a_idx][a_idx] = 1
      else:
        adj_matrix[a_idx][n_idx] = neighbor.GetIdx()
      if not graph_conv_features:
        bond = mol.GetBondBetweenAtoms(a_idx, neighbor.GetIdx())
        bond_type = str(bond.GetBondType()).lower()
        # NOTE(review): bond_order is unassigned (NameError on the line
        # below) if the bond type is none of these four — verify inputs.
        if "single" in bond_type:
          bond_order = 0
        elif "double" in bond_type:
          bond_order = 1
        elif "triple" in bond_type:
          bond_order = 2
        elif "aromatic" in bond_type:
          bond_order = 3
        bond_matrix[a_idx][(4 * n_idx) + bond_order] = 1
  if graph_conv_features:
    # Stack the per-atom feature vectors into a fixed-size padded matrix.
    n_feat = len(atom_arrays[0])
    atom_matrix = np.zeros((max_n_atoms, n_feat)).astype(np.uint8)
    for idx, atom_array in enumerate(atom_arrays):
      atom_matrix[idx, :] = atom_array
  else:
    # Append the bond-order bits to the hand-built atom-type one-hots.
    atom_matrix = np.concatenate(
        [atom_matrix, bond_matrix], axis=1).astype(np.uint8)
  return (adj_matrix.astype(np.uint8), atom_matrix.astype(np.uint8))
def featurize_mol(mol, n_atom_types, max_n_atoms, max_valence):
  """Featurize one molecule into an (adjacency, atom-feature) matrix pair."""
  adj, atoms = get_atom_adj_matrices(mol, n_atom_types, max_n_atoms,
                                     max_valence)
  return (adj, atoms)
class AdjacencyFingerprint(Featurizer):
  """Featurizer producing (adjacency, atom-feature) matrix pairs per molecule."""

  def __init__(self,
               n_atom_types=23,
               max_n_atoms=200,
               add_hydrogens=False,
               max_valence=4):
    """Store the featurization parameters for later calls to featurize()."""
    self.n_atom_types = n_atom_types
    self.max_n_atoms = max_n_atoms
    self.add_hydrogens = add_hydrogens
    self.max_valence = max_valence

  def featurize(self, rdkit_mols):
    """Featurize each molecule; returns an object ndarray of matrix pairs."""
    results = np.empty((len(rdkit_mols)), dtype=object)
    for pos, molecule in enumerate(rdkit_mols):
      if self.add_hydrogens:
        molecule = Chem.AddHs(molecule)
      results[pos] = featurize_mol(molecule, self.n_atom_types,
                                   self.max_n_atoms, self.max_valence)
    return results
|
rbharath/deepchem
|
deepchem/feat/adjacency_fingerprints.py
|
Python
|
mit
| 4,395
|
[
"RDKit"
] |
c453fdd643118622633b8f12ef6698a659c47a95584a2580f9229f65a6da6644
|
# $Id$
#
# Copyright (C) 2005-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" uses DSViewer to interact with molecules
"""
from rdkit import Chem
from win32com.client import Dispatch
import tempfile, os
_nextDisplayId = 1
class Displayable(object):
  """One displayable entity inside a DSViewer document.

  Each instance gets a unique RD_Visual id (either passed in or drawn from
  the module-level _nextDisplayId counter) and issues DSViewer DoCommand
  strings against *doc* to select/show/hide itself and its children.

  Fixes vs the original:
    - Select() used a mutable default argument (atoms=[]); replaced with
      None (behavior unchanged — the list was never mutated in place, but
      the idiom is unsafe).
    - Hide()/Show()/ShowOnly() accepted a ``recurse`` parameter but
      hard-coded recurse=True in their Select() calls; the parameter is
      now honored (default behavior unchanged).
  """

  def __init__(self, doc, id=-1):
    global _nextDisplayId
    if id < 0:
      id = _nextDisplayId
      _nextDisplayId += 1
    self.doc = doc
    self.id = id
    self.visible = True
    self.children = []

  def Select(self, atoms=None, state=True, recurse=False):
    """Select (or deselect) this object, or specific atoms of it.

    atoms: sequence of 0-based atom ids, '*' / empty for the whole object.
    Returns the integer count reported by DSViewer.
    """
    if atoms is None:
      atoms = []
    if state:
      selText = 'true'
    else:
      selText = 'false'
    if not atoms or atoms == '*':
      atomStr = '; atom "*"'
    else:
      # DSViewer has atom ids from 1, we do it from 0:
      atoms = ['id=%d' % (x) for x in atoms]
      atomStr = '; atom %s' % ','.join(atoms)
    cmd = 'SetProperty object RD_Visual=%d %s: select=%s' % (self.id, atomStr, selText)
    r = int(str(self.doc.DoCommand(cmd)))
    if not r and not atoms:
      # this handles an annoying case where if you try to select
      # a molecule by ID in DSViewer, you get nothing selected:
      atomStr = ''
      cmd = 'SetProperty object RD_Visual=%d %s: select=%s' % (self.id, atomStr, selText)
      r = int(str(self.doc.DoCommand(cmd)))
    # DSViewer also selects the bonds between pairs of highlighted atoms;
    # undo that immediately:
    if r:
      cmd = 'SetProperty object RD_Visual=%d; bond index="*": select=off' % (self.id)
      self.doc.DoCommand(cmd)
    if recurse:
      for child in self.children:
        child.Select(atoms=atoms, state=state, recurse=True)
    return r

  def Hide(self, recurse=True):
    """Hide this object (and, by default, its children)."""
    self.Select(state=True, recurse=recurse)
    self.doc.DoCommand('hide')
    self.Select(state=False, recurse=recurse)

  def Show(self, recurse=True):
    """Show this object (and, by default, its children)."""
    self.Select(state=True, recurse=recurse)
    self.doc.DoCommand('Show')
    self.Select(state=False, recurse=recurse)

  def ShowOnly(self, recurse=True):
    """Hide everything in the document, then show only this object."""
    self.doc.DoCommand('HideAll')
    self.Select(state=True, recurse=recurse)
    self.doc.DoCommand('Show')
    self.Select(state=False, recurse=recurse)

  def __del__(self):
    # Best effort: remove the corresponding objects from the document when
    # the Python wrapper goes away.
    self.doc.DoCommand('UnselectAll')
    count = self.Select(state=True, recurse=True)
    if count:
      self.doc.DoCommand('Delete')
class MolViewer(object):
  """Drives DSViewer / WebLabViewerPro through its COM automation interface.

  Keeps a registry (self.displayables) of name -> Displayable so molecules
  can be re-selected, re-shown, hidden and queried by name.

  Fixes vs the original:
    - ShowMol() closed (and thereby deleted) the NamedTemporaryFile before
      issuing PasteFrom, and referenced ``tmp.name`` where ``tmp`` was
      either undefined (when molB was passed in) or a plain string with no
      ``.name`` attribute.  The file is now written with delete=False,
      closed, pasted, then unlinked.
    - DeleteAllExcept() deleted dict entries while iterating ``keys()``
      (a RuntimeError on Python 3); it now iterates a list copy.
    - GetSelectedAtoms() used the Python-2-only ``iteritems()``; replaced
      with ``items()`` (works on both 2 and 3).
  """

  def __init__(self, force=0, title='Untitled', **kwargs):
    """Attach to a running viewer (launching one if needed) and pick a doc."""
    self.app = Dispatch('WebLabViewerPro.Application')
    self.app.Visible = 1
    if force or self.app.ActiveDocument is None:
      self.doc = self.app.New(title)
    else:
      self.doc = self.app.ActiveDocument
    self.displayables = {}

  def DeleteAll(self):
    """Remove every object from the document and clear the registry."""
    self.doc.DoCommand('SelectAll')
    self.doc.DoCommand('Delete')
    self.displayables = {}

  def DeleteAllExcept(self, excludes):
    """Drop every registered displayable whose name is not in *excludes*."""
    excludes = [x.lower() for x in excludes]
    # Iterate a copy: deleting from the dict while iterating its keys()
    # view raises RuntimeError on Python 3.
    for nm in list(self.displayables.keys()):
      if nm not in excludes:
        del self.displayables[nm]

  def ShowMol(self, mol, name='molecule', showOnly=True, highlightFeatures=[], molB="", confId=-1,
              zoom=True):
    """Display an RDKit molecule (or a raw mol block) in the viewer.

    highlightFeatures is accepted for interface compatibility but unused.
    """
    if showOnly:
      self.DeleteAll()
      obj = None
    else:
      obj = self.displayables.get(name.lower(), None)
    if not molB:
      molB = Chem.MolToMolBlock(mol, confId=confId)
      # Put the display name on the mol block's title line.
      molB = name + "\n" + molB[molB.index('\n') + 1:]
    if not obj:
      obj = Displayable(self.doc)
    if not hasattr(obj, '_molBlock') or obj._molBlock != molB:
      obj._molBlock = molB
      # Write the mol block to a real on-disk file the viewer can read.
      # delete=False + explicit unlink: the file must still exist when
      # PasteFrom runs (and on Windows it cannot be reopened while open).
      tmpf = tempfile.NamedTemporaryFile('w+', suffix='.mol', delete=False)
      try:
        tmpf.write(molB)
        tmpf.close()
        self.doc.DoCommand('PasteFrom %s' % tmpf.name)
      finally:
        os.unlink(tmpf.name)
      self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d' % (obj.id))
      self.doc.DoCommand('SetProperty molecule id=0 : id=%d' % (obj.id))
      self.doc.DoCommand('SetProperty molecule id=0 : select=off')
    else:
      obj.Select(state=True)
      self.doc.DoCommand('Show')
    self.displayables[name.lower()] = obj
    if zoom:
      self.doc.DoCommand('Center')
      self.doc.DoCommand('FitView')
    return

  def LoadFile(self, filename, name, showOnly=False):
    """Load a molecule file into the viewer and register it under *name*."""
    if showOnly:
      self.DeleteAll()
    self.doc.DoCommand('PasteFrom %s' % filename)
    obj = Displayable(self.doc)
    self.doc.DoCommand('SetProperty molecule id=0 : id=%d' % (obj.id))
    self.doc.DoCommand('SetProperty molecule id=0 : select=off')
    # Proteins come in as AminoAcidChain objects; fall back to molecule.
    count = self.doc.DoCommand('SetProperty AminoAcidChain id=0 : RD_Visual=%d' % (obj.id))
    if not count or int(count) <= 0:
      count = self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d' % (obj.id))
    self.displayables[name.lower()] = obj
    return obj

  def GetSelectedAtoms(self, whichSelection=''):
    """Return the current selection as a list of (name, 0-based atom id)."""
    if not whichSelection:
      d = str(self.doc.DoCommand('GetPropertyValue atom select=true: id=?'))
      d2 = str(self.doc.DoCommand('GetPropertyValue atom select=true: molecule=?'))
      if d2:
        molIds = []
        tmpD = {}
        for id in d2.split(','):
          id = int(id.split('/')[1]) + 1
          if id in tmpD:
            molIds.append(tmpD[id])
          else:
            # Reverse-map the RD_Visual id to the registered name.
            for k, v in self.displayables.items():
              if id == v.id:
                tmpD[id] = k
                molIds.append(k)
      else:
        molIds = [''] * (d.count(',') + 1)
    elif whichSelection.lower() in self.displayables:
      whichSelection = whichSelection.lower()
      whichSelection = self.displayables[whichSelection].id
      d = str(
        self.doc.DoCommand('GetPropertyValue molecule RD_Visual=%d; atom select=true: id=?' %
                           whichSelection))
      molIds = [whichSelection] * (d.count(',') + 1)
    else:
      d = None
      molIds = None
    if d:
      splitD = d.split(',')
      try:
        res = []
        for i in range(len(splitD)):
          # DSViewer has atom ids from 1, we do it from 0:
          idx = int(splitD[i])
          res.append((molIds[i], idx))
      except Exception:
        import traceback
        traceback.print_exc()
        res = []
    else:
      res = []
    return res

  def HighlightAtoms(self, indices, where, extraHighlight=False):
    """Select *indices* of displayable *where* (extraHighlight is unused)."""
    self.doc.DoCommand('UnSelectAll')
    self.SelectAtoms(where, indices)

  def SelectAtoms(self, itemId, atomIndices, selName='selection'):
    """Clear any selection, then select *atomIndices* of *itemId*."""
    self.doc.DoCommand('UnSelectAll')
    self.doc.DoCommand('SetProperty atom id="*": select=off')
    o = self.displayables.get(itemId.lower(), None)
    if o:
      o.Select(atoms=atomIndices)

  def SetDisplayUpdate(self, val):
    """Enable/disable view redrawing (disable while batching commands)."""
    if not val:
      self.doc.DoCommand('UpdateView off')
    else:
      self.doc.DoCommand('UpdateView on')

  def GetAtomCoords(self, sels):
    """Fetch xyz coordinates for (label, 0-based idx) pairs.

    NOTE(review): the returned dict is keyed with the 1-based (shifted)
    index, not the caller's 0-based one — confirm against callers.
    """
    res = {}
    for label, idx in sels:
      whichSelection = label.lower()
      whichSelection = self.displayables[label].id
      # DSViewer has atom ids from 1, we do it from 0:
      idx += 1
      cmd = 'GetPropertyValue molecule RD_Visual=%d; atom id=%d: xyz=?' % (whichSelection, idx)
      coords = self.doc.DoCommand(cmd)
      coords = [float(x) for x in coords.split(' ')]
      res[(label, idx)] = coords
    return res

  def AddPharmacophore(self, locs, colors, label, sphereRad=0.5):
    """Draw one colored sphere per location and register them under *label*."""
    label = label.lower()
    self.SetDisplayUpdate(False)
    parent = Displayable(self.doc)
    for i, loc in enumerate(locs):
      color = colors[i]
      color = ' '.join([str(int(255 * x)) for x in color])
      obj = Displayable(self.doc)
      nm = 'sphere-%d' % obj.id
      self.doc.DoCommand('Sphere %s' % nm)
      self.doc.DoCommand('SetProperty Object name=%s : xyz=%f %f %f' % (nm, loc[0], loc[1], loc[2]))
      self.doc.DoCommand('SetProperty Object name=%s : radius=%f' % (nm, sphereRad))
      self.doc.DoCommand('SetProperty Object name=%s : color=%s' % (nm, color))
      self.doc.DoCommand('SetProperty Object name=%s : RD_Visual=%d' % (nm, parent.id))
      self.doc.DoCommand('SetProperty Object name=%s : id=%d' % (nm, parent.id))
    self.displayables[label] = parent
    self.SetDisplayUpdate(True)

  def SetDisplayStyle(self, obj, style=''):
    """Set the atom display style ('sticks', 'lines' or '' for off)."""
    self.doc.DoCommand('UnSelectAll')
    obj = obj.lower()
    o = self.displayables.get(obj, None)
    if o:
      o.Select(state=True)
      if style == 'sticks':
        self.doc.DoCommand('DisplayStyle Atom Stick')
      elif style == 'lines':
        self.doc.DoCommand('DisplayStyle Atom Line')
      elif style == '':
        self.doc.DoCommand('DisplayStyle Atom Off')
      o.Select(state=False)

  def HideAll(self):
    """Hide everything in the document."""
    self.doc.DoCommand('HideAll')

  def HideObject(self, objName):
    """Hide the named displayable, if registered."""
    self.doc.DoCommand('UnSelectAll')
    objName = objName.lower()
    o = self.displayables.get(objName, None)
    if o:
      o.Hide()

  def DisplayObject(self, objName):
    """Show the named displayable, if registered."""
    self.doc.DoCommand('UnSelectAll')
    objName = objName.lower()
    o = self.displayables.get(objName, None)
    if o:
      o.Show()

  def Zoom(self, objName):
    """Center and fit the view on the named displayable."""
    self.doc.DoCommand('UnSelectAll')
    objName = objName.lower()
    o = self.displayables.get(objName, None)
    if o:
      r = o.Select(state=True)
      self.doc.DoCommand('Center')
      self.doc.DoCommand('FitView')
      o.Select(state=False)

  def SelectProteinNeighborhood(self, aroundObj, inObj, distance=5.0, name='neighborhood',
                                showSurface=False):
    """ FIX: the surface display stuff here is all screwed up due to
    differences between the way PyMol and DSViewer handle surfaces.
    In PyMol they are essentially a display mode for the protein, so
    they don't need to be managed separately.
    In DSViewer, on the other hand, the surface is attached to the
    protein, but it needs to be hidden or shown on its own. I haven't
    figured out how to do that yet.
    """
    self.doc.DoCommand('UnSelectAll')
    o = self.displayables.get(aroundObj.lower(), None)
    p = self.displayables.get(inObj.lower(), None)
    if o and p:
      self.SetDisplayUpdate(False)
      p.Show()
      self.doc.DoCommand('UnSelectAll')
      tmp = self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=on' % o.id)
      tmp = self.doc.DoCommand('SelectByRadius inside %f atom' % distance)
      # that selects all atoms in the radius, now we need to make sure
      # only atoms in _inObj_ are selected:
      for obj in self.displayables.values():
        if obj.id != p.id:
          self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=off' % obj.id)
      # ----
      # now get all the residue names for the selected atoms:
      rs = self.doc.DoCommand('GetPropertyValue atom select=true: parent=?')
      if rs:
        rs = rs.split(',')
        residues = {}
        for r in rs:
          residues[r] = 1
        # and select each atom in those residues:
        parents = ','.join(['parent="%s"' % x for x in residues.keys()])
        cmd = 'SetProperty atom %s: select=on' % parents
        tmp = self.doc.DoCommand(cmd)
        if showSurface:
          # create the surface:
          self.doc.DoCommand('Surface')
          obj = Displayable(self.doc)
          self.displayables[name] = obj
          self.doc.DoCommand('SetProperty surface id="*":RD_Visual=%d' % obj.id)
          self.doc.DoCommand('UnSelectAll')
      self.SetDisplayUpdate(True)

  def Redraw(self):
    """Force a view refresh."""
    self.SetDisplayUpdate(True)
if __name__ == '__main__':
  # Quick manual demo: build naphthalene from SMILES, embed 3D coordinates,
  # UFF-minimize, then push the result into a running DSViewer instance.
  from rdkit import Chem
  from rdkit.Chem import rdDistGeom, rdForceFieldHelpers
  m = Chem.MolFromSmiles('c1cccc2c1cccc2')
  rdDistGeom.EmbedMolecule(m)
  rdForceFieldHelpers.UFFOptimizeMolecule(m)
  s = MolViewer()
  s.ShowMol(m)
|
bp-kelley/rdkit
|
rdkit/Chem/DSViewer.py
|
Python
|
bsd-3-clause
| 12,125
|
[
"PyMOL",
"RDKit"
] |
a5e9cc1f050dd810590272c5995fa97be9462424b6877039a8d141d53875ecac
|
import csv
import xlrd
import xlwt
import subprocess
import ConfigParser
import sys
import os
import string
import random
from datetime import datetime
# Path of a temporary file listing the chromosomes/seq-ids to BLAST first
# (written by main(), consumed by determine_unique_primers(), removed at exit).
FILE_CHRCHECKFIRST = ""
# Number of 'N' spacer bases inserted between the two primer target
# sequences handed to primer3.
PARAM_NUMBER_N = 100
# Caches of primer sequences classified by earlier BLAST rounds, so each
# sequence is only blasted once across the relaxation iterations.
UNIQUE_PRIMERS = set("")
NON_UNIQUE_PRIMERS = set("")
FORBIDDEN_PRIMERS = set("")
def main():
    """Perform primer design.

    Usage: script <config.ini> <variants.{xls,tsv}> [<output.{xls,tsv}>]

    Iterates over increasingly permissive (target size, candidate count)
    parameter sets until every variant has primers, then writes/prints two
    result tables: 'at least one unique primer' and 'both primers unique'.
    """
    config = ConfigParser.ConfigParser()
    config.optionxform = str  # keep option names case-sensitive
    config.read(sys.argv[1])
    maxSeqDist = config.getint("PrimerDesign", "max_dist_for_sequencing")
    maxPriDist = config.getint("PrimerDesign", "max_pcr_product_size")
    global FILE_CHRCHECKFIRST
    # Unique temp-file name (pid + random suffix) so parallel runs don't collide.
    FILE_CHRCHECKFIRST = "/tmp/primerDesign_chrToCheckFirst_" + str(os.getpid()) + "_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) + ".txt"
    file = open(FILE_CHRCHECKFIRST, "w")
    file.write(config.get("General", "check_first_chr"))
    file.close()
    varList = read_variant_list_from_file(sys.argv[2])
    varListTmp = varList
    # Per variant: [seq-bp count set 1, seq-bp count set 2, primer record 1, primer record 2]
    primerList = [[-1, -1, -1, -1] for i in range(len(varList))]
    # Relax target size / number of primer3 candidates step by step; only
    # variants still lacking primers are retried in the next round.
    for paramSet in [(int(maxSeqDist/3), 25), (int(maxSeqDist/3), 100), (maxSeqDist,50), (maxSeqDist,200), (maxSeqDist,2000), (int(maxPriDist/3),50), (int(maxPriDist/3),200), (int(maxPriDist/3),1000), (maxPriDist,50), (maxPriDist,200), (maxPriDist,1000), (maxPriDist,5000)]:
        if len(varListTmp) == 0: break
        primerTargetSize, primerNum = paramSet
        primerPairs = determine_primer_pairs(varListTmp, primerTargetSize, primerNum, config)
        select_primer_pairs(primerList, primerPairs, varListTmp, config.getint("PrimerDesign", "max_dist_for_sequencing"))
        varListTmp = []
        for i in range(len(primerList)):
            # Retry while the best pair covers <=1 sequencable breakpoint
            # (sequencing-range rounds) or has no pair at all (PCR rounds).
            if ((primerTargetSize <= maxSeqDist) and (primerList[i][0] <= 1 or primerList[i][1] <= 1)) or ((primerTargetSize > maxSeqDist) and (primerList[i][0] < 1 or primerList[i][1] < 1)):
                varListTmp.append(varList[i])
    resultList1 = createResultList(varList, primerList, config.getint("PrimerDesign", "max_dist_for_sequencing"), 0)
    resultList2 = createResultList(varList, primerList, config.getint("PrimerDesign", "max_dist_for_sequencing"), 1)
    if len(sys.argv) == 4:
        write_primer_list_to_file(sys.argv[3], resultList1, resultList2)
    else:
        print_array(resultList1)
        print ""
        print_array(resultList2)
    os.remove(FILE_CHRCHECKFIRST)
def read_variant_list_from_file(fileName):
    """Read in a list of variants from file.

    Accepts either an .xls workbook (first sheet) or a tab-separated text
    file with columns: startChr, startPos, endChr, endPos, svType, comment.
    'inv'/'invRef' and 'invAlt' rows are expanded into two rows (A and B,
    one per breakpoint); each returned row is prefixed with a 1-based id.
    """
    varRows = []
    if fileName.split(".")[-1] == "xls":
        workbook = xlrd.open_workbook(fileName)
        worksheet = workbook.sheet_by_index(0)
        for i in range(worksheet.nrows):
            varRows.append([str(v) for v in worksheet.row_values(i)])
    else:
        # Python-2-style binary open for the csv module.
        varListFH = open(fileName, 'rb')
        varRows = list(csv.reader(varListFH, delimiter='\t', quoting=csv.QUOTE_NONE))
        varListFH.close()
    varList = []
    iter = 1
    for row in varRows:
        svStartChr, svStartPos, svEndChr, svEndPos, svType, svComment = row
        # float() first: xlrd returns numeric cells as floats ("123.0").
        svStartPos = int(float(svStartPos))
        svEndPos = int(float(svEndPos))
        if svType in ["del", "dup", "inv3to3", "inv5to5", "trans3to3", "trans3to5", "trans5to3", "trans5to5", "snv"]:
            varList.append([iter, svStartChr, svStartPos, svEndChr, svEndPos, svType, svComment])
            iter += 1
        elif svType in ["inv", "invRef"]:
            varList.append([iter, svStartChr, svStartPos, svEndChr, svEndPos, "invRefA", svComment])
            iter += 1
            varList.append([iter, svStartChr, svStartPos, svEndChr, svEndPos, "invRefB", svComment])
            iter += 1
        elif svType == "invAlt":
            varList.append([iter, svStartChr, svStartPos, svEndChr, svEndPos, "invAltA", svComment])
            iter += 1
            varList.append([iter, svStartChr, svStartPos, svEndChr, svEndPos, "invAltB", svComment])
            iter += 1
        else:
            # Unknown SV types are reported and skipped, not fatal.
            sys.stderr.write(svType+" is not an accepted svType!\n")
    return varList
def write_primer_list_to_file(fileName, resultList1, resultList2):
    """Write list of primers to file.

    If *fileName* ends in .xls, writes two worksheets
    ('AtLeastOneUniquePrimer' and 'BothPrimersUnique') with auto-sized
    columns; otherwise writes both tables tab-separated, separated by a
    blank line.
    """
    if fileName.split(".")[-1] == "xls":
        workbook = xlwt.Workbook()
        worksheet = workbook.add_sheet('AtLeastOneUniquePrimer')
        # Track the widest cell per column to size the columns afterwards.
        maxCharacters = [0]*len(resultList1[0])
        for i in range(len(resultList1)):
            primerResult = resultList1[i]
            for j in range(len(primerResult)):
                # Store numerics as floats, except the sequence/flag
                # columns 9, 10, 15, 16.
                if is_number(primerResult[j]) and not (j in [9, 10, 15, 16]): primerResult[j] = float(primerResult[j])
                worksheet.write(i,j,primerResult[j])
                maxCharacters[j] = max(maxCharacters[j], len(str(primerResult[j])))
        for i in range(len(maxCharacters)):
            # 330 width units approximate one character in xlwt.
            worksheet.col(i).width = 330 * (maxCharacters[i]+1)
        worksheet = workbook.add_sheet('BothPrimersUnique')
        maxCharacters = [0]*len(resultList1[0])
        for i in range(len(resultList2)):
            primerResult = resultList2[i]
            for j in range(len(primerResult)):
                if is_number(primerResult[j]) and not (j in [9, 10, 15, 16]): primerResult[j] = float(primerResult[j])
                worksheet.write(i,j,primerResult[j])
                maxCharacters[j] = max(maxCharacters[j], len(str(primerResult[j])))
        for i in range(len(maxCharacters)):
            worksheet.col(i).width = 330 * (maxCharacters[i]+1)
        workbook.save(fileName)
    else:
        # Python-2-style binary open for the csv module.
        primerWriterFH = open(fileName, 'wb')
        primerWriter = csv.writer(primerWriterFH, delimiter='\t')
        primerWriter.writerows(resultList1)
        primerWriter.writerow([])
        primerWriter.writerows(resultList2)
        primerWriterFH.close()
def determine_primer_pairs(varList, primerTargetSize, primerNum, config):
    """Determine a set of primer pairs for a list of variants.

    Fetches the two target sequences flanking each breakpoint, runs primer3
    on them, BLASTs the candidate primers for uniqueness, and returns
    primer3's candidate table with four extra fields appended per pair:
    left-unique flag, right-unique flag, left and right primer-to-breakpoint
    distances.
    """
    minPrimerOffset = config.getint("PrimerDesign","min_primer_offset")
    targetSequences = []
    sys.stderr.write("Number of variants left: "+str(len(varList))+"\n")
    # One (forward, reverse) target-sequence pair per variant.
    for variant in varList:
        primerTargetSeqFwd, primerTargetSeqRev = get_primer_target_sequence(*variant, primerTargetSize=primerTargetSize, primerOffset=minPrimerOffset, blastdbcmd=config.get("Programs","blastdbcmd"), genomeFile=config.get("General","reference_genome"))
        targetSequences.append([variant[0], primerTargetSeqFwd, primerTargetSeqRev])
    primer3Input = generate_primer3_input(targetSequences, primerNum, config.getint("PrimerDesign","max_pcr_product_size")-(2*config.getint("PrimerDesign","min_primer_offset")), config.items("Primer3"))
    primer3Output, primer3OutputTable, primerSeqList = call_primer3(primer3Input, config.get("Programs","primer3"), config.getfloat("PrimerDesign","primer_qual_cutoff"))
    uniqPrimers = determine_unique_primers(set(primerSeqList), config.get("Programs", "blast"), config.get("General", "reference_genome"), config.getint("PrimerDesign","min_mismatches"), config.getint("PrimerDesign","min_mismatches_close3p"), config.getint("PrimerDesign","min_dist3p"), config.getint("PrimerDesign","max_mismatches_forbidden"), config.get("Programs","num_cpus"), config.get("PrimerDesign","word_size"))
    sys.stderr.write("unique primers: "+str(len(UNIQUE_PRIMERS))+"\n")
    sys.stderr.write("non-unique primers: "+str(len(NON_UNIQUE_PRIMERS))+"\n")
    sys.stderr.write("forbidden primers: "+str(len(FORBIDDEN_PRIMERS))+"\n")
    # iter indexes targetSequences: one sub-table per variant.
    iter = 0
    for primer3OutputSubTable in primer3OutputTable:
        for primerPair in primer3OutputSubTable:
            # Pairs containing a forbidden primer are flagged non-unique on
            # both sides; otherwise test each primer individually.
            if ((primerPair[2] in FORBIDDEN_PRIMERS) or (primerPair[3] in FORBIDDEN_PRIMERS)):
                primerPair.extend([False, False])
            else:
                primerPair.append(primerPair[2] in uniqPrimers)
                primerPair.append(primerPair[3] in uniqPrimers)
            # Distance from each primer to the breakpoint (primer3 positions
            # are relative to the fwd+N-spacer+rev template).
            primerPair.append(len(targetSequences[iter][1]) - int(primerPair[4].split(",")[0]) + minPrimerOffset - 1)
            primerPair.append(int(primerPair[5].split(",")[0]) - len(targetSequences[iter][1]) - PARAM_NUMBER_N + minPrimerOffset)
        iter += 1
    return primer3OutputTable
def select_primer_pairs(primerList, primerPairs, varList, maxDistForSequencing):
    """Select suitable primer pairs and add to list of variants.

    Updates *primerList* in place.  For each variant, picks (a) the best
    pair with at least one unique primer (slots 0/2) and (b) the best pair
    with both primers unique (slots 1/3); 'best' maximizes the number of
    breakpoints within sequencing distance.
    """
    iter = -1
    for primerPairSet in primerPairs:
        iter += 1
        if len(primerPairSet) == 0:
            continue
        # Current best sequencable-breakpoint counts for this variant
        # (primerPairSet rows carry the 1-based variant id in column 0).
        numSeqBp1 = primerList[int(primerPairSet[0][0])-1][0]
        numSeqBp2 = primerList[int(primerPairSet[0][0])-1][1]
        primerScore1, primerScore2 = (1000.0, 1000.0)
        if numSeqBp1 > -1:
            primerScore1 = float(primerList[int(primerPairSet[0][0])-1][2][1])
        if numSeqBp2 > -1:
            primerScore2 = float(primerList[int(primerPairSet[0][0])-1][3][1])
        for primerPair in primerPairSet:
            # Columns 8/9: left/right uniqueness flags; skip pairs with
            # neither primer unique.
            if (not primerPair[8]) and (not primerPair[9]):
                continue
            # For duplications, the PCR product must fit inside the
            # duplicated segment.
            if (varList[iter][5] == "dup" and (primerPair[10]+primerPair[11] > varList[iter][4]-varList[iter][2])):
                continue
            # Columns 10/11: primer-to-breakpoint distances; count how many
            # breakpoints are reachable by Sanger sequencing.
            numSeqBp = 0
            if primerPair[10] < maxDistForSequencing:
                numSeqBp += 1
            if primerPair[11] < maxDistForSequencing:
                numSeqBp += 1
            if (primerPair[8] or primerPair[9]) and (numSeqBp > numSeqBp1):
                primerList[int(primerPair[0])-1][2] = primerPair[:]
                primerList[int(primerPair[0])-1][0] = numSeqBp
                primerScore1 = float(primerPair[1])
                numSeqBp1 = numSeqBp
            if (primerPair[8] and primerPair[9]) and (numSeqBp > numSeqBp2):
                primerList[int(primerPair[0])-1][3] = primerPair[:]
                primerList[int(primerPair[0])-1][1] = numSeqBp
                primerScore2 = float(primerPair[1])
                numSeqBp2 = numSeqBp
def createResultList(varList, primerList, maxDistForSequencing, primerSetId=0):
    """Combines the variant and the primer list and computes expected band sizes.

    primerSetId selects which stored primer pair to report: 0 = 'at least
    one unique primer' (primerList slots 0/2), 1 = 'both primers unique'
    (slots 1/3).  After the first pass each row is: 6 variant fields,
    penalty, left/right primer sequence (6-8), left/right Tm (9-10),
    left/right uniqueness flag (11-12), left/right breakpoint distance
    (13-14); variants without a pair get 13 "." placeholders instead.
    """
    naString = "."
    resultList = []
    # Pass 1: variant fields + chosen primer record (or placeholders).
    for i in range(len(varList)):
        resultList.append(varList[i][1:])
        if primerList[i][primerSetId] > -1:
            resultList[i].extend(primerList[i][primerSetId+2][1:4])
            resultList[i].extend(primerList[i][primerSetId+2][6:12])
        else:
            resultList[i].extend([naString]*13)
    # Pass 2: per SV type, append expected primer genome positions and
    # PCR band sizes (columns 15-18), derived from the breakpoint
    # positions (cols 1/3) and primer-to-breakpoint distances (cols 13/14).
    for i in range(len(varList)):
        if primerList[i][primerSetId] > -1:
            if resultList[i][4] == "del" or resultList[i][4] == "snv":
                resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][3]+resultList[i][14]])
                resultList[i].extend([resultList[i][13]+resultList[i][14], resultList[i][16]-resultList[i][15]])
            elif resultList[i][4] == "dup":
                resultList[i].extend([resultList[i][1]+resultList[i][13], resultList[i][3]-resultList[i][14], resultList[i][13]+resultList[i][14]])
                # Reference band only exists if the primers face each other
                # on the reference as well.
                if (resultList[i][16]<resultList[i][15]):
                    resultList[i].append(resultList[i][15]-resultList[i][16])
                else:
                    resultList[i].append(naString)
            elif resultList[i][4] == "inv3to3":
                resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][3]-resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            elif resultList[i][4] == "inv5to5":
                resultList[i].extend([resultList[i][1]+resultList[i][13], resultList[i][3]+resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            elif resultList[i][4] == "trans3to3":
                resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][3]-resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            elif resultList[i][4] == "trans3to5":
                resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][3]+resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            elif resultList[i][4] == "trans5to3":
                resultList[i].extend([resultList[i][1]+resultList[i][13], resultList[i][3]-resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            elif resultList[i][4] == "trans5to5":
                resultList[i].extend([resultList[i][1]+resultList[i][13], resultList[i][3]+resultList[i][14], resultList[i][13]+resultList[i][14], naString])
            # invRef/invAlt rows come in A/B pairs; each needs its sibling's
            # distance, so a missing sibling voids this row too.
            elif resultList[i][4] == "invRefA":
                if resultList[i+1][13] == naString:
                    resultList[i] = varList[i][1:]
                    resultList[i].extend([naString]*13)
                else:
                    resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][1]+resultList[i][14], resultList[i][13]+resultList[i][14], resultList[i][13]+resultList[i+1][13]])
            elif resultList[i][4] == "invRefB":
                if resultList[i-1][13] == naString:
                    resultList[i] = varList[i][1:]
                    resultList[i].extend([naString]*13)
                else:
                    resultList[i].extend([resultList[i][3]-resultList[i][13], resultList[i][3]+resultList[i][14], resultList[i][13]+resultList[i][14], resultList[i-1][14]+resultList[i][14]])
            elif resultList[i][4] == "invAltA":
                if resultList[i+1][13] == naString:
                    resultList[i] = varList[i][1:]
                    resultList[i].extend([naString]*13)
                else:
                    resultList[i].extend([resultList[i][1]-resultList[i][13], resultList[i][3]-resultList[i][14], resultList[i][13]+resultList[i][14], resultList[i][13]+resultList[i+1][13]])
            elif resultList[i][4] == "invAltB":
                if resultList[i-1][13] == naString:
                    resultList[i] = varList[i][1:]
                    resultList[i].extend([naString]*13)
                else:
                    resultList[i].extend([resultList[i][1]+resultList[i][13], resultList[i][3]+resultList[i][14], resultList[i][13]+resultList[i][14], resultList[i-1][14]+resultList[i][14]])
    # Pass 3: reorder columns for output (flags before Tms) and insert the
    # per-breakpoint 'sequencable' booleans after the distances.
    for i in range(len(varList)):
        row = resultList[i][0:9]
        row.extend(resultList[i][11:13])
        row.extend(resultList[i][9:11])
        row.extend(resultList[i][13:15])
        if resultList[i][13] != naString:
            row.extend([d<=maxDistForSequencing for d in resultList[i][13:15]])
        else:
            row.extend([naString]*2)
        row.extend(resultList[i][15:])
        resultList[i] = row
    return resultList
def generate_primer3_input(targetSequences, primerNum, maxPrimerDist, primer3Parameters):
    """Build the Boulder-IO input for primer3_core.

    Each target is [id, fwdSeq, revSeq]; the two sequences are joined with
    an N-spacer (PARAM_NUMBER_N bases) that the primers must bracket.
    *primer3Parameters* is an iterable of (key, value) pairs appended
    verbatim to every record; records are terminated with '='.
    """
    n_spacer = PARAM_NUMBER_N
    records = []
    for seq_id, fwd_seq, rev_seq in targetSequences:
        product_max = min(len(fwd_seq) + len(rev_seq), maxPrimerDist) + n_spacer
        lines = [
            "SEQUENCE_ID=" + str(seq_id),
            "SEQUENCE_TEMPLATE=" + fwd_seq + 'N' * n_spacer + rev_seq,
            "SEQUENCE_TARGET=" + str(len(fwd_seq)) + "," + str(n_spacer),
            "PRIMER_PRODUCT_SIZE_RANGE=" + str(n_spacer) + "-" + str(product_max),
            "PRIMER_TASK=generic",
            "PRIMER_PICK_LEFT_PRIMER=1",
            "PRIMER_PICK_INTERNAL_OLIGO=0",
            "PRIMER_PICK_RIGHT_PRIMER=1",
            "PRIMER_MAX_NS_ACCEPTED=1",
            "PRIMER_NUM_RETURN=" + str(primerNum),
            "PRIMER_EXPLAIN_FLAG=1",
        ]
        for param in primer3Parameters:
            lines.append(param[0] + "=" + param[1])
        lines.append("=")
        records.append("\n".join(lines) + "\n")
    return "".join(records)
def call_primer3(primer3Input, primer3Program, maxPrimerPenalty):
    """Call primer3_core.

    Feeds *primer3Input* (Boulder-IO records) to the program on stdin and
    parses its output.  Returns (raw output, per-variant table of
    [variantID, penalty, leftSeq, rightSeq, leftPos, rightPos, leftTm,
    rightTm] rows filtered by *maxPrimerPenalty*, flat list of all primer
    sequences seen).
    """
    p1 = subprocess.Popen(primer3Program, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    primer3Output = p1.communicate(input=primer3Input)[0]
    primer3OutputTable = []
    primer3OutputSubTable = []
    primerSeqList = []
    # numValues counts how many of the 7 per-pair fields have been seen;
    # when complete, the pair row is emitted and the counter reset.
    numValues = 0
    for line in primer3Output.splitlines():
        value = line.split("=")[-1]
        if ("SEQUENCE_ID=" in line):
            variantID = value
        if ("PRIMER_PAIR_" in line) and ("_PENALTY" in line):
            primerPenalty = value
            numValues += 1
        elif ("PRIMER_LEFT_" in line) and ("_SEQUENCE" in line):
            primerSeqLeft = value
            primerSeqList.append(value)
            numValues += 1
        elif ("PRIMER_RIGHT_" in line) and ("_SEQUENCE" in line):
            primerSeqRight = value
            primerSeqList.append(value)
            numValues += 1
        # "start,length" position lines (EXPLAIN lines also contain commas
        # and must be excluded).
        elif ("PRIMER_LEFT_" in line) and ("," in line) and not ("EXPLAIN" in line):
            primerPosLeft = value
            numValues += 1
        elif ("PRIMER_RIGHT_" in line) and ("," in line) and not ("EXPLAIN" in line):
            primerPosRight = value
            numValues += 1
        elif ("PRIMER_LEFT_" in line) and ("TM" in line):
            primerTmLeft = value
            numValues += 1
        elif ("PRIMER_RIGHT_" in line) and ("TM" in line):
            primerTmRight = value
            numValues += 1
        if numValues == 7:
            # A complete primer pair; keep it only under the penalty cutoff.
            if float(primerPenalty) < maxPrimerPenalty:
                primer3OutputSubTable.append([variantID, primerPenalty, primerSeqLeft, primerSeqRight, primerPosLeft, primerPosRight, primerTmLeft, primerTmRight])
            numValues = 0
        if line == "=":
            # Record separator: close this variant's sub-table.
            primer3OutputTable.append(primer3OutputSubTable)
            primer3OutputSubTable = []
    return primer3Output, primer3OutputTable, primerSeqList
def run_blast(sequences, eValue, blast, genomeFile, numThreads, wordSize, seqidListFile=""):
    """Run blastn on a set of primer sequences against *genomeFile*.

    Each sequence is queried under its own sequence as FASTA id, so the
    tabular output's query column IS the primer sequence.  When
    *seqidListFile* is given, the search is restricted to those seq-ids
    and capped at 2 target sequences (instead of 500).  Returns blastn's
    raw tabular ('6 std gaps nident') output.
    """
    fasta_chunks = []
    for primer_seq in sequences:
        fasta_chunks.append(">" + primer_seq + "\n" + primer_seq + "\n")
    blastInput = "".join(fasta_chunks)
    cmd = [blast, "-task", "blastn", "-db", genomeFile]
    if seqidListFile != "":
        cmd += ["-seqidlist", seqidListFile]
    cmd += ["-evalue", str(eValue), "-num_threads", numThreads,
            "-outfmt", "6 std gaps nident", "-dust", "no",
            "-gapopen", "4", "-gapextend", "2",
            "-penalty", "-2", "-reward", "2",
            "-word_size", wordSize,
            "-max_target_seqs", "500" if seqidListFile == "" else "2"]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return proc.communicate(input=blastInput)[0]
def determine_nonunique_sequences_from_blastoutput(blastOutput, minMismatches, minMismatchesClose3p, minDist3p, maxMismatchesToBeForbidden):
    """Determine non-unique sequences from blast output.

    Walks the tabular blast output ("6 std gaps nident" format) and collects
    query sequences that have a second qualifying hit.  A hit qualifies when
    the number of identical bases is high relative to the query length and
    either the match reaches close to the 3' end or lies sufficiently far
    from it with few mismatches overall.  Near-perfect secondary hits are
    additionally reported as completely non-unique.

    @return: (nonUniqueSequences, completelyNonUniqueSequences)
    """
    nonUniqueSequences = []
    completelyNonUniqueSequences = []
    prevSeq = ""
    skipSeq = ""
    for hit in blastOutput.splitlines():
        (seq, chrName, pident, alnLen, mism, gapOpens, qStart, qEnd,
         sStart, sEnd, evalue, bitScore, gaps, nIdentStr) = hit.split("\t")
        qEnd = int(qEnd)          # query-side end of the alignment
        nIdent = int(nIdentStr)   # number of identical bases in the hit
        seqLen = len(seq)
        closeTo3p = nIdent > seqLen - minMismatchesClose3p
        farFrom3p = qEnd <= seqLen - minDist3p and nIdent > seqLen - minMismatches
        if seq != skipSeq and (closeTo3p or farFrom3p):
            if seq == prevSeq:
                # Second qualifying hit for the same query: not unique.
                nonUniqueSequences.append(seq)
                if nIdent > seqLen - maxMismatchesToBeForbidden:
                    completelyNonUniqueSequences.append(seq)
                skipSeq = seq
            else:
                prevSeq = seq
    return (nonUniqueSequences, completelyNonUniqueSequences)
def determine_unique_primers(primerList, blast, genomeFile, minMismatches, minMismatchesClose3p, minDist3p, maxMismatchesToBeForbidden, numThreads, wordSize):
    """Check primer uniqueness using blast.

    Screens the candidate primers in rounds: first against the priority
    sequence list (FILE_CHRCHECKFIRST) over all e-values, then against the
    whole database with only the permissive e-values.  Primers with a
    qualifying secondary hit are moved into the module-level caches
    NON_UNIQUE_PRIMERS / FORBIDDEN_PRIMERS; the survivors are added to
    UNIQUE_PRIMERS.  Returns the set of all primers considered unique.
    """
    global UNIQUE_PRIMERS
    global NON_UNIQUE_PRIMERS
    global FORBIDDEN_PRIMERS
    sys.stderr.write(str(datetime.now()) + " Primers to test: " + str(len(primerList)) + "\n")
    # Only blast primers whose status is not already cached.
    candidates = primerList - NON_UNIQUE_PRIMERS - UNIQUE_PRIMERS - FORBIDDEN_PRIMERS
    for seqidListFile in [FILE_CHRCHECKFIRST, ""]:
        for eValue in [0.01, 0.1, 1, 10, 100]:
            # The whole-database pass runs only the permissive e-values; the
            # strict ones were already covered by the restricted pass.
            if seqidListFile == "" and eValue < 5:
                continue
            blastOutput = run_blast(candidates, eValue, blast, genomeFile, numThreads, wordSize, seqidListFile)
            nonUnique, forbidden = determine_nonunique_sequences_from_blastoutput(
                blastOutput, minMismatches, minMismatchesClose3p, minDist3p, maxMismatchesToBeForbidden)
            NON_UNIQUE_PRIMERS = NON_UNIQUE_PRIMERS | set(nonUnique)
            candidates = candidates - set(nonUnique)
            FORBIDDEN_PRIMERS = FORBIDDEN_PRIMERS | set(forbidden)
            sys.stderr.write(str(datetime.now()) + " potUnique " + str(eValue) + " " + str(len(candidates)) + "\n")
    UNIQUE_PRIMERS = UNIQUE_PRIMERS | candidates
    return (candidates | UNIQUE_PRIMERS)
def get_DNA_sequence(chr, start, end, blastdbcmd, genomeFile):
    """Get the DNA sequence of a given genomic region.

    Extracts positions start-end of database entry ``chr`` from the blast
    database via blastdbcmd and returns the sequence as one string with the
    FASTA header line(s) and newlines removed.
    """
    p1 = subprocess.Popen([blastdbcmd, "-db", genomeFile, "-entry", chr,
                           "-range", str(start) + "-" + str(end)],
                          stdout=subprocess.PIPE)
    fastaOutput = p1.communicate()[0]
    # Strip the header and join the sequence lines in Python instead of the
    # old grep | tr pipeline.  That pipeline called tr -d "'\n'" with the
    # quotes passed literally, so it also deleted apostrophes from the
    # stream; filtering here avoids both the extra processes and that quirk.
    return "".join(line for line in fastaOutput.splitlines() if not line.startswith(">"))
def get_primer_target_sequence(id, svStartChr, svStartPos, svEndChr, svEndPos, svType, svComment, primerTargetSize, primerOffset, blastdbcmd, genomeFile):
    """Get the sequences in which primers will be placed.

    For each structural-variant type, two target regions of length (up to)
    primerTargetSize are extracted, one per breakpoint, each kept
    primerOffset bases clear of the breakpoint position itself.  Regions on
    the far side of a breakpoint are reverse complemented so the returned
    templates read towards the junction.

    NOTE(review): an svType not matched by any branch below leaves
    targetSeq1/targetSeq2 unassigned and raises UnboundLocalError at the
    return -- confirm callers only pass the types handled here.
    NOTE(review): the id and svComment parameters are accepted but unused.

    @return: (targetSeq1, targetSeq2), both upper-case strings
    """
    # --- target sequence 1: around the start breakpoint ---
    if svType in ["del", "inv3to3", "trans3to3", "trans3to5", "snv", "invRefA", "invAltA"]:
        # Window ending primerOffset bases before the start position.
        targetSeq1Start = svStartPos - primerOffset - primerTargetSize
        targetSeq1End = svStartPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    elif svType in ["invRefB"]:
        # Window before the end position, clipped so it stays past the start.
        targetSeq1Start = max(svEndPos - primerOffset - primerTargetSize, svStartPos + primerOffset)
        targetSeq1End = svEndPos - primerOffset
        targetSeq1 = get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper()
    elif svType in ["trans5to3", "trans5to5"]:
        # Window after the start position, reverse complemented.
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = svStartPos + primerOffset + primerTargetSize
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    elif svType in ["dup", "inv5to5", "invAltB"]:
        # Window after the start position, clipped before the end breakpoint,
        # reverse complemented.
        targetSeq1Start = svStartPos + primerOffset
        targetSeq1End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq1 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq1Start, targetSeq1End, blastdbcmd, genomeFile).upper())
    # --- target sequence 2: around the end breakpoint ---
    if svType in ["del", "inv5to5", "snv", "invRefB", "invAltB"]:
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["invRefA"]:
        targetSeq2Start = svStartPos + primerOffset
        targetSeq2End = min(svStartPos + primerOffset + primerTargetSize, svEndPos - primerOffset)
        targetSeq2 = get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["dup", "inv3to3", "invAltA"]:
        targetSeq2Start = max(svEndPos - primerTargetSize - primerOffset, svStartPos + primerOffset)
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svStartChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    elif svType in ["trans3to5", "trans5to5"]:
        # For translocations the second breakpoint lies on svEndChr.
        targetSeq2Start = svEndPos + primerOffset
        targetSeq2End = svEndPos + primerOffset + primerTargetSize
        targetSeq2 = get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper()
    elif svType in ["trans3to3", "trans5to3"]:
        targetSeq2Start = svEndPos - primerTargetSize - primerOffset
        targetSeq2End = svEndPos - primerOffset
        targetSeq2 = reverseComplementSequence(get_DNA_sequence(svEndChr, targetSeq2Start, targetSeq2End, blastdbcmd, genomeFile).upper())
    return (targetSeq1, targetSeq2)
def reverseComplementSequence(sequence):
    '''Reverse complements a DNA sequence'''
    # Complement each base while walking the sequence backwards; unknown
    # bases other than N/n raise KeyError (same as before).
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A',
             'a': 't', 'c': 'g', 'g': 'c', 't': 'a',
             'N': 'N', 'n': 'n'}
    return ''.join(pairs[base] for base in reversed(sequence))
def print_array(arrayVariable):
    """Print a 2D array to the terminal, one tab-separated row per line."""
    # Parenthesized print of a single argument behaves identically under
    # Python 2 and 3, making this helper forward-compatible.
    print("\n".join(["\t".join(map(str, r)) for r in arrayVariable]))
def is_number(s):
    """Check whether *s* can be interpreted as a number.

    Accepts anything float() accepts (ints, floats, 'nan', 'inf', ...).
    Also returns False -- instead of raising TypeError -- for non-numeric,
    non-string objects such as None or lists, so callers can probe
    arbitrary values safely.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
# Script entry point: log start/end timestamps around the main run.  The
# __main__ guard keeps the run from firing if this module is ever imported.
if __name__ == "__main__":
    sys.stderr.write(str(datetime.now()) + "\n")
    main()
    sys.stderr.write(str(datetime.now()) + "\n")
|
zichner/primerDesign
|
primerDesign.py
|
Python
|
mit
| 25,518
|
[
"BLAST"
] |
7317ba510fc07e41211be0a770d7902afca9bd40896c5d57a5596d1b0ff2ce7c
|
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import griddata
import seaborn as sns
from scipy.stats import norm
from scipy.stats import uniform
from scipy.stats import multivariate_normal
import sys
from mpl_toolkits.mplot3d import Axes3D
import itertools
from matplotlib import cm
from matplotlib.colors import LightSource
import pdb
from sklearn.utils import resample
import ast
# Load the simulation statistics and the MBAR reweighting results for the
# AlkEthOH C-H bond-length observable.
df = pd.read_csv('AlkEthOH_c1143_C-H_bl_stats.csv', sep=';')
df1 = pd.read_csv('AlkEthOH_c1143_C-H_bl_stats_MBAR.csv', sep=';')
# 'param_values' is stored as a stringified "[k, length]" list; parse it and
# split into separate float columns.
df1['param_values'] = df1['param_values'].apply(ast.literal_eval)
df1[['k_values', 'length_values']] = df1['param_values'].apply(pd.Series)
df1.k_values = df1.k_values.astype(float)
df1.length_values = df1.length_values.astype(float)
df1 = df1.drop('param_values', axis=1)  # explicit axis= (positional form deprecated)
# (k, r0, observable) triples as N x 3 arrays.  DataFrame.as_matrix() was
# removed in pandas 1.0; selecting the columns and taking .values is the
# equivalent, version-stable form.
points_av = df[['k_values', 'length_values', 'bond_length_average']].values
points_var = df[['k_values', 'length_values', 'bond_length_variance']].values
points_var_var = df[['k_values', 'length_values', 'bond_length_variance_variance']].values
points_av_MBAR = df1[['k_values', 'length_values', 'bond_length_average']].values
points_var_MBAR = df1[['k_values', 'length_values', 'bond_length_variance']].values
points_av_unc_MBAR = df1[['k_values', 'length_values', 'bond_length_average_unc']].values
points_var_unc_MBAR = df1[['k_values', 'length_values', 'bond_length_variance_unc']].values
def poly_matrix(x, y, order=2):
    """ generate Matrix use with lstsq """
    # One column per monomial x**i * y**j with 0 <= i, j <= order, ordered
    # as (i, j) = (0,0), (0,1), ..., (order, order).
    exponents = list(itertools.product(range(order + 1), repeat=2))
    G = np.zeros((x.size, len(exponents)))
    for col, (i, j) in enumerate(exponents):
        G[:, col] = x**i * y**j
    return G
# --- Polynomial surrogate fitting -------------------------------------------
# Fit 2nd-order bivariate polynomials (least squares) to the bond-length
# average and variance surfaces, for both the simulation and the MBAR data.
ordr = 2 # order of polynomial
#x_av_0 = x_av[0]
#y_av_0 = y_av[0]
#x_var_0 = x_var[0]
# Split each N x 3 point array into k (x), r0 (y) and observable (z) columns.
x_av, y_av, z_av = points_av.T
#x_av, y_av = x_av - x_av[0], y_av - y_av[0] # this improves accuracy
x_var, y_var, z_var = points_var.T
#x_var, y_var = x_var - x_var[0], y_var - y_var[0] # this improves accuracy
x_var_var, y_var_var, z_var_var = points_var_var.T
x_avm, y_avm, z_avm = points_av_MBAR.T
#x_av, y_av = x_av - x_av[0], y_av - y_av[0] # this improves accuracy
x_varm, y_varm, z_varm = points_var_MBAR.T
#x_var, y_var = x_var - x_var[0], y_var - y_var[0] # this improves accuracy
x_av_uncm, y_av_uncm, z_av_uncm = points_av_unc_MBAR.T
x_var_uncm, y_var_uncm, z_var_uncm = points_var_unc_MBAR.T
print x_avm
print y_avm
# make Matrix:
G = poly_matrix(x_av, y_av, ordr)
# Solve for np.dot(G, m) = z:
m_av = np.linalg.lstsq(G, z_av)[0]
# Solve for np.dot(G, m) = z:
m_var = np.linalg.lstsq(G, z_var)[0]
Gm = poly_matrix(x_avm, y_avm, ordr)
m_avm = np.linalg.lstsq(Gm, z_avm)[0]
m_varm = np.linalg.lstsq(Gm, z_varm)[0]
# Evaluate it on a grid...
nx, ny = 100, 100
xx, yy = np.meshgrid(np.linspace(x_av.min(), x_av.max(), nx),
                     np.linspace(y_av.min(), y_av.max(), ny))
xxm, yym = np.meshgrid(np.linspace(x_avm.min(), x_avm.max(), nx),
                       np.linspace(y_avm.min(), y_avm.max(), ny))
GG = poly_matrix(xx.ravel(), yy.ravel(), ordr)
zz_av = np.reshape(np.dot(GG, m_av), xx.shape)
zz_var = np.reshape(np.dot(GG, m_var), xx.shape)
GGm = poly_matrix(xxm.ravel(), yym.ravel(), ordr)
zz_avm = np.reshape(np.dot(GGm, m_avm), xxm.shape)
zz_varm = np.reshape(np.dot(GGm, m_varm), xxm.shape)
# "check" surfaces: the simulation-data fits evaluated on the MBAR grid.
zz_avcheck = np.reshape(np.dot(GGm, m_av), xxm.shape)
zz_varcheck = np.reshape(np.dot(GGm, m_var), xxm.shape)
# Same simulation-fit surfaces, written out term by term.
zz_av_comp = m_av[0] + m_av[1]*yy + m_av[2]*yy**2 + m_av[3]*xx + m_av[4]*xx*yy + m_av[5]*xx*(yy**2) + m_av[6]*xx**2 + m_av[7]*(xx**2)*yy + m_av[8]*(xx**2)*(yy**2)
zz_var_comp = m_var[0] + m_var[1]*yy + m_var[2]*yy**2 + m_var[3]*xx + m_var[4]*xx*yy + m_var[5]*xx*(yy**2) + m_var[6]*xx**2 + m_var[7]*(xx**2)*yy + m_var[8]*(xx**2)*(yy**2)
zz_av_comp_boots = []
zz_var_comp_boots = []
# Bootstrap regression for error bars
# NOTE(review): resample() shuffles rows of the already-evaluated surface
# rather than refitting on resampled data points -- confirm this is the
# intended bootstrap.
nBoots_work = 200
for n in range(nBoots_work):
    if (n == 0):
        # The first "replicate" is the unresampled surface itself.
        booti_av = zz_av_comp
        booti_var = zz_var_comp
    else:
        booti_av = resample(zz_av_comp)
        booti_var = resample(zz_var_comp)
    zz_av_comp_boots.append(booti_av)
    zz_var_comp_boots.append(booti_var)
zz_av_comp_boots = np.array(zz_av_comp_boots)
zz_var_comp_boots = np.array(zz_var_comp_boots)
# Pointwise variance across bootstrap replicates = uncertainty estimate.
zz_av_unc = np.apply_along_axis(np.var,0,zz_av_comp_boots)
zz_var_unc = np.apply_along_axis(np.var,0,zz_var_comp_boots)
# --- Plot: bond-length average surface (simulation fit on the MBAR grid)
#     overlaid with the MBAR data points and their error bars. ---
# Plotting (see http://matplotlib.org/examples/mplot3d/custom_shaded_3d_surface.html):
fg, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ls = LightSource(270, 45)
rgb = ls.shade(zz_avcheck, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
#rgbm = ls.shade(zz_avm, cmap=cm.jet, vert_exag=0.1, blend_mode='soft')
#heatmap = ax.pcolor(zz_av, cmap=rgb)
#plt.colorbar(mappable=heatmap) # put the major ticks at the middle of each cell
surf = ax.plot_surface(xxm, yym, zz_avcheck, rstride=1, cstride=1, facecolors=rgb,
                       linewidth=0, antialiased=False, shade=False)
#surf1 = ax.plot_surface(xxm, yym, zz_avm, rstride=1, cstride=1, facecolors=rgbm,
#                        linewidth=0, antialiased=False, shade=False)
ax.set_xlabel('Bonded force constant - (kcal/mol/A^2)')
ax.set_ylabel('Equilibrium bond length - (A)')
ax.set_zlabel('Average of bond length distribution - (A)')
#ax.plot3D(x_av, y_av, z_av, "o",label="Simulation data")
ax.plot3D(x_avm,y_avm,z_avm,"o",label="MBAR data")
#ax.set_xlim([x_avm.min(),x_avm.max()])
#ax.set_ylim([y_avm.min(),y_avm.max()])
#ax.set_zlim([z_avm.min(),z_avm.max()])
ax.legend()
# Vertical error bars (+/- one uncertainty) at each MBAR data point.
for i in np.arange(0, len(x_avm)):
    ax.plot([x_avm[i],x_avm[i]], [y_avm[i],y_avm[i]], [z_avm[i]-z_av_uncm[i], z_avm[i]+z_av_uncm[i]], marker="_")
"""
xx_arr = np.vstack(xx.flatten()).T[0]
yy_arr = np.vstack(yy.flatten()).T[0]
zz_av_comp_arr = np.vstack(zz_av_comp.flatten()).T[0]
zz_av_unc_arr = np.vstack(zz_av_unc.flatten()).T[0]
zz_var_comp_arr = np.vstack(zz_var_comp.flatten()).T[0]
zz_var_unc_arr = np.vstack(zz_var_unc.flatten()).T[0]
"""
#for i in np.arange(0, len(xx_arr)):
#    ax.plot([xx_arr[i],xx_arr[i]], [yy_arr[i],yy_arr[i]], zs=[zz_av_comp_arr[i]-zz_av_unc_arr[i],zz_av_comp_arr[i]+zz_av_unc_arr[i]],marker="_")
fg.canvas.draw()
plt.savefig('bond_length_average_vs_k_length_w_fit.png')
"""
# Plotting (see http://matplotlib.org/examples/mplot3d/custom_shaded_3d_surface.html):
fg, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ls = LightSource(270, 45)
rgb = ls.shade(zz_avm, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
#heatmap = ax.pcolor(zz_av, cmap=rgb)
#plt.colorbar(mappable=heatmap) # put the major ticks at the middle of each cell
surf = ax.plot_surface(xxm, yym, zz_avm, rstride=1, cstride=1, facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
ax.set_xlabel('Bonded force constant - (kcal/mol/A^2)')
ax.set_ylabel('Equilibrium bond length - (A)')
ax.set_zlabel('MBAR expectation of average of bond length distribution - (A)')
ax.plot3D(x_avm, y_avm, z_avm, "o",label='MBAR data')
ax.plot3D(x_av, y_av, z_av, "o",label="Simulation data")
ax.set_xlim([x_avm.min(),x_avm.max()])
ax.set_ylim([y_avm.min(),y_avm.max()])
ax.set_zlim([z_avm.min(),z_avm.max()])
ax.legend()
fg.canvas.draw()
plt.savefig('bond_length_average_vs_k_length_w_fit_MBAR.png')
# Plotting (see http://matplotlib.org/examples/mplot3d/custom_shaded_3d_surface.html):
fg, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ls = LightSource(270, 45)
rgb = ls.shade(zz_var, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
rgb1 = ls.shade(zz_var, cmap=cm.jet, vert_exag=0.1, blend_mode='soft')
surf = ax.plot_surface(xx, yy, zz_var,cmap=cm.gist_earth, rstride=1, cstride=1, facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
surf1 = ax.plot_surface(xxm, yym, zz_varm,cmap=cm.gist_earth, rstride=1, cstride=1, facecolors=rgb1,
linewidth=0, antialiased=False, shade=False)
ax.set_xlabel('Bonded force constant - (kcal/mol/A^2)')
ax.set_ylabel('Equilibrium bond length - (A)')
ax.set_zlabel('Variance of bond length distribution - (A^2)')
ax.plot3D(x_var, y_var, z_var, "o",label="Simulation data")
ax.plot3D(x_varm, y_varm, z_varm, "o",label="MBAR data")
ax.legend()
#for i in np.arange(0, len(x_av)):
# ax.plot([x_var[i],x_var[i]], [y_var[i],y_var[i]], [z_var[i]-z_var_var[i], z_var[i]+z_var_var[i]], marker="_")
#for i in np.arange(0, len(xx_arr)):
# ax.plot([xx_arr[i],xx_arr[i]], [yy_arr[i],yy_arr[i]], zs=[zz_var_comp_arr[i]-zz_var_unc_arr[i],zz_var_comp_arr[i]+zz_var_unc_arr[i]],marker="_")
fg.canvas.draw()
plt.savefig('bond_length_variance_vs_k_length_w_fit.png')
"""
# Plotting (see http://matplotlib.org/examples/mplot3d/custom_shaded_3d_surface.html):
fg, ax = plt.subplots(subplot_kw=dict(projection='3d'))
ls = LightSource(270, 45)
rgb = ls.shade(zz_varcheck, cmap=cm.gist_earth, vert_exag=0.1, blend_mode='soft')
surf = ax.plot_surface(xxm, yym, zz_varcheck,cmap=cm.gist_earth, rstride=1, cstride=1, facecolors=rgb,
linewidth=0, antialiased=False, shade=False)
#fg.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('Bonded force constant - (kcal/mol/A^2)')
ax.set_ylabel('Equilibrium bond length - (A)')
ax.set_zlabel('MBAR expectation of variance of bond length distribution - (A^2)')
ax.plot3D(x_varm, y_varm, z_varm, "o",label='MBAR data')
#ax.plot3D(x_var, y_var, z_var, "o",label='Simulation data')
#ax.set_xlim([x_varm.min(),x_varm.max()])
#ax.set_ylim([y_varm.min(),y_varm.max()])
#ax.set_zlim([z_varm.min(),z_varm.max()])
ax.legend()
for i in np.arange(0, len(x_varm)):
ax.plot([x_varm[i],x_varm[i]], [y_varm[i],y_varm[i]], [z_varm[i]-z_var_uncm[i], z_varm[i]+z_var_uncm[i]], marker="_")
#for i in np.arange(0, len(xx_arr)):
# ax.plot([xx_arr[i],xx_arr[i]], [yy_arr[i],yy_arr[i]], zs=[zz_var_comp_arr[i]-zz_var_unc_arr[i],zz_var_comp_arr[i]+zz_var_unc_arr[i]],marker="_")
fg.canvas.draw()
plt.savefig('bond_length_variance_vs_k_length_w_fit_MBAR.png')
sys.exit()
x_av,res_av,rank_av,s_av = np.linalg.lstsq(G, z_av)
x_var,res_var,rank_var,s_var = np.linalg.lstsq(G, z_var)
print x_av
print x_var
print rank_av
print rank_var
print res_av
print res_var
def sampler(data, samples=4, theta_init=[500,0.8], proposal_width=[10,0.05], plot=False, mu_prior_mu=0, mu_prior_sd=1.):
    """Metropolis-Hastings sampler over theta = [k, r0] using the module-level
    polynomial surrogate coefficients m_av / m_var.

    @param data:           [[observed average, observed variance],
                           [their respective variances]] -- indexed as
                           data[0][j] (values) and data[1][j] (variances)
    @param samples:        number of MCMC steps
    @param theta_init:     starting point [force constant, bond length]
    @param proposal_width: per-parameter sd of the Gaussian proposal
    @param plot:           unused (the plotting call below is commented out)
    @param mu_prior_mu, mu_prior_sd: unused legacy parameters
    @return: (posterior list of theta samples, list of unnormalised
              posterior probabilities)

    NOTE(review): theta_init and proposal_width are mutable default
    arguments -- safe only while callers never mutate them.

    Outline:
    1)Take data and calculate observable
    2)Reweight observable to different state and calculate observable based on new state
    - smarty move in many parameters
    - will have to start without torsion moves
    - Safe moves in equilibrium bond length and angle is ~3%. For force constants ~5%.
    3)Will have to make decision here:
    a)Continue to sample in order to gather more data? -or-
    b)Attempt to create surrogate models from the data we have? What does that entail?
    i)We want a surrogate model for every observable we have, $O\left(\theta\right)$
    ii)Thus for bonds and angles; we have 4 observables as a function of however many parameters we're working with at the time
    iii)Choice of surrogate model becomes important. Start with splining though
    iv)What is the best surrogate modeling technique to use when we have very sparse data?
    Other things to consider:
    1)Choice of surrogate models:
    a)Splining
    b)Rich's ideas
    c)Other ideas from Michael he got at conference last week
    2)Choice of likelihood:
    a)Gaussian likelihood
    b)More general based on mean squared error
    3)Prior
    a)Start with uniforms with physically relevant bounds for given parameter
    b)Informationless priors
    Expanding initial knowledge region using MBAR
    1) Simulate single thermodynamic state
    2) Use MBAR to reweight in parameter space
    a) Will go to full extent of parameters within region where we know MBAR estimates are good
    b) Reweighting at multiple steps to full extent and along diagonals between parameters in order to create grid of points of evidence
    c) Now we cheaply achieved a region of evidence vs a single point
    3) Can fit our hypercube to multiple planes
    a) Assuming trends in very local space will be incredibly linear
    b) Probably a pretty safe assumption given minute change in parameter
    """
    # Begin process by loading a prescribed simulation or performing it if it doesn't exist in the specified directory
    theta_current = theta_init
    posterior = [theta_current]
    # NOTE(review): the initial probability entry is a random placeholder.
    probs = [np.random.rand()]
    hits = []  # 1 per accepted step, 0 per rejected step (for efficiency)
    for i in range(samples):
        # suggest new position (independent Gaussian proposal per parameter)
        theta_proposal = [norm(theta_current[j],proposal_width[j]).rvs() for j in range(len(theta_current))]
        # Compute observables at current and proposed theta with surrogates
        # (2nd-order polynomial in theta[0]=k and theta[1]=r0).
        O_av_comp_curr = m_av[0] + m_av[1]*theta_current[1] + m_av[2]*theta_current[1]**2 + m_av[3]*theta_current[0] +\
                         m_av[4]*theta_current[0]*theta_current[1] + m_av[5]*theta_current[0]*(theta_current[1]**2) +\
                         m_av[6]*theta_current[0]**2 + m_av[7]*(theta_current[0]**2)*theta_current[1] +\
                         m_av[8]*(theta_current[0]**2)*(theta_current[1]**2)
        O_var_comp_curr = m_var[0] + m_var[1]*theta_current[1] + m_var[2]*theta_current[1]**2 + m_var[3]*theta_current[0] +\
                          m_var[4]*theta_current[0]*theta_current[1] + m_var[5]*theta_current[0]*(theta_current[1]**2) +\
                          m_var[6]*theta_current[0]**2 + m_var[7]*(theta_current[0]**2)*theta_current[1] +\
                          m_var[8]*(theta_current[0]**2)*(theta_current[1]**2)
        O_comp_curr = [O_av_comp_curr,O_var_comp_curr]
        O_av_comp_prop = m_av[0] + m_av[1]*theta_proposal[1] + m_av[2]*theta_proposal[1]**2 + m_av[3]*theta_proposal[0] +\
                         m_av[4]*theta_proposal[0]*theta_proposal[1] + m_av[5]*theta_proposal[0]*(theta_proposal[1]**2) +\
                         m_av[6]*theta_proposal[0]**2 + m_av[7]*(theta_proposal[0]**2)*theta_proposal[1] +\
                         m_av[8]*(theta_proposal[0]**2)*(theta_proposal[1]**2)
        O_var_comp_prop = m_var[0] + m_var[1]*theta_proposal[1] + m_var[2]*theta_proposal[1]**2 + m_var[3]*theta_proposal[0] + \
                          m_var[4]*theta_proposal[0]*theta_proposal[1] + m_var[5]*theta_proposal[0]*(theta_proposal[1]**2) + \
                          m_var[6]*theta_proposal[0]**2 + m_var[7]*(theta_proposal[0]**2)*theta_proposal[1] + \
                          m_var[8]*(theta_proposal[0]**2)*(theta_proposal[1]**2)
        O_comp_prop = [O_av_comp_prop,O_var_comp_prop]
        # Compute likelihood by multiplying probabilities of each data point
        # (Gaussian likelihood with variance data[1][j] per observable).
        likelihood_current = np.prod(np.array([1/(np.sqrt(2*np.pi*data[1][j])) * np.exp(- ((data[0][j] - O_comp_curr[j])**2)/(2*data[1][j]))
                                               for j in range(len(data))]))
        likelihood_proposal = np.prod(np.array([1/(np.sqrt(2*np.pi*data[1][j])) * np.exp(- ((data[0][j] - O_comp_prop[j])**2)/(2*data[1][j]))
                                                for j in range(len(data))]))
        # Compute prior probability of current and proposed mu
        # NOTE(review): this evaluates a normal with mean theta[0] and sd
        # theta[1] at theta[0] itself, i.e. its own density peak -- confirm
        # this is the intended prior rather than a placeholder.
        prior_current = norm(theta_current[0],theta_current[1]).pdf(theta_current[0])
        prior_proposal = norm(theta_proposal[0],theta_proposal[1]).pdf(theta_proposal[0])
        p_current = likelihood_current * prior_current
        p_proposal = likelihood_proposal * prior_proposal
        # Accept proposal?
        p_accept = p_proposal / p_current
        # Usually would include prior probability, which we neglect here for simplicity
        accept = np.random.rand() < p_accept
        #if plot:
        #    plot_proposal(mu_current, mu_proposal, mu_prior_mu, mu_prior_sd, data, accept, posterior, i)
        if accept:
            # Update position
            theta_current = theta_proposal
            hits.append(1)
            print "%s out of %s MCMC steps completed. Prior current = %s" % (i,samples,prior_current)
        else:
            hits.append(0)
        posterior.append(theta_current)
        probs.append(float(likelihood_current*prior_current))
    # Acceptance rate of the chain.
    efficiency = float(sum(hits))/float(samples)
    print efficiency
    return posterior,probs
# Run the sampler against the observed C-H bond-length average and variance
# (data[0]) with their uncertainties (data[1]).
# NOTE(review): 1,000,000 MCMC steps -- this run is long.
posterior,probs = sampler([[1.0920405895833334,0.00090201196735599997],[0.00090201196735599997,2.8009246152166006e-10]],samples=1000000)
# Split sampled thetas into k (x) and r0 (y) series.
x = np.array([a[0] for a in posterior])
y = np.array([a[1] for a in posterior])
# 2D hexbin histogram of the sampled (k, r0) pairs.
fig, ax = plt.subplots()
hb = ax.hexbin(x, y, cmap=cm.jet)
ax.axis([625.0, 725.0, 0.95, 1.20])
ax.set_xlabel('Bonded force constant - (kcal/mol/A^2)')
ax.set_ylabel('Equilibrium bond length - (A)')
ax.set_title('Frequency of parameter combinations sampled from posterior distribution')
cb = fig.colorbar(hb, ax=ax)
cb.set_label('Frequency')
plt.savefig('C-H_2D_posterior.png')
#------------------------------------------------------------------
|
bmanubay/open-forcefield-tools
|
single-molecule-property-generation/graph_bl.py
|
Python
|
mit
| 17,095
|
[
"Gaussian"
] |
df1d3c31ddf5b40e0245f89e0a44cd6d81d77df54b06ecea49614e3446d132f5
|
import os
import netCDF4 as nc
import numpy
import pyresample
from satistjenesten import io
from satistjenesten import image
# Steps for the IO implementation
@given(u'we process mitiff file {input_filepath}')
def step_impl(context, input_filepath):
    """Load a MITIFF scene and keep the path on the context for later steps."""
    # NOTE(review): @given/@then are not imported in this chunk -- presumably
    # injected by the BDD framework's step environment; confirm.
    context.input_filepath = input_filepath
    context.scene = io.load_mitiff(input_filepath)
    assert context.scene.area_def is not None


@then(u'export a geotiff file {output_filepath}')
def step_impl(context, output_filepath):
    """Write the loaded scene to GeoTIFF using the 'istjenesten' colormap."""
    context.scene.save_geotiff(output_filepath, cmap='istjenesten')


@given(u'we process netcdf file {input_filepath}')
def step_impl(context, input_filepath):
    """Load one named band from a NetCDF file."""
    context.scene = io.load_netcdf(input_filepath, bands = ['ct_n90_OSISAF_corrNASA_wWF'])


@given(u'we process GeoTIFF file {input_filepath}')
def step_impl(context, input_filepath):
    """Load band '1' from a GeoTIFF and check it holds non-zero data."""
    context.scene = io.load_geotiff(input_filepath, bands = ['1'])
    assert context.scene.bands[1].data.any() > 0
@then(u'resample to {area_name}')
def step_impl(context, area_name):
    """Resample the current scene to the area named in the feature step.

    Bug fix: the area name captured from the step text was ignored and
    'istjenesten_main_4k' was always used; honor the requested area.
    """
    area = pyresample.utils.load_area('areas.cfg', area_name)
    context.scene = context.scene.resample_to_area(area, resample_method='nn')
@then(u'export an image {image_filepath} with graphics')
def step_impl(context, image_filepath):
    """Compose an RGB image, decorate it, and save full plus reduced copies."""
    context.scene.compose_rgb_image([1, 2, 3])
    context.scene.add_coastlines_graticules_to_image()
    context.scene.add_caption_to_image(u'Barents sea')
    context.scene.save_image(image_filepath)
    # NOTE(review): the reduced JPEG path is hard-coded to /tmp/barents.jpg.
    context.scene.save_reduced_jpeg('/tmp/barents.jpg', 100)
|
mitkin/satistjenesten
|
features/steps/steps.py
|
Python
|
mit
| 1,531
|
[
"NetCDF"
] |
cac3c6c02b1addf3b4bbfc77dbaf364dd1f2746c16756bd6ce94097e0e51317d
|
""" DIRAC.Resources.Storage package """
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/Storage/__init__.py
|
Python
|
gpl-3.0
| 40
|
[
"DIRAC"
] |
5068bfac7fb45974f6b786af9bd399184d057851dcfad0eae5f2c6390ee35f59
|
#pylint: disable=C0111
#pylint: disable=W0621
from lettuce import world, step
from lettuce.django import django_url
from course_modes.models import CourseMode
from nose.tools import assert_equal
def create_cert_course():
    """Reset the course catalog and create the certificates test course with
    a free audit mode and a paid verified mode."""
    world.clear_courses()
    org, number, name = 'edx', '999', 'Certificates'
    course_id = '{org}/{number}/{name}'.format(
        org=org, number=number, name=name)
    world.scenario_dict['course_id'] = course_id
    world.scenario_dict['COURSE'] = world.CourseFactory.create(
        org=org, number=number, display_name=name)
    # Free audit enrollment mode.
    free_mode = world.CourseModeFactory.create(
        course_id=course_id,
        mode_slug='audit',
        mode_display_name='audit course',
        min_price=0,
    )
    assert isinstance(free_mode, CourseMode)
    # Paid verified-certificate mode with suggested contribution amounts.
    paid_mode = world.CourseModeFactory.create(
        course_id=course_id,
        mode_slug='verified',
        mode_display_name='verified cert course',
        min_price=16,
        suggested_prices='32,64,128',
        currency='usd',
    )
    assert isinstance(paid_mode, CourseMode)
def register():
    """Visit the course about page and click through to registration."""
    url = 'courses/{org}/{number}/{name}/about'.format(
        org='edx', number='999', name='Certificates')
    world.browser.visit(django_url(url))
    world.css_click('section.intro a.register')
    assert world.is_css_present('section.wrapper h3.title')


@step(u'the course has an honor mode')
def the_course_has_an_honor_mode(step):
    """Add a free honor-code mode to the certificates course."""
    create_cert_course()
    honor_mode = world.CourseModeFactory.create(
        course_id=world.scenario_dict['course_id'],
        mode_slug='honor',
        mode_display_name='honor mode',
        min_price=0,
    )
    assert isinstance(honor_mode, CourseMode)


@step(u'I select the audit track$')
def select_the_audit_track(step):
    """Register for the course and choose the free audit track."""
    create_cert_course()
    register()
    btn_css = 'input[value="Select Audit"]'
    world.wait(1) # TODO remove this after troubleshooting JZ
    world.css_find(btn_css)
    world.css_click(btn_css)


def select_contribution(amount=32):
    """Pick the radio button for the given contribution amount."""
    radio_css = 'input[value="{}"]'.format(amount)
    world.css_click(radio_css)
    assert world.css_find(radio_css).selected


@step(u'I select the verified track$')
def select_the_verified_track(step):
    """Register, pick a contribution, and enter the verified flow."""
    create_cert_course()
    register()
    select_contribution(32)
    world.wait_for_ajax_complete()
    btn_css = 'input[value="Select Certificate"]'
    world.css_click(btn_css)
    assert world.is_css_present('section.progress')
@step(u'I should see the course on my dashboard$')
def should_see_the_course_on_my_dashboard(step):
    """The dashboard lists at least one enrolled course."""
    course_css = 'li.course-item'
    assert world.is_css_present(course_css)


@step(u'I go to step "([^"]*)"$')
def goto_next_step(step, step_num):
    """Advance the verification carousel from step *step_num* ('1'-'4')."""
    # Button that moves past each step, keyed by step number.
    btn_css = {
        '1': '#face_next_button',
        '2': '#face_next_link',
        '3': '#photo_id_next_link',
        '4': '#pay_button',
    }
    # Wrapper expected to become active after the click.
    next_css = {
        '1': 'div#wrapper-facephoto.carousel-active',
        '2': 'div#wrapper-idphoto.carousel-active',
        '3': 'div#wrapper-review.carousel-active',
        '4': 'div#wrapper-review.carousel-active',
    }
    world.css_click(btn_css[step_num])
    # Pressing the button will advance the carousel to the next item
    # and give the wrapper div the "carousel-active" class
    assert world.css_find(next_css[step_num])


@step(u'I capture my "([^"]*)" photo$')
def capture_my_photo(step, name):
    """Inject a fixed image into the webcam widget named *name* via JS."""
    # Hard coded red dot image
    image_data = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='
    snapshot_script = "$('#{}_image')[0].src = '{}';".format(name, image_data)
    # Mirror the javascript of the photo_verification.html page
    world.browser.execute_script(snapshot_script)
    world.browser.execute_script("$('#{}_capture_button').hide();".format(name))
    world.browser.execute_script("$('#{}_reset_button').show();".format(name))
    world.browser.execute_script("$('#{}_approve_button').show();".format(name))
    assert world.css_find('#{}_approve_button'.format(name))
@step(u'I approve my "([^"]*)" photo$')
def approve_my_photo(step, name):
    """Click the approve control for the 'face' or 'photo_id' capture."""
    button_css = {
        'face': 'div#wrapper-facephoto li.control-approve',
        'photo_id': 'div#wrapper-idphoto li.control-approve',
    }
    wrapper_css = {
        'face': 'div#wrapper-facephoto',
        'photo_id': 'div#wrapper-idphoto',
    }
    # Make sure that the carousel is in the right place
    assert world.css_has_class(wrapper_css[name], 'carousel-active')
    assert world.css_find(button_css[name])
    # HACK: for now don't bother clicking the approve button for
    # id_photo, because it is sending you back to Step 1.
    # Come back and figure it out later. JZ Aug 29 2013
    if name=='face':
        world.css_click(button_css[name])
    # Make sure you didn't advance the carousel
    assert world.css_has_class(wrapper_css[name], 'carousel-active')


@step(u'I select a contribution amount$')
def select_contribution_amount(step):
    """Choose the default 32-unit contribution."""
    select_contribution(32)


@step(u'I confirm that the details match$')
def confirm_details_match(step):
    """Tick the 'photos are good' checkbox on the review step."""
    # First you need to scroll down on the page
    # to make the element visible?
    # Currently chrome is failing with ElementNotVisibleException
    world.browser.execute_script("window.scrollTo(0,1024)")
    cb_css = 'input#confirm_pics_good'
    world.css_click(cb_css)
    assert world.css_find(cb_css).checked


@step(u'I am at the payment page')
def at_the_payment_page(step):
    """The payment form is present (identified by its signature input)."""
    assert world.css_find('input[name=transactionSignature]')


@step(u'I submit valid payment information$')
def submit_payment(step):
    """Submit the payment form once outstanding AJAX work finishes."""
    # First make sure that the page is done if it still executing
    # an ajax query.
    world.wait_for_ajax_complete()
    button_css = 'input[value=Submit]'
    world.css_click(button_css)
@step(u'I have submitted face and ID photos$')
def submitted_face_and_id_photos(step):
    """Composite step: run the verification flow through photo capture."""
    step.given('I am logged in')
    step.given('I select the verified track')
    step.given('I go to step "1"')
    step.given('I capture my "face" photo')
    step.given('I approve my "face" photo')
    step.given('I go to step "2"')
    step.given('I capture my "photo_id" photo')
    step.given('I approve my "photo_id" photo')
    step.given('I go to step "3"')


@step(u'I have submitted photos to verify my identity')
def submitted_photos_to_verify_my_identity(step):
    """Composite step: photos plus contribution and review, up to payment."""
    step.given('I have submitted face and ID photos')
    step.given('I select a contribution amount')
    step.given('I confirm that the details match')
    step.given('I go to step "4"')


@step(u'I see that my payment was successful')
def see_that_my_payment_was_successful(step):
    """The post-payment congratulations banner is shown."""
    title = world.css_find('div.wrapper-content-main h3.title')
    assert_equal(title.text, u'Congratulations! You are now verified on edX.')


@step(u'I navigate to my dashboard')
def navigate_to_my_dashboard(step):
    """Open the dashboard via the avatar menu."""
    world.css_click('span.avatar')
    assert world.css_find('section.my-courses')


@step(u'I see the course on my dashboard')
def see_the_course_on_my_dashboard(step):
    """The certificates course appears in the dashboard course list."""
    course_link_css = 'section.my-courses a[href*="edx/999/Certificates"]'
    assert world.is_css_present(course_link_css)


@step(u'I see that I am on the verified track')
def see_that_i_am_on_the_verified_track(step):
    """The dashboard entry is styled as a verified enrollment."""
    id_verified_css = 'li.course-item article.course.verified'
    assert world.is_css_present(id_verified_css)


@step(u'I leave the flow and return$')
def leave_the_flow_and_return(step):
    """Re-enter the verification flow by visiting its URL directly."""
    world.visit('verify_student/verified/edx/999/Certificates')


@step(u'I am at the verified page$')
def see_the_payment_page(step):
    """The verified flow's pay button is present."""
    assert world.css_find('button#pay_button')


@step(u'I edit my name$')
def edit_my_name(step):
    """Click the retake-photos link (also used to edit the name)."""
    btn_css = 'a.retake-photos'
    world.css_click(btn_css)


@step(u'I give a reason why I cannot pay$')
def give_a_reason_why_i_cannot_pay(step):
    """Expand the fee-waiver section, accept the honor code, and explain."""
    register()
    link_css = 'h5 i.expandable-icon'
    world.css_click(link_css)
    cb_css = 'input#honor-code'
    world.css_click(cb_css)
    text_css = 'li.field-explain textarea'
    world.css_find(text_css).type('I cannot afford it.')
    btn_css = 'input[value="Select Certificate"]'
    world.css_click(btn_css)
|
abo-abo/edx-platform
|
lms/djangoapps/courseware/features/certificates.py
|
Python
|
agpl-3.0
| 8,213
|
[
"VisIt"
] |
e74a1509ccc12b88660de0c6fe62c1a031ac06bc52b9acf7dbb022f0a0f93fa3
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classe:
AddressBookListPage
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import FULLCLEAR
LOG = logging.getLogger(".NarrativeWeb")
_ = glocale.translation.sgettext
getcontext().prec = 8
class AddressBookListPage(BasePage):
    """
    Create the Address Book index page: a table with one row per person,
    marking with an "X" whether that person has an address, a residence
    event, and/or web links, and linking each name to the person's
    individual Address Book page.
    """
    def __init__(self, report, title, has_url_addr_res):
        """
        @param: report           -- The instance of the main report class
                                    for this report
        @param: title            -- Is the title of the web page
        @param: has_url_addr_res -- Iterable of (sort_name, person_handle,
                                    has_add, has_res, has_url) tuples, one
                                    per person to list, pre-sorted by surname
        """
        BasePage.__init__(self, report, title)

        # Name the file, and create it
        output_file, sio = self.report.create_file("addressbook")

        # Add xml, doctype, meta and stylesheets
        addressbooklistpage, head, body = self.write_header(_("Address Book"))

        # begin AddressBookList division
        with Html("div", class_="content",
                  id="AddressBookList") as addressbooklist:
            body += addressbooklist

            # Address Book Page message
            msg = _("This page contains an index of all the individuals in "
                    "the database, sorted by their surname, with one of the "
                    "following: Address, Residence, or Web Links. "
                    "Selecting the person’s name will take you "
                    "to their individual Address Book page.")
            addressbooklist += Html("p", msg, id="description")

            # begin Address Book table
            with Html("table",
                      class_="infolist primobjlist addressbook") as table:
                addressbooklist += table

                thead = Html("thead")
                table += thead

                trow = Html("tr")
                thead += trow

                # Column headers; the first (unlabeled) column holds the
                # running row number.
                trow.extend(
                    Html("th", label, class_=colclass, inline=True)
                    for (label, colclass) in [
                        [" ", "ColumnRowLabel"],
                        [_("Full Name"), "ColumnName"],
                        [_("Address"), "ColumnAddress"],
                        [_("Residence"), "ColumnResidence"],
                        [_("Web Links"), "ColumnWebLinks"]
                    ]
                )

                tbody = Html("tbody")
                table += tbody

                # One row per person; an "X" marks which of the three
                # categories (address / residence / web links) the person has.
                index = 1
                for (sort_name, person_handle,
                     has_add, has_res,
                     has_url) in has_url_addr_res:
                    address = None
                    residence = None
                    weblinks = None

                    # has address but no residence event
                    if has_add and not has_res:
                        address = "X"

                    # has residence, but no addresses
                    elif has_res and not has_add:
                        residence = "X"

                    # has residence and addresses too
                    elif has_add and has_res:
                        address = "X"
                        residence = "X"

                    # has Web Links
                    if has_url:
                        weblinks = "X"

                    trow = Html("tr")
                    tbody += trow

                    # Empty cells render a blank rather than collapsing.
                    trow.extend(
                        Html("td", data or " ", class_=colclass,
                             inline=True)
                        for (colclass, data) in [
                            ["ColumnRowLabel", index],
                            ["ColumnName",
                             self.addressbook_link(person_handle)],
                            ["ColumnAddress", address],
                            ["ColumnResidence", residence],
                            ["ColumnWebLinks", weblinks]
                        ]
                    )
                    index += 1

        # Add footer and clearline
        footer = self.write_footer(None)
        body += (FULLCLEAR, footer)

        # send the page out for processing
        # and close the file
        self.xhtml_writer(addressbooklistpage, output_file, sio, 0)
|
jralls/gramps
|
gramps/plugins/webreport/addressbooklist.py
|
Python
|
gpl-2.0
| 6,358
|
[
"Brian"
] |
a5fda53318480dd2c7f93a4f2a555dd6bf3e937be25ca65e7b078aa352058fac
|
import numpy
from pyproj import Proj
import click
import rasterio
from rasterio.crs import CRS
from rasterio.warp import reproject
from rasterio.enums import Resampling
from trefoil.netcdf.crs import get_crs
from trefoil.netcdf.utilities import copy_variable, copy_dimension, get_fill_value
from trefoil.netcdf.variable import SpatialCoordinateVariables
def warp_array(
        arr,
        src_crs,
        src_transform,
        dst_crs,
        dst_transform,
        dst_shape,
        resampling=Resampling.nearest):
    """
    Warp a 2D array using rasterio, always returning a masked array.

    A fill_value will be chosen from the array's data type if the input is
    not a masked array (beware conflicts with valid values!).  All nodata
    values are filled in prior to warping, and masked back out afterwards.

    :param dst_shape: shape of destination array

    All other parameters are the same as for rasterio.warp.reproject.
    """
    with rasterio.Env():
        original_dtype = arr.dtype

        # rasterio cannot warp int8 rasters, so widen them temporarily and
        # downcast again after reprojection.
        if arr.dtype == numpy.int8:
            arr = arr.astype('int16')

        warped = numpy.empty(shape=dst_shape, dtype=arr.dtype)

        # Default nodata comes from the dtype; a masked input overrides it
        # with its own fill value and is flattened to a plain array.
        nodata = get_fill_value(arr.dtype)
        if hasattr(arr, 'fill_value'):
            nodata = arr.fill_value
            arr = numpy.ma.filled(arr, nodata)

        reproject(
            arr,
            warped,
            src_crs=src_crs,
            src_transform=src_transform,
            dst_crs=dst_crs,
            dst_transform=dst_transform,
            resampling=resampling,
            src_nodata=nodata,
            dst_nodata=nodata
        )

        if warped.dtype != original_dtype:
            warped = warped.astype(original_dtype)

        # Re-mask everything that still carries the nodata value.
        return numpy.ma.masked_array(warped, mask=warped == nodata)
def warp_like(ds, ds_projection, variables, out_ds, template_ds, template_varname, resampling=Resampling.nearest):
    """
    Warp one or more variables in a NetCDF file based on the coordinate reference system and
    spatial domain of a template NetCDF file.

    :param ds: source dataset
    :param ds_projection: source dataset coordinate reference system, proj4 string or EPSG:NNNN code
    :param variables: list of variable names in source dataset to warp
    :param out_ds: output dataset.  Must be opened in write or append mode.
    :param template_ds: template dataset
    :param template_varname: variable name for template data variable in template dataset
    :param resampling: resampling method.  See rasterio.enums.Resampling for options
    """
    # Extract spatial reference, nodata mask and coordinate variables from
    # the template; output variables inherit these.
    template_variable = template_ds.variables[template_varname]
    template_prj = Proj(get_crs(template_ds, template_varname))
    template_mask = template_variable[:].mask
    # Convention: the last two dimensions are (y, x).
    template_y_name, template_x_name = template_variable.dimensions[-2:]
    template_coords = SpatialCoordinateVariables.from_dataset(
        template_ds,
        x_name=template_x_name,
        y_name=template_y_name,
        projection=template_prj
    )
    # template_geo_bbox = template_coords.bbox.project(ds_prj, edge_points=21)  # TODO: add when needing to subset

    # Source grid: dimensions are taken from the first listed variable, so
    # all variables in `variables` are assumed to share the same (y, x) grid.
    ds_y_name, ds_x_name = ds.variables[variables[0]].dimensions[-2:]
    proj = Proj(init=ds_projection) if 'EPSG:' in ds_projection.upper() else Proj(str(ds_projection))
    ds_coords = SpatialCoordinateVariables.from_dataset(ds, x_name=ds_x_name, y_name=ds_y_name, projection=proj)

    with rasterio.Env():
        # Copy dimensions for variable across to output
        for dim_name in template_variable.dimensions:
            if not dim_name in out_ds.dimensions:
                # Prefer copying the coordinate variable (which also creates
                # the dimension); fall back to a bare dimension.
                if dim_name in template_ds.variables and not dim_name in out_ds.variables:
                    copy_variable(template_ds, out_ds, dim_name)
                else:
                    copy_dimension(template_ds, out_ds, dim_name)

        for variable_name in variables:
            click.echo('Processing: {0}'.format(variable_name))

            variable = ds.variables[variable_name]

            # Nodata: explicit _FillValue attribute if present, otherwise the
            # masked-array fill value of a sample element.
            fill_value = getattr(variable, '_FillValue', variable[0, 0].fill_value)

            # Copy any leading (non-spatial, e.g. time) dimensions from the
            # source dataset.
            for dim_name in variable.dimensions[:-2]:
                if not dim_name in out_ds.dimensions:
                    if dim_name in ds.variables:
                        copy_variable(ds, out_ds, dim_name)
                    else:
                        copy_dimension(ds, out_ds, dim_name)

            # Output keeps the source's leading dimensions but the template's
            # spatial dimensions.
            out_var = out_ds.createVariable(
                variable_name,
                variable.dtype,
                dimensions=variable.dimensions[:-2] + template_variable.dimensions,
                fill_value=fill_value
            )

            reproject_kwargs = {
                'src_transform': ds_coords.affine,
                'src_crs': CRS.from_string(ds_projection),
                'dst_transform': template_coords.affine,
                'dst_crs': template_prj.srs,
                'resampling': resampling,
                'src_nodata': fill_value,
                'dst_nodata': fill_value,
                'threads': 4
            }

            # TODO: may only need to select out what is in window

            if len(variable.shape) == 3:
                # 3D variable: warp one 2D slice at a time with a progress bar.
                idxs = range(variable.shape[0])
                with click.progressbar(idxs) as bar:
                    for i in bar:
                        # print('processing slice: {0}'.format(i))

                        data = variable[i, :]
                        # Pre-fill the destination with nodata and the
                        # template's mask so unwarped cells stay masked.
                        out = numpy.ma.empty(template_coords.shape, dtype=data.dtype)
                        out.mask = template_mask
                        out.fill(fill_value)
                        reproject(data, out, **reproject_kwargs)
                        out_var[i, :] = out

            else:
                # 2D variable: warp in a single pass.
                data = variable[:]
                out = numpy.ma.empty(template_coords.shape, dtype=data.dtype)
                out.mask = template_mask
                out.fill(fill_value)
                reproject(data, out, **reproject_kwargs)
                out_var[:] = out
|
consbio/clover
|
trefoil/netcdf/warp.py
|
Python
|
bsd-3-clause
| 5,979
|
[
"NetCDF"
] |
36652e7425fb45c28f71b7f4352e9d6d59d0958fdfe8b27eba0b1e65417916c2
|
import os
import pymc3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def hdi_of_mcmc(sample_vec, cred_mass=0.95):
    """
    Highest density interval (HDI) of an MCMC sample.

    The HDI is the narrowest interval that contains ``cred_mass`` of the
    sample points.

    :param sample_vec: non-empty 1-D array-like of MCMC draws
    :param cred_mass: credible mass the interval should contain
    :returns: (lower, upper) bounds of the narrowest such interval
    """
    assert len(sample_vec), 'need points to find HDI'
    points = np.sort(sample_vec)
    # Each candidate interval spans this many consecutive sorted points.
    interval_size = int(np.floor(cred_mass * len(points)))
    n_candidates = len(points) - interval_size
    # Width of every candidate interval, computed in one vectorized step;
    # the narrowest one is the HDI.
    widths = points[interval_size:] - points[:n_candidates]
    start = np.argmin(widths)
    return points[start], points[start + interval_size]
# Script body: fit a Bayesian changepoint model (pymc3) to VLBI residual-rate
# data and plot the rates per ground station with the inferred switchpoint.
# NOTE(review): uses Python 2 print syntax (`print i` below) and the legacy
# pymc3 API (pymc3.T, pymc3.switch) — runs only under that environment.
data_dir = '/home/ilya/Dropbox/petya'
data_file = 'Total_rate_vs_Years.txt'
df = pd.read_table(os.path.join(data_dir, data_file), delim_whitespace=True,
                   names=['exper', 'band', 'date', 'time', 'st1', 'st2',
                          'rate_off1', 'rate_off2', 'total_rate', 'snr'])
df['datetime'] = pd.to_datetime(df['date'] + ' ' + df['time'])
# Fit changepoint problem
# Convert datetime to timedelta (integer days since the earliest observation)
df['timedelta'] = df['datetime'] - sorted(df['datetime'])[0]
df['timedelta'] = [int(dt.days) for dt in df['timedelta']]
# FIXME: Allow each station can have nonzero mean. Check it for both parts. Use
# hierarchical model.
with pymc3.Model() as model:
    # Prior for dof
    nu = pymc3.Exponential('nu', 0.1)
    # Prior for distribution of switchpoint location
    switchpoint = pymc3.DiscreteUniform('switchpoint', lower=0,
                                        upper=max(df['timedelta']))
    # Priors for pre- and post-switch std
    early_std = pymc3.Exponential('early_std', lam=1.)
    late_std = pymc3.Exponential('late_std', lam=1.)
    # Allocate appropriate gaussian std to years before and after current
    # switchpoint location
    idx = np.arange(len(df['timedelta']))
    std = pymc3.switch(switchpoint >= idx, early_std, late_std)
    # Data likelihood: Student-t around zero; rates scaled by c (3e10 cm/s)
    total_rates = pymc3.T('total_rates', nu=nu, mu=0., lam=std,
                          observed=3. * 10 ** 10 * df['total_rate'])
with model:
    # Initial values for stochastic nodes
    start = {'early_std': 3., 'late_std': 0.5, 'nu': 3}
    # Use slice sampler for means
    step1 = pymc3.Slice([nu, early_std, late_std])
    # Use Metropolis for switchpoint, since it accomodates discrete variables
    step2 = pymc3.Metropolis([switchpoint])
    tr = pymc3.sample(10000, tune=500, start=start, step=[step1, step2])
# Trace plot (first 1000 draws discarded as burn-in)
pymc3.traceplot(tr[1000:])
plt.savefig('mcmc.png', bbox_inches='tight')
# plot one plot for all stations
ground_stations = set(df['st2'])
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
# Plot several subplots for each ground stations
fig, axes = plt.subplots(nrows=len(ground_stations), ncols=1, sharex=True,
                         sharey=True)
fig.set_size_inches(18.5, 18.5)
plt.rcParams.update({'axes.titlesize': 'medium'})
for i, ground_station in enumerate(ground_stations):
    print i
    axes[i].plot(df[df['st2'] == ground_station]['datetime'],
                 df[df['st2'] == ground_station]['total_rate'] * 3. * 10 ** 10,
                 '.{}'.format(colors[i]), label=ground_station)
    axes[i].legend(prop={'size': 11}, loc='best', fancybox=True,
                   framealpha=0.5)
fig.show()
# Combined figure: all stations on a single axis plus the 95% HDI of the
# switchpoint drawn as two vertical lines.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for i, ground_station in enumerate(ground_stations):
    ax.plot(df[df['st2'] == ground_station]['datetime'],
            df[df['st2'] == ground_station]['total_rate'] * 3. * 10 ** 10,
            '.{}'.format(colors[i]), label=ground_station)
plt.legend()
low, high = hdi_of_mcmc(tr.get_values('switchpoint')[1000:])
plt.axvline(sorted(df['datetime'])[0] + pd.Timedelta('{} days'.format(low)))
plt.axvline(sorted(df['datetime'])[0] + pd.Timedelta('{} days'.format(high)))
plt.gcf().autofmt_xdate()
fig.show()
fig.savefig('res_rate_switchpoint_95perc.png', bbox_inches='tight')
|
ipashchenko/ra_orbit
|
changepoint.py
|
Python
|
mit
| 3,862
|
[
"Gaussian"
] |
0ba3265736feab7e8b73210e2459911856e7462c08331f34838655897ba4047c
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import math
import random
import time
from itertools import chain
from deap import base
from deap import creator
# from deap import benchmarks
import fgeneric
import bbobbenchmarks as bn
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode="d", fitness=creator.FitnessMin)
def update(individual, mu, sigma):
    """Overwrite *individual* in place with a fresh Gaussian sample.

    Component ``i`` is drawn from a normal distribution centered on
    ``mu[i]`` with standard deviation ``sigma``.
    """
    for index, center in enumerate(mu):
        individual[index] = random.gauss(center, sigma)
def tupleize(func):
    """Decorator wrapping *func* so its result comes back as a 1-tuple.

    Useful when an evaluation function returns a single value but a
    tuple-valued fitness is expected.
    """
    def wrapper(*args, **kargs):
        result = func(*args, **kargs)
        return (result,)
    return wrapper
def main(func, dim, maxfuncevals, ftarget=None):
    """
    Minimize *func* with a simple (1+1)-style evolution strategy.

    A single parent is mutated with an isotropic Gaussian whose strength
    ``sigma`` adapts: it grows by ``alpha`` on success and shrinks by
    ``alpha**(-0.25)`` on failure (a 1/5th-success-rule style scheme —
    presumably; see the update rule below).

    :param func: objective function to minimize
    :param dim: problem dimensionality
    :param maxfuncevals: evaluation budget (including the initial one)
    :param ftarget: target objective value; search stops once reached
    :returns: the best individual found
    """
    toolbox = base.Toolbox()
    toolbox.register("update", update)
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)

    # Create the desired optimal function value as a Fitness object
    # for later comparison
    opt = creator.FitnessMin((ftarget,))

    # Interval in which to initialize the optimizer
    interval = -5, 5
    sigma = (interval[1] - interval[0]) / 2.0
    alpha = 2.0 ** (1.0 / dim)

    # Initialize best randomly and worst as a place holder
    best = creator.Individual(random.uniform(interval[0], interval[1]) for _ in range(dim))
    worst = creator.Individual([0.0] * dim)

    # Evaluate the first individual
    best.fitness.values = toolbox.evaluate(best)

    # Evolve until ftarget is reached or the number of evaluation
    # is exausted (maxfuncevals)
    for g in range(1, maxfuncevals):
        toolbox.update(worst, best, sigma)
        worst.fitness.values = toolbox.evaluate(worst)
        # In DEAP, "<=" on fitnesses means "not better": the offspring is
        # at least as good as the parent, so accept it.
        if best.fitness <= worst.fitness:
            # Incease mutation strength and swap the individual
            sigma = sigma * alpha
            best, worst = worst, best
        else:
            # Decrease mutation strength
            sigma = sigma * alpha**(-0.25)

        # Test if we reached the optimum of the function
        # Remember that ">" for fitness means better (not greater)
        if best.fitness > opt:
            return best

    return best
if __name__ == "__main__":
    # Run the COCO/BBOB benchmark campaign: for each dimension, each
    # noise-free function and each instance, run `main` with independent
    # restarts until the target precision or the evaluation budget is hit.

    # Maximum number of restart for an algorithm that detects stagnation
    maxrestarts = 1000

    # Create a COCO experiment that will log the results under the
    # ./output directory
    e = fgeneric.LoggingFunction("output")

    # Iterate over all desired test dimensions
    for dim in (2, 3, 5, 10, 20, 40):
        # Set the maximum number function evaluation granted to the algorithm
        # This is usually function of the dimensionality of the problem
        maxfuncevals = 100 * dim**2
        minfuncevals = dim + 2

        # Iterate over a set of benchmarks (noise free benchmarks here)
        for f_name in bn.nfreeIDs:

            # Iterate over all the instance of a single problem
            # Rotation, translation, etc.
            for instance in chain(range(1, 6), range(21, 31)):

                # Set the function to be used (problem) in the logger
                e.setfun(*bn.instantiate(f_name, iinstance=instance))

                # Independent restarts until maxfunevals or ftarget is reached
                for restarts in range(0, maxrestarts + 1):
                    if restarts > 0:
                        # Signal the experiment that the algorithm restarted
                        e.restart('independent restart')  # additional info

                    # Run the algorithm with the
                    # remaining number of evaluations
                    revals = int(math.ceil(maxfuncevals - e.evaluations))
                    main(e.evalfun, dim, revals, e.ftarget)

                    # Stop if ftarget is reached
                    if e.fbest < e.ftarget or e.evaluations + minfuncevals > maxfuncevals:
                        break

                e.finalizerun()

                print('f%d in %d-D, instance %d: FEs=%d with %d restarts, '
                      'fbest-ftarget=%.4e'
                      % (f_name, dim, instance, e.evaluations, restarts,
                         e.fbest - e.ftarget))

        print('date and time: %s' % time.asctime())
|
PyQuake/earthquakemodels
|
code/cocobbob/coco/deapbbob/deapbbob.py
|
Python
|
bsd-3-clause
| 5,055
|
[
"Gaussian"
] |
d8592c13c42dc724bc6be06d26bdd4c875cff32ced3761cf27cabefe60b6d1c6
|
# The following is a Python translation of a MATLAB file originally written principally by Mike Tipping
# as part of his SparseBayes software library. Initially published on GitHub on July 21st, 2015.
# SB2_PARAMETERSETTINGS User parameter initialisation for SPARSEBAYES
#
# SETTINGS = SB2_PARAMETERSETTINGS(parameter1, value1, parameter2, value2,...)
#
# OUTPUT ARGUMENTS:
#
# SETTINGS An initialisation structure to pass to SPARSEBAYES
#
# INPUT ARGUMENTS:
#
# Optional number of parameter-value pairs to specify some, all, or
# none of the following:
#
# BETA (Gaussian) noise precision (inverse variance)
# NOISESTD (Gaussian) noise standard deviation
# RELEVANT Indices of columns of basis matrix to use at start-up
# MU (WEIGHTS) Corresponding vector of weights to RELEVANT
# ALPHA Corresponding vector of hyperparameter values to RELEVANT
#
# EXAMPLE:
#
# SETTINGS = SB2_ParameterSettings('NoiseStd',0.1)
#
# NOTES:
#
# 1. If no input arguments are supplied, defaults (effectively an
# empty structure) will be returned.
#
# 2. If both BETA and NOISESTD are specified, BETA will take
# precedence.
#
# 3. RELEVANT may be specified without WEIGHTS or ALPHA (these will be
# sensibly initialised later).
#
# 4. If RELEVANT is specified, WEIGHTS may be specified also without ALPHA.
#
#
# Copyright 2009, Vector Anomaly Ltd
#
# This file is part of the SPARSEBAYES library for Matlab (V2.0).
#
# SPARSEBAYES is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SPARSEBAYES is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with SPARSEBAYES in the accompanying file "licence.txt"; if not, write to
# the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
# Contact the author: m a i l [at] m i k e t i p p i n g . c o m
#
def SB2_ParameterSettings(*args):
    """
    Build the user-settings dictionary for SPARSEBAYES.

    Arguments are supplied as (property, value) pairs, e.g.
    ``SB2_ParameterSettings('NOISESTD', 0.1)``.  Recognised properties are
    'BETA', 'NOISESTD', 'RELEVANT', 'MU' and 'ALPHA'; any property not
    supplied defaults to an empty list.  With no arguments, the default
    (effectively empty) settings structure is returned.

    :returns: dict mapping each property name to its value (or [])
    :raises Exception: if an odd number of arguments is given, or if a
        property name is not one of the recognised keys
    """
    # Ensure arguments are supplied in pairs
    if len(args) % 2 != 0:
        raise Exception('Arguments to SB2_ParameterSettings should be (property, value) pairs')

    # Any settings specified?
    # BUGFIX: use integer division — under Python 3, len(args)/2 is a float
    # and range() below would raise TypeError for any non-empty args.
    numSettings = len(args) // 2

    ## Defaults - over-ridden later if requested
    #
    # Two options for setting noise level (purely for convenience)
    # - if 'beta' set, 'noiseStdDev' will be over-ridden
    SETTINGS = {
        'BETA': [],
        'NOISESTD': [],
        'RELEVANT': [],
        'MU': [],
        'ALPHA': []
    }

    ## Requested overrides
    #
    # Parse string/variable pairs
    for n in range(numSettings):
        property_ = args[n * 2]
        value = args[n * 2 + 1]
        if property_ not in SETTINGS:
            raise Exception('Unrecognised initialisation property: {0}'.format(property_))
        else:
            SETTINGS[property_] = value

    return SETTINGS
|
jhallock7/SparseBayes-Python
|
SB2_ParameterSettings.py
|
Python
|
gpl-2.0
| 3,373
|
[
"Gaussian"
] |
e261417b78543776dccfa97cdb89813dc4849fb4e7a89d83dfd0577056f5ef43
|
from pylab import *
import json
nodes = [
{
"fileName": "generators/PoissonGenerator.qml",
"label": "",
"x": 100,
"y": 100,
"engine": {
"fireOutput": 0.0001,
"rate": 300
}
},
# {
# "fileName":"meters/RasterPlot.qml",
# "label":"",
# "x":96,
# "y":-400,
# "height":800,
# "showLegend":True,
# "timeRange":0.10000000000000002,
# "width":1200
# },
{
"fileName":"generators/ACClamp.qml",
"label":"",
"x":0,
"y":0,
"engine":{
"amplitude":0.00001,
"frequency":40,
"time":0.10300000000000192
}
},
{"fileName":"meters/Speaker.qml","label":"","x":267.7179788505151,"y":0.1856258160838,"source":"glass.wav"},
]
edges = [{
"from": 0,
"to": 1
}]
column_count = 64
offset_count = len(nodes)
neuron_count = 512
poisson_index = 0
ac_index = 1
speaker_index = 2
for i in range(neuron_count):
# poisson_index = offset_count + 2 * i
neuron_index = offset_count + i
# if not (i % neuron_count / 100):
# edge_neuron_main = {
# "from": neuron_index,
# "to": 1
# }
# edges.append(edge_neuron_main)
edge_poisson_neuron = {
"from": poisson_index,
"to": neuron_index
}
edge_ac_neuron = {
"from": ac_index,
"to": neuron_index
}
edge_speaker_neuron = {
"from": neuron_index,
"to": speaker_index
}
edges.append(edge_poisson_neuron)
edges.append(edge_ac_neuron)
edges.append(edge_speaker_neuron)
factor = 1.0
if rand() < 0.5:
factor = -1.0
fire_output = factor * 0.00001
neuron = {
"fileName": "neurons/PassiveNeuron.qml",
"label": "",
"x": 100 + 64 * (i % column_count),
"y": 500 + 200 * floor(i / column_count),
"engine": {
"capacitance": 0.000001001,
"fireOutput": fire_output,
"initialPotential": -0.08,
"restingPotential": -0.065,
"synapticConductance": 0,
"synapticPotential": 0.04999999999999999,
"synapticTimeConstant": 0.01,
"threshold": (2.0 * rand() - 1.0) * 30e-3,
"voltage": -0.0763497668807441
},
"resistance": 10000,
}
# nodes.append(poisson)
nodes.append(neuron)
data = {
"edges": edges,
"nodes": nodes,
"fileFormatVersion": 2,
"workspace": {
"playbackSpeed": 2,
"visibleRectangle": {
"height": 778.8299070684317,
"width": 1372.8527175443542,
"x": -177.09803541776617,
"y": 179.8000536767801
}
}
}
latest_file = open("/home/svenni/.config/Ovilab/Neuronify/latest.nfy", "w")
latest_file.write(json.dumps(data))
|
dragly/hodgkin-huxley
|
tools/generate_massive.py
|
Python
|
gpl-3.0
| 2,939
|
[
"NEURON"
] |
68ce105b400b3ea65a0c3abb2033c4bdf8d5b12d97f5caf801b92feb8d678be0
|
import os
import itertools
import numpy as np
from scipy.optimize import differential_evolution
from sklearn.decomposition import PCA
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import cross_val_score
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
def kernel_smoothing(data, optimize=False):
    """Create a fitted Gaussian kernel-density estimator for *data*.

    The bandwidth is selected by cross-validated likelihood: either a global
    optimization over (0, 5) when ``optimize`` is True, or a grid search over
    30 linearly spaced values plus the Scott and Silverman rules of thumb.

    :param data: 2-D array of shape (n_samples, n_features)
    :param optimize: use differential evolution instead of grid search
    :returns: fitted ``sklearn.neighbors.KernelDensity`` instance
    """
    n_sample, dim = data.shape
    # Cap cross-validation folds at 50 (leave-one-out below that).
    cv = n_sample if n_sample < 50 else 50
    if optimize:
        def bw_score(bw):
            # Negative mean CV log-likelihood: minimized by the optimizer.
            score = cross_val_score(KernelDensity(bandwidth=bw), data, cv=cv, n_jobs=-1)
            return - score.mean()
        bounds = [(0., 5.)]
        results = differential_evolution(bw_score, bounds)
        bw = results.x
        ks_gaussian = KernelDensity(bandwidth=bw)
        ks_gaussian.fit(data)
    else:
        # Rule-of-thumb bandwidths appended to the search grid.
        scott = n_sample **(-1./(dim + 4))
        silverman = (n_sample * (dim + 2) / 4.) ** (-1. / (dim + 4))
        bandwidth = np.hstack([np.linspace(0.1, 5.0, 30), scott, silverman])
        grid = GridSearchCV(KernelDensity(),
                            {'bandwidth': bandwidth},
                            cv=cv, n_jobs=-1)  # 20-fold cross-validation
        grid.fit(data)
        ks_gaussian = grid.best_estimator_
    return ks_gaussian
def hdr_boxplot(data, x_common=None, path=None, variance=0.8, alpha=None,
                threshold=0.95, outliers='kde', optimize=False,
                n_contours=50, xlabel='t', ylabel='y'):
    """High Density Region boxplot.

    Using the dataset :attr:`data`:

    1. Compute a 2D kernel smoothing with a Gaussian kernel,
    2. Compute contour lines for quartiles 90, 50 and :attr:`alpha`,
    3. Plot the bivariate plot,
    4. Compute mediane curve along with quartiles and outliers.

    :param np.array data: dataset (n_samples, n_features)
    :param list(float) x_common: abscissa
    :param str path: directory to write ``hdr_boxplot.pdf`` into
        (interactive show if None)
    :param float variance: percentage of total variance to conserve
    :param list(float) alpha: extra contour values (default: none)
    :param float threshold: threshold for outliers
    :param str outliers: detection method ['kde', 'forest']
    :param bool optimize: bandwidth global optimization or grid search
    :param int n_contours: discretization to compute contour
    :param str xlabel: label for x axis
    :param str ylabel: label for y axis
    :returns: mediane curve along with 50%, 90% quartile (inf and sup curves)
      and outliers.
    :rtypes: np.array, list(np.array), np.array
    """
    # BUGFIX: `alpha` was a mutable default ([]) that the body mutates via
    # extend(); repeated calls accumulated thresholds and a caller's list was
    # modified in place.  Use None as the default and copy any supplied list.
    alpha = [] if alpha is None else list(alpha)

    n_sample, dim = data.shape
    # PCA and bivariate plot
    pca = PCA(n_components=variance, svd_solver='full')
    data_r = pca.fit_transform(data)
    n_components = len(pca.explained_variance_ratio_)

    print('Explained variance ratio: {} -> {}'
          .format(pca.explained_variance_ratio_,
                  np.sum(pca.explained_variance_ratio_)))

    # Create gaussian kernel
    ks_gaussian = kernel_smoothing(data_r, optimize)

    # Evaluate density on a regular grid covering the reduced space
    min_max = np.array([data_r.min(axis=0), data_r.max(axis=0)]).T
    contour_grid = np.meshgrid(*[np.linspace(*min_max[i], n_contours)
                                 for i in range(n_components)])
    contour_stack = np.dstack(contour_grid).reshape(-1, n_components)
    pdf = np.exp(ks_gaussian.score_samples(contour_stack)).flatten()

    # Compute contour line of pvalue linked to a given probability level
    alpha.extend([threshold, 0.9, 0.5])  # 0.001
    alpha = list(set(alpha))
    alpha.sort(reverse=True)
    print('alpha: ', alpha)
    n_contour_lines = len(alpha)
    pdf_r = np.exp(ks_gaussian.score_samples(data_r)).flatten()
    pvalues = [np.percentile(pdf_r, (1 - alpha[i]) * 100, interpolation='linear')
               for i in range(n_contour_lines)]
    print('pvalues: ', pvalues)

    # Find mean, quartiles and outliers curves: the median is the density
    # maximum over the evaluation grid.
    median_r = pdf.argmax()
    median_r = contour_stack[median_r]

    if outliers == 'kde':
        # Outliers: points whose density falls below the threshold contour
        outliers = np.where(pdf_r < pvalues[alpha.index(threshold)])
        outliers = data_r[outliers]
    elif outliers == 'forest':
        forrest = IsolationForest(contamination=(1 - threshold), n_jobs=-1)
        detector = forrest.fit(data_r)
        outliers = np.where(detector.predict(data_r) == -1)
        outliers = data_r[outliers]
    else:
        print('Unknown outlier method: no detection')
        outliers = []

    # Band between the 90% and 50% contours, and the inner 50% band
    extreme_quartile = np.where((pdf > pvalues[alpha.index(0.9)])
                                & (pdf < pvalues[alpha.index(0.5)]))
    extreme_quartile = contour_stack[extreme_quartile]

    mean_quartile = np.where(pdf > pvalues[alpha.index(0.5)])
    mean_quartile = contour_stack[mean_quartile]

    # Any user-requested extra contour levels beyond 0.5/0.9/threshold
    extra_alpha = [i for i in alpha if 0.5 != i and 0.9 != i and threshold != i]
    if extra_alpha != []:
        extra_quartiles = []
        for i in extra_alpha:
            extra_quartile = np.where(pdf > pvalues[alpha.index(i)])
            extra_quartile = contour_stack[extra_quartile]
            extra_quartile = pca.inverse_transform(extra_quartile)
            extra_quartiles.extend([extra_quartile.max(axis=0),
                                    extra_quartile.min(axis=0)])
    else:
        extra_quartiles = None

    # Inverse transform from bivariate plot to dataset
    median = pca.inverse_transform(median_r)
    outliers = pca.inverse_transform(outliers)
    extreme_quartile = pca.inverse_transform(extreme_quartile)
    mean_quartile = pca.inverse_transform(mean_quartile)

    # Bands reduce to their upper/lower envelope curves
    extreme_quartile = [extreme_quartile.max(axis=0), extreme_quartile.min(axis=0)]
    mean_quartile = [mean_quartile.max(axis=0), mean_quartile.min(axis=0)]

    # Plots
    figures = []
    if n_components == 2:
        figures.append(plt.figure('2D Kernel Smoothing with Gaussian kernel'))
        contour = plt.contour(*contour_grid,
                              pdf.reshape((n_contours, n_contours)), pvalues)
        # contour = plt.contourf(*contour_grid,
        #                        pdf.reshape((n_contours, n_contours)), 100)
        # plt.colorbar(contour, shrink=0.8, extend='both')
        # Labels: probability instead of density
        fmt = {}
        for i in range(n_contour_lines):
            lev = contour.levels[i]
            fmt[lev] = "%.0f %%" % (alpha[i] * 100)
        plt.clabel(contour, contour.levels, inline=True, fontsize=10, fmt=fmt)

    # Scatter-matrix-style view of the reduced space
    figures.append(plt.figure('Bivariate space'))
    plt.tick_params(axis='both', labelsize=8)
    for i, j in itertools.combinations_with_replacement(range(n_components), 2):
        ax = plt.subplot2grid((n_components, n_components), (j, i))
        ax.tick_params(axis='both', labelsize=(10 - n_components))

        if i == j:  # diag: 1-D density of that component
            x_plot = np.linspace(min(data_r[:, i]), max(data_r[:, i]), 100)[:, np.newaxis]
            _ks = kernel_smoothing(data_r[:, i, np.newaxis], optimize)
            ax.plot(x_plot, np.exp(_ks.score_samples(x_plot)))
        elif i < j:  # lower corners: pairwise scatter
            ax.scatter(data_r[:, i], data_r[:, j], s=5, c='k', marker='o')

        if i == 0:
            ax.set_ylabel(str(j + 1))
        if j == (n_components - 1):
            ax.set_xlabel(str(i + 1))

    # Functional view: all curves, quartile bands, median and outliers
    figures.append(plt.figure('Time Serie'))
    if x_common is None:
        x_common = np.linspace(0, 1, dim)
    plt.plot(np.array([x_common] * n_sample).T, data.T, alpha=.2)
    plt.fill_between(x_common, *mean_quartile, color='gray', alpha=.4)
    plt.fill_between(x_common, *extreme_quartile, color='gray', alpha=.4)

    try:
        plt.plot(np.array([x_common] * len(extra_quartiles)).T,
                 np.array(extra_quartiles).T, color='c', ls='-.', alpha=.4)
    except TypeError:
        # extra_quartiles is None: nothing extra to draw
        pass

    plt.plot(x_common, median, c='k')

    try:
        plt.plot(np.array([x_common] * len(outliers)).T, outliers.T,
                 c='r', alpha=0.7)
    except ValueError:
        print('It seems that there are no outliers...')

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)

    # Save to PDF when a path is given; otherwise show interactively
    try:
        path = os.path.join(path, 'hdr_boxplot.pdf')
        pdf = matplotlib.backends.backend_pdf.PdfPages(path)
        for fig in figures:
            fig.tight_layout()
            pdf.savefig(fig, transparent=True, bbox_inches='tight')
        pdf.close()
    except TypeError:
        # path is None: interactive display instead of file output
        plt.show()
    plt.close('all')

    return median, outliers, extreme_quartile, mean_quartile, extra_quartiles
|
tupui/HDR-Boxplot
|
hdr.py
|
Python
|
mit
| 8,452
|
[
"Gaussian"
] |
c04e7a19e75a3743393104821c8ab784063a2f26d1742eeb465498beda92c442
|
from easy.visitors.codegen import CodeGenVisitor
from easy.visitors.constant_folding import ConstantFoldingVisitor
from easy.visitors.pprint import PPrintVisitor
from easy.visitors.symbol_table import SymbolTableVisitor
class Compiler(object):
    """Drives the compilation passes over a parsed AST.

    Passes run in a fixed order — symbol table construction, constant
    folding, pretty printing — followed by code generation, whose output
    is returned from :meth:`compile`.
    """

    def __init__(self, ast):
        self._ast = ast

    def _do_pass(self, visitor):
        """Run a single visitor pass over the stored AST and return its result."""
        return visitor.visit(self._ast)

    def compile(self):
        """Run all passes in order and return the generated code."""
        # The analysis/printing passes run for their side effects only.
        for pass_class in (SymbolTableVisitor, ConstantFoldingVisitor,
                           PPrintVisitor):
            self._do_pass(pass_class(self))
        return self._do_pass(CodeGenVisitor(self))
|
helgefmi/Easy
|
src/easy/compiler.py
|
Python
|
mit
| 616
|
[
"VisIt"
] |
e07e384a8f0b926daf75f4bbd54d8625db968cf1280d0e36070d2e6e8be37a9c
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
    """Initial schema: Cuisine, Place and Visit models.

    Visit links a user (swappable AUTH_USER_MODEL) to a Place with an
    optional 1-5 rating; Place belongs to a Cuisine and stores raw
    lat/lon floats plus a smoking flag.
    """

    dependencies = [
        # Depend on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Cuisine',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=32)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=128)),
                ('description', models.TextField()),
                ('location_lat', models.FloatField()),
                ('location_lon', models.FloatField()),
                ('smoking', models.BooleanField(default=False)),
                ('cuisine', models.ForeignKey(to='main.Cuisine')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Visit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('time', models.DateTimeField(auto_now_add=True)),
                # Rating is optional (null until the user rates), bounded 1-5.
                ('rating', models.SmallIntegerField(null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
                ('place', models.ForeignKey(to='main.Place')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
FMI-B20/Yarr
|
main/migrations/0001_initial.py
|
Python
|
mit
| 1,995
|
[
"VisIt"
] |
9e429d828bc0c2ff41b54607cab87650c2c33cb4e678cd620175db6c32d61c6c
|
"""
Implementation of the Brunel 2000 network:
sparsely connected network of identical LIF neurons (Model A).
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
from brian2 import NeuronGroup, Synapses, PoissonInput
from brian2.monitors import StateMonitor, SpikeMonitor, PopulationRateMonitor
from random import sample
from neurodynex3.tools import plot_tools
from numpy import random
import matplotlib.pyplot as plt
# Default parameters of a single LIF neuron (values follow Brunel 2000, Model A)
V_REST = 0. * b2.mV  # resting / leak reversal potential
V_RESET = +10. * b2.mV  # membrane potential right after a spike
FIRING_THRESHOLD = +20. * b2.mV  # spike threshold
MEMBRANE_TIME_SCALE = 20. * b2.ms  # tau_m
ABSOLUTE_REFRACTORY_PERIOD = 2.0 * b2.ms  # tau_ref: v is clamped for this long after a spike
# Default parameters of the network
SYNAPTIC_WEIGHT_W0 = 0.1 * b2.mV  # excitatory PSP amplitude J
# note: w_ee = w_ei = w0 and w_ie=w_ii = -g*w0
RELATIVE_INHIBITORY_STRENGTH_G = 4.  # balanced
CONNECTION_PROBABILITY_EPSILON = 0.1  # sparse random connectivity
SYNAPTIC_DELAY = 1.5 * b2.ms  # transmission delay for all synapses
POISSON_INPUT_RATE = 13. * b2.Hz  # rate of each external Poisson source
N_POISSON_INPUT = 1000  # number of external Poisson sources per neuron
b2.defaultclock.dt = 0.05 * b2.ms  # integration time step for all simulations below
def simulate_brunel_network(
        N_Excit=5000,
        N_Inhib=None,
        N_extern=N_POISSON_INPUT,
        connection_probability=CONNECTION_PROBABILITY_EPSILON,
        w0=SYNAPTIC_WEIGHT_W0,
        g=RELATIVE_INHIBITORY_STRENGTH_G,
        synaptic_delay=SYNAPTIC_DELAY,
        poisson_input_rate=POISSON_INPUT_RATE,
        w_external=None,
        v_rest=V_REST,
        v_reset=V_RESET,
        firing_threshold=FIRING_THRESHOLD,
        membrane_time_scale=MEMBRANE_TIME_SCALE,
        abs_refractory_period=ABSOLUTE_REFRACTORY_PERIOD,
        monitored_subset_size=100,
        random_vm_init=False,
        sim_time=100.*b2.ms):
    """
    Fully parametrized implementation of a sparsely connected network of LIF neurons (Brunel 2000)
    Args:
        N_Excit (int): Size of the excitatory popluation
        N_Inhib (int): optional. Size of the inhibitory population.
            If not set (=None), N_Inhib is set to N_excit/4.
        N_extern (int): optional. Number of presynaptic excitatory poisson neurons. Note: if set to a value,
            this number does NOT depend on N_Excit and NOT depend on connection_probability (this is different
            from the book and paper. Only if N_extern is set to 'None', then N_extern is computed as
            N_Excit*connection_probability.
        connection_probability (float): probability to connect to any of the (N_Excit+N_Inhib) neurons
            CE = connection_probability*N_Excit
            CI = connection_probability*N_Inhib
            Cexternal = N_extern
        w0 (float): Synaptic strength J
        g (float): relative importance of inhibition. J_exc = w0. J_inhib = -g*w0
        synaptic_delay (Quantity): Delay between presynaptic spike and postsynaptic increase of v_m
        poisson_input_rate (Quantity): Poisson rate of the external population
        w_external (float): optional. Synaptic weight of the excitatory external poisson neurons onto all
            neurons in the network. Default is None, in that case w_external is set to w0, which is the
            standard value in the book and in the paper Brunel2000.
            The purpose of this parameter is to see the effect of external input in the
            absence of network feedback(setting w0 to 0mV and w_external>0).
        v_rest (Quantity): Resting potential
        v_reset (Quantity): Reset potential
        firing_threshold (Quantity): Spike threshold
        membrane_time_scale (Quantity): tau_m
        abs_refractory_period (Quantity): absolute refractory period, tau_ref
        monitored_subset_size (int): nr of neurons for which a VoltageMonitor is recording Vm
        random_vm_init (bool): if true, the membrane voltage of each neuron is initialized with a
            random value drawn from Uniform(v_rest, firing_threshold)
        sim_time (Quantity): Simulation time
    Returns:
        (rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons)
        PopulationRateMonitor: Rate Monitor
        SpikeMonitor: SpikeMonitor for ALL (N_Excit+N_Inhib) neurons
        StateMonitor: membrane voltage for a selected subset of neurons
        list: index of monitored neurons. length = monitored_subset_size
    """
    # Fill in the defaults that depend on other arguments.
    if N_Inhib is None:
        N_Inhib = int(N_Excit/4)
    if N_extern is None:
        N_extern = int(N_Excit*connection_probability)
    if w_external is None:
        w_external = w0
    # Excitatory and inhibitory synaptic efficacies (J and -g*J).
    J_excit = w0
    J_inhib = -g*w0
    # NOTE: Brian2 resolves the names used in the equation/threshold/reset strings
    # (v_rest, membrane_time_scale, firing_threshold, v_reset) from this local
    # namespace, so these local variable names are part of the model definition.
    lif_dynamics = """
    dv/dt = -(v-v_rest) / membrane_time_scale : volt (unless refractory)"""
    # One NeuronGroup holds BOTH populations; the first N_Excit are excitatory.
    network = NeuronGroup(
        N_Excit+N_Inhib, model=lif_dynamics,
        threshold="v>firing_threshold", reset="v=v_reset", refractory=abs_refractory_period,
        method="linear")
    if random_vm_init:
        # Uniform(v_rest, firing_threshold): desynchronizes the initial condition.
        network.v = random.uniform(v_rest/b2.mV, high=firing_threshold/b2.mV, size=(N_Excit+N_Inhib))*b2.mV
    else:
        network.v = v_rest
    # Slices of the single group, used as the two presynaptic populations.
    excitatory_population = network[:N_Excit]
    inhibitory_population = network[N_Excit:]
    # Random sparse connectivity with fixed probability; both projections target ALL neurons.
    exc_synapses = Synapses(excitatory_population, target=network, on_pre="v += J_excit", delay=synaptic_delay)
    exc_synapses.connect(p=connection_probability)
    inhib_synapses = Synapses(inhibitory_population, target=network, on_pre="v += J_inhib", delay=synaptic_delay)
    inhib_synapses.connect(p=connection_probability)
    # Each neuron receives N_extern independent Poisson inputs of the given rate.
    external_poisson_input = PoissonInput(target=network, target_var="v", N=N_extern,
                                          rate=poisson_input_rate, weight=w_external)
    # collect data of a subset of neurons:
    monitored_subset_size = min(monitored_subset_size, (N_Excit+N_Inhib))
    idx_monitored_neurons = sample(range(N_Excit+N_Inhib), monitored_subset_size)
    rate_monitor = PopulationRateMonitor(network)
    # SpikeMonitor records all neurons; the recorded subset is applied downstream
    # via idx_monitored_neurons (see the original author's note about record=list).
    spike_monitor = SpikeMonitor(network, record=idx_monitored_neurons)
    voltage_monitor = StateMonitor(network, "v", record=idx_monitored_neurons)
    b2.run(sim_time)
    return rate_monitor, spike_monitor, voltage_monitor, idx_monitored_neurons
def getting_started():
    """Run a small Brunel network (2000 excitatory neurons, 800 ms) and plot the activity."""
    results = simulate_brunel_network(N_Excit=2000, sim_time=800. * b2.ms)
    rate_mon, spike_mon, volt_mon, monitored_idx = results
    plot_tools.plot_network_activity(
        rate_mon, spike_mon, volt_mon,
        spike_train_idx_list=monitored_idx, t_min=0.*b2.ms,
        N_highlighted_spiketrains=3, avg_window_width=1. * b2.ms)
    plt.show()
def _demo_emergence_of_oscillation():
    """Demo: weak inhibition (g=2.5) with strong drive; plot full run plus early/late zooms."""
    input_rate = 18 * b2.Hz
    rel_inhibition = 2.5
    rate_mon, spike_mon, volt_mon, monitored_idx = \
        simulate_brunel_network(N_Excit=6000, random_vm_init=True, poisson_input_rate=input_rate,
                                g=rel_inhibition, sim_time=300. * b2.ms, monitored_subset_size=50)
    # Same data plotted three times: whole run, first 50 ms, last 50 ms.
    for window in ({"t_min": 0*b2.ms}, {"t_max": 50*b2.ms}, {"t_min": 250*b2.ms}):
        plot_tools.plot_network_activity(rate_mon, spike_mon, volt_mon,
                                         spike_train_idx_list=monitored_idx, **window)
    plt.show()
def _some_example_calls_and_tests():
    """Developer playground: run a long simulation and exercise the spike_tools
    spectral-analysis helpers (ISI distribution, population and per-neuron power spectra)."""
    from neurodynex3.tools import spike_tools
    poisson_rate = 35*b2.Hz
    g = 4
    CE = 5000
    # Spectral-analysis bookkeeping: the simulation length is chosen so that
    # k windows of N_samples samples (plus an initial transient T_init) fit exactly.
    delta_t = 0.1 * b2.ms  # sampling interval
    delta_f = 5. * b2.Hz  # desired frequency resolution
    T_init = 100 * b2.ms  # transient discarded from the analysis
    k = 9  # number of analysis windows
    f_max = 1./(2. * delta_t)  # Nyquist frequency for the chosen delta_t
    N_samples = 2. * f_max / delta_f
    T_signal = N_samples * delta_t
    T_sim = k * T_signal + T_init
    print("Start simulation. T_sim={}, T_signal={}. N_samples={}".format(T_sim, T_signal, N_samples))
    b2.defaultclock.dt = delta_t
    stime = T_sim + (10 + k) * b2.defaultclock.dt  # add a few extra samples (solves rounding issues)
    rate_monitor, spike_monitor, voltage_monitor, monitored_spike_idx = \
        simulate_brunel_network(
            N_Excit=CE, poisson_input_rate=poisson_rate, g=g, sim_time=stime)
    # Overview of the whole run and a zoom on the last 80 ms.
    plot_tools.plot_network_activity(rate_monitor, spike_monitor, voltage_monitor,
                                     spike_train_idx_list=monitored_spike_idx, t_min=0*b2.ms)
    plot_tools.plot_network_activity(rate_monitor, spike_monitor, voltage_monitor,
                                     spike_train_idx_list=monitored_spike_idx, t_min=T_sim - 80*b2.ms)
    # Inter-spike-interval statistics (transient excluded via window_t_min).
    spike_stats = spike_tools.get_spike_train_stats(spike_monitor, window_t_min=150.*b2.ms)
    plot_tools.plot_ISI_distribution(spike_stats, hist_nr_bins=77, xlim_max_ISI=100*b2.ms)
    # # Power Spectrum
    pop_freqs, pop_ps, average_population_rate = \
        spike_tools.get_population_activity_power_spectrum(
            rate_monitor, delta_f, k, T_init, subtract_mean_activity=True)
    plot_tools.plot_population_activity_power_spectrum(pop_freqs, pop_ps, 1000*b2.Hz, average_population_rate)
    plt.show()
    # Averaged single-neuron spectrum over a 200-neuron sample.
    freq, mean_ps, all_ps, mean_firing_rate, all_mean_firing_freqs = \
        spike_tools.get_averaged_single_neuron_power_spectrum(
            spike_monitor, sampling_frequency=1./delta_t, window_t_min=100.*b2.ms,
            window_t_max=T_sim, subtract_mean=False, nr_neurons_average=200)
    print("plot_spike_train_power_spectrum")
    plot_tools.plot_spike_train_power_spectrum(freq, mean_ps, all_ps, 1000 * b2.Hz,
                                               mean_firing_freqs_per_neuron=all_mean_firing_freqs,
                                               nr_highlighted_neurons=2)
    plt.show()
    print("done")
# Script entry point: run the introductory example when executed directly.
if __name__ == "__main__":
    getting_started()
|
EPFL-LCN/neuronaldynamics-exercises
|
neurodynex3/brunel_model/LIF_spiking_network.py
|
Python
|
gpl-2.0
| 10,560
|
[
"NEURON"
] |
1fdd8ac7348f7d64ac73a67606aea1d148c9a9ae58dc3b201880f76cfab9920f
|
# $HeadURL$
"""
DIRAC - Distributed Infrastructure with Remote Agent Control
The LHCb distributed data production and analysis system.
DIRAC is a software framework for distributed computing which
allows to integrate various computing resources in a single
system. At the same time it integrates all kinds of computing
activities like Monte Carlo simulations, data processing, or
final user analysis.
It is build as number of cooperating systems:
- Accounting
- Configuration
- Core
- Base
- DISET
- Security
- Utilities
- Workflow
- Framework
- RequestManagement
- Resources
- Transformation
Which are used by other system providing functionality to
the end user:
- DataManagement
- Interfaces
- ResourceStatus
- StorageManagement
- WorkloadManagement
It defines the following data members:
- majorVersion: DIRAC Major version number
- minorVersion: DIRAC Minor version number
- patchLevel: DIRAC Patch level number
- preVersion: DIRAC Pre release number
- version: DIRAC version string
- buildVersion: DIRAC version string
- errorMail: mail address for important errors
- alarmMail: mail address for important alarms
- pythonPath: absolute real path to the directory that contains this file
- rootPath: absolute real path to the parent of DIRAC.pythonPath
- platform: DIRAC platform string for current host
- platformTuple: DIRAC platform tuple for current host
It loads:
- S_OK: OK return structure
- S_ERROR: ERROR return structure
- gLogger: global Logger object
- gConfig: global Config object
- gMonitor: global Monitor object
It defines the following functions:
- abort: aborts execution
- exit: finish execution using callbacks
- siteName: returns DIRAC name for current site
"""
__RCSID__ = "$Id$"
from pkgutil import extend_path
__path__ = extend_path( __path__, __name__ )
import platform as pyPlatform
import sys, os
# Define Version
majorVersion = 6
minorVersion = 11
patchLevel = 3
preVersion = 0
# Base version string, e.g. "v6r11"; patch/pre-release suffixes are appended below.
version = "v%sr%s" % ( majorVersion, minorVersion )
buildVersion = "v%dr%d" % ( majorVersion, minorVersion )
if patchLevel:
  # e.g. "v6r11p3" / "v6r11 build 3"
  version = "%sp%s" % ( version, patchLevel )
  buildVersion = "%s build %s" % ( buildVersion, patchLevel )
if preVersion:
  # e.g. "v6r11-pre1" / "v6r11 pre 1"
  version = "%s-pre%s" % ( version, preVersion )
  buildVersion = "%s pre %s" % ( buildVersion, preVersion )
# Check of python version: abort early with a readable message instead of failing
# later with an obscure import error (this file is Python 2 only).
__pythonMajorVersion = ( "2", )
__pythonMinorVersion = ( "4", "5", "6", "7" )
pythonVersion = pyPlatform.python_version_tuple()
if str( pythonVersion[0] ) not in __pythonMajorVersion or str( pythonVersion[1] ) not in __pythonMinorVersion:
  print "Python Version %s not supported by DIRAC" % pyPlatform.python_version()
  print "Supported versions are: "
  for major in __pythonMajorVersion:
    for minor in __pythonMinorVersion:
      print "%s.%s.x" % ( major, minor )
  sys.exit( 1 )
# Contact addresses used by the framework for error/alarm notifications.
errorMail = "dirac.alarms@gmail.com"
alarmMail = "dirac.alarms@gmail.com"
# Set rootPath of DIRAC installation: pythonPath is the directory holding this
# package, rootPath its parent (the installation root).
pythonPath = os.path.realpath( __path__[0] )
rootPath = os.path.dirname( pythonPath )
# Import DIRAC.Core.Utils modules
from DIRAC.Core.Utilities import ExitCallback
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
#Logger
from DIRAC.FrameworkSystem.Client.Logger import gLogger
#Configuration client
from DIRAC.ConfigurationSystem.Client.Config import gConfig
#Monitoring client
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
# Cache for siteName(); False until the first successful lookup.
__siteName = False
def siteName():
  """
  Return the DIRAC site name for the current host.

  The value is computed once (from configuration, falling back to the host
  FQDN) and cached in the module-level __siteName for subsequent calls.
  """
  global __siteName
  if __siteName:
    return __siteName
  #FIXME: does this ever happen that we have to use the defaultValue if getValue ???
  from DIRAC.Core.Utilities import Network
  # Build a default from the host FQDN in case the configuration has no value.
  domainParts = Network.getFQDN().split( '.' )
  if len( domainParts ) > 2:
    # Use the last component of the FQDN as country code if there are more than 2 components
    defaultSite = 'DIRAC.Client.%s' % domainParts[-1]
  else:
    # else use local as country code
    defaultSite = 'DIRAC.Client.local'
  __siteName = gConfig.getValue( '/LocalSite/Site', defaultSite )
  return __siteName
#Callbacks: install signal handlers so registered exit callbacks run on termination
ExitCallback.registerSignals()
#Set the platform: DIRAC platform string (e.g. os_arch_compiler) and its tuple form
from DIRAC.Core.Utilities.Platform import getPlatformString
platform = getPlatformString()
platformTuple = tuple( platform.split( '_' ) )
def exit( exitCode = 0 ):
  """
  Finish execution using callbacks

  :param exitCode: integer process exit status passed to sys.exit (default 0)
  """
  # Run all registered exit callbacks first, then terminate via sys.exit
  ExitCallback.execute( exitCode, [] )
  sys.exit( exitCode )
def abort( exitCode, *args, **kwargs ):
  """
  Abort execution immediately (no exit callbacks are run).

  Logs a fatal message and terminates the process with os._exit, which
  bypasses interpreter cleanup. Any failure while logging is itself logged
  and the process still exits with the given code.

  :param exitCode: integer process exit status passed to os._exit
  :param args: positional arguments forwarded to gLogger.fatal
  :param kwargs: keyword arguments forwarded to gLogger.fatal
  """
  try:
    gLogger.fatal( *args, **kwargs )
    os._exit( exitCode )
  except Exception:
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed; any ordinary logging failure is reported and we still exit.
    gLogger.exception( 'Error while executing DIRAC.abort' )
    os._exit( exitCode )
|
rajanandakumar/DIRAC
|
__init__.py
|
Python
|
gpl-3.0
| 4,985
|
[
"DIRAC"
] |
2e37dabd0dfe8f24cfbe8a1550ac5d8306fa7c632f448d4c7f13c0de81c51107
|
from __future__ import (absolute_import, division, print_function)
import stresstesting
from mantid.simpleapi import Abins, mtd, DeleteWorkspace
from AbinsModules import AbinsConstants, AbinsTestHelpers
class HelperTestingClass(object):
    """Shared state and helpers for the Abins system-test scenarios below.

    Holds the parameters fed to the Abins algorithm (temperature, instrument,
    DFT program, quantum order, ...) and offers the three test scenarios:
    calculation from scratch, restart at a different temperature, and restart
    at a different quantum order.
    """
    def __init__(self):
        self._temperature = 10  # K
        self._sample_form = "Powder"
        self._instrument_name = "TOSCA"
        self._atoms = ""
        self._sum_contributions = True
        self._cross_section_factor = "Incoherent"
        # Map DFT program name -> phonon-file extension.
        self._extension = {"CASTEP": ".phonon", "CRYSTAL": ".out", "DMOL3": ".outmol", "GAUSSIAN": ".log"}
        self._output_name = "output_workspace"
        self._ref = "reference_workspace"
        self._scale = 1.0
        # Filled in later by the set_* helpers.
        self._dft_program = None
        self._quantum_order_event = None
        self._system_name = None

    def set_instrument_name(self, instrument_name=None):
        """Set the instrument; must be one of AbinsConstants.ALL_INSTRUMENTS."""
        if instrument_name not in AbinsConstants.ALL_INSTRUMENTS:
            raise ValueError("Wrong instrument.")
        self._instrument_name = instrument_name

    def set_scale(self, scale=None):
        """Set the scaling factor; must be a positive float."""
        if not (isinstance(scale, float) and scale > 0.0):
            raise ValueError("Wrong scale.")
        self._scale = scale

    def set_dft_program(self, dft_program=None):
        """Set the DFT program; must be one of AbinsConstants.ALL_SUPPORTED_DFT_PROGRAMS."""
        if dft_program not in AbinsConstants.ALL_SUPPORTED_DFT_PROGRAMS:
            raise RuntimeError("Unsupported DFT program: %s " % dft_program)
        self._dft_program = dft_program

    def set_order(self, order=None):
        """Set the quantum order event; must be one of the four supported orders."""
        supported = [AbinsConstants.QUANTUM_ORDER_ONE, AbinsConstants.QUANTUM_ORDER_TWO,
                     AbinsConstants.QUANTUM_ORDER_THREE, AbinsConstants.QUANTUM_ORDER_FOUR]
        if order not in supported:
            raise RuntimeError(
                "Unsupported number of quantum order event %s" % order)
        self._quantum_order_event = order

    def set_name(self, name):
        """Set the system name (also used as the output workspace name)."""
        if not isinstance(name, str):
            raise RuntimeError(
                "Invalid name. Name should be a string but it is %s " % type(name))
        self._system_name = name
        self._output_name = name

    def set_cross_section(self, cross_section=None):
        """Set the cross-section scaling mode (no validation, matching Abins itself)."""
        self._cross_section_factor = cross_section

    def _run_abins(self, temperature, order, workspace):
        """Invoke Abins with the current parameters; only temperature, quantum
        order and output workspace vary between the scenarios."""
        Abins(DFTprogram=self._dft_program,
              PhononFile=self._system_name + self._extension[self._dft_program],
              Temperature=temperature, SampleForm=self._sample_form,
              Instrument=self._instrument_name, Atoms=self._atoms,
              SumContributions=self._sum_contributions,
              QuantumOrderEventsNumber=str(order), Scale=self._scale,
              ScaleByCrossSection=self._cross_section_factor,
              OutputWorkspace=workspace)

    def case_from_scratch(self):
        """
        User performs calculation from scratch (not loaded from hdf file). All data is calculated.
        """
        self._run_abins(self._temperature, self._quantum_order_event, self._output_name)

    def case_restart_diff_t(self):
        """
        The considered testing scenario looks as follows. First the user performs the simulation for T=10K (first run).
        Then the user changes T to 20K (second run). For T=20K S has to be recalculated. After that the user performs
        simulation with the same parameters as for the initial simulation, e.g., T=10K (third run). In the third run all
        required data will be read from hdf file. It is checked if workspace for the initial run and the third run is
        the same (should be the same).
        """
        temperature_for_test = self._temperature + 10  # 20K
        wrk_name = self._system_name
        # T = 10 K
        self._run_abins(self._temperature, self._quantum_order_event, wrk_name + "init")
        # T = 20 K
        self._run_abins(temperature_for_test, self._quantum_order_event, wrk_name + "_mod")
        # T = 10 K
        self._run_abins(self._temperature, self._quantum_order_event, self._output_name)

    def case_restart_diff_order(self, order=None):
        """
        The considered testing scenario looks as follows. First calculations are performed for
        self._quantum_order_event. Then calculations are performed for order (different quantum order event). In case
        order > self._quantum_order_event then S should be calculated. Otherwise, it will be loaded from an hdf file.
        :param order: number of quantum order event for which restart should be done.
        """
        self.case_from_scratch()
        DeleteWorkspace(self._output_name)
        self._run_abins(self._temperature, order, self._output_name)

    def __del__(self):
        """
        Destructor removes output files after tests and workspaces.
        :return:
        """
        try:
            AbinsTestHelpers.remove_output_files(list_of_names=[self._system_name])
        except TypeError:
            # nothing to remove but it is OK
            pass
        mtd.clear()
# ----------------------------------------------------------------------------------------------------------------
# Tests for 1D S
# ----------------------------------------------------------------------------------------------------------------
class AbinsCRYSTALTestScratch(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: S computed entirely from scratch for CRYSTAL input with quantum
    order events 1-4; the result is compared against a stored reference.
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "TolueneScratchAbins"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_order(AbinsConstants.QUANTUM_ORDER_FOUR)
        self.set_name(system_name)
        self.case_from_scratch()

    def excludeInPullRequests(self):
        return True

    def validate(self):
        self.tolerance = 1e-2
        return self._output_name, self.ref_result
# ----------------------------------------------------------------------------------------------------------------
class AbinsCRYSTALTestBiggerSystem(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: larger CRYSTAL system computed from scratch, restricted to the
    first quantum order event only.
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "Crystalb3lypScratchAbins"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_order(AbinsConstants.QUANTUM_ORDER_ONE)
        self.set_name(system_name)
        self.case_from_scratch()

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
# ----------------------------------------------------------------------------------------------------------------
class AbinsCRYSTALTestT(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: restart scenario where results for another temperature already
    exist in the hdf cache (CRYSTAL input).
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "TolueneTAbins"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_order(AbinsConstants.QUANTUM_ORDER_TWO)
        self.set_name(system_name)
        self.case_restart_diff_t()

    def excludeInPullRequests(self):
        return True

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
# ----------------------------------------------------------------------------------------------------------------
class AbinsCRYSTALTestLargerOrder(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: restart with a HIGHER quantum order than is cached, so S must be
    recalculated (CRYSTAL input).
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "TolueneLargerOrderAbins"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_order(AbinsConstants.QUANTUM_ORDER_TWO)
        self.set_name(system_name)
        self.case_restart_diff_order(AbinsConstants.QUANTUM_ORDER_THREE)

    def excludeInPullRequests(self):
        return True

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
# ----------------------------------------------------------------------------------------------------------------
class AbinsCRYSTALTestSmallerOrder(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: restart with a LOWER quantum order than is cached, so S is read
    back from the hdf file (CRYSTAL input).
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "TolueneSmallerOrderAbins"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_order(AbinsConstants.QUANTUM_ORDER_TWO)
        self.set_name(system_name)
        self.case_restart_diff_order(AbinsConstants.QUANTUM_ORDER_ONE)

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
class AbinsCRYSTALTestScale(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: checks that the Scale parameter is applied correctly
    (CRYSTAL input, quantum order two, scale factor 2).
    """
    _wrk_1 = None
    # Consistency fix: siblings declare 'ref_result' (the attribute actually
    # assigned in runTest and read in validate); '_ref_result' was never used.
    ref_result = None
    tolerance = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        scaling_factor = 2.0
        name = "TolueneScale"
        self.ref_result = name + ".nxs"
        self.set_dft_program("CRYSTAL")
        self.set_name(name)
        self.set_order(AbinsConstants.QUANTUM_ORDER_TWO)
        self.set_scale(scale=scaling_factor)
        self.case_from_scratch()

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
# noinspection PyAttributeOutsideInit,PyPep8Naming
# noinspection PyAttributeOutsideInit,PyPep8Naming
class AbinsCASTEPNoH(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: a system containing no hydrogen (CASTEP input, total cross
    section, quantum order events 1-4).
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "Na2SiF6_CASTEP"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CASTEP")
        self.set_order(AbinsConstants.QUANTUM_ORDER_FOUR)
        self.set_cross_section(cross_section="Total")
        self.set_name(system_name)
        self.case_from_scratch()
        self._wrk_1 = self._output_name

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
# noinspection PyAttributeOutsideInit,PyPep8Naming
# noinspection PyAttributeOutsideInit,PyPep8Naming
class AbinsCASTEP1DDispersion(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: S computed from phonon dispersion data, 1D case (CASTEP input,
    first quantum order only).
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "Mapi"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("CASTEP")
        self.set_order(AbinsConstants.QUANTUM_ORDER_ONE)
        self.set_name(system_name)
        self.case_from_scratch()
        self._wrk_1 = self._output_name

    def validate(self):
        self.tolerance = 1e-1
        return self._output_name, self.ref_result
class AbinsDMOL3TestScratch(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: S computed from scratch for DMOL3 input with quantum order
    events 1-4 and total cross section.
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "Na2SiF6_DMOL3"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("DMOL3")
        self.set_order(AbinsConstants.QUANTUM_ORDER_FOUR)
        self.set_cross_section(cross_section="Total")
        self.set_name(system_name)
        self.case_from_scratch()

    def excludeInPullRequests(self):
        return True

    def validate(self):
        self.tolerance = 1e-2
        return self._output_name, self.ref_result
# NOTE(review): class name looks like a typo for "AbinsGAUSSIANTestScratch";
# kept as-is because the test framework discovers tests by class name.
class AbinsGAUSSIANestScratch(stresstesting.MantidStressTest, HelperTestingClass):
    """
    Benchmark: S computed from scratch for GAUSSIAN input with quantum order
    events 1-4 and incoherent cross section.
    """
    tolerance = None
    ref_result = None

    def runTest(self):
        HelperTestingClass.__init__(self)
        system_name = "C6H5Cl-Gaussian"
        self.ref_result = system_name + ".nxs"
        self.set_dft_program("GAUSSIAN")
        self.set_order(AbinsConstants.QUANTUM_ORDER_FOUR)
        self.set_cross_section(cross_section="Incoherent")
        self.set_name(system_name)
        self.case_from_scratch()

    def excludeInPullRequests(self):
        return True

    def validate(self):
        self.tolerance = 1e-2
        return self._output_name, self.ref_result
|
dymkowsk/mantid
|
Testing/SystemTests/tests/analysis/AbinsTest.py
|
Python
|
gpl-3.0
| 14,934
|
[
"CASTEP",
"CRYSTAL",
"DMol3",
"Gaussian"
] |
ae730d36d6b83861d98fc41dfe211093c1cd50040bd9d3e8fd4b5d3e784f2dda
|
#!/usr/bin/env python3
"""Cheap and simple API helper
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
# While this is a good example script to teach about introspection,
# in real life it has been superceded by PyDoc, which is part of the
# standard library in Python 2.1 and later.
#
# Your IDE may already import the "help" function from pydoc
# automatically on startup; if not, do this:
#
# >>> from pydoc import help
#
# The help function in this module takes the object itself to get
# help on, but PyDoc can also take a string, like this:
#
# >>> help("string") # gets help on the string module
# >>> help("apihelper.help") # gets help on the function below
# >>> help() # enters an interactive help mode
#
# PyDoc can also act as an HTTP server to dynamically produce
# HTML-formatted documentation of any module in your path.
# That's wicked cool. Read more about PyDoc here:
# http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html
def info(object, spacing=10, collapse=1):
    """Print methods and doc strings.

    Takes module, class, list, dictionary, or string.

    :param object: the thing to introspect (parameter name kept for backward
        compatibility with keyword callers, although it shadows the builtin)
    :param spacing: column width used to left-justify each attribute name
    :param collapse: if truthy, collapse each doc string's whitespace runs
        into single spaces so it fits on one line
    """
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    # A conditional expression replaces the fragile `cond and a or b` idiom,
    # which silently picks `b` whenever `a` evaluates falsy.
    processFunc = (lambda s: " ".join(s.split())) if collapse else (lambda s: s)
    print("\n".join(["%s %s" %
                     (method.ljust(spacing),
                      processFunc(str(getattr(object, method).__doc__)))
                     for method in methodList]))
# When run as a script, print the doc string of the builtin help function.
if __name__ == "__main__":
    print(help.__doc__)
|
jtraver/dev
|
python3/sys/apihelper.py
|
Python
|
mit
| 1,802
|
[
"VisIt"
] |
8b8c638db17ba696e8572f2cbdf60e988919c4806f20ecd86464931ef2160735
|
import sys
import os
import platform
import stat
import time
from ddsc.exceptions import DDSUserException
# User-facing message shown when the terminal encoding is not UTF.
TERMINAL_ENCODING_NOT_UTF_ERROR = """
ERROR: DukeDSClient requires UTF terminal encoding.
Follow this guide for adjusting your terminal encoding:
https://github.com/Duke-GCB/DukeDSClient/blob/master/docs/UnicodeTerminalSetup.md
"""
# User-facing message shown when ~/.ddsclient is readable by group/other.
CONFIG_FILE_PERMISSIONS_ERROR = """
ERROR: Your config file ~/.ddsclient permissions can allow other users to see your secret key.
Please remove group and other permissions for your DukeDSClient configuration file.
To do so run the following command:
chmod 600 ~/.ddsclient
"""
# Path separator used by the remote duke-data-service (independent of os.sep).
REMOTE_PATH_SEP = '/'
class KindType(object):
    """
    The types of items that are part of a project. Strings are from the duke-data-service.
    """
    # kind strings exactly as reported by the service API
    file_str = 'dds-file'
    folder_str = 'dds-folder'
    project_str = 'dds-project'

    @staticmethod
    def is_file(item):
        """Return True when item's kind marks it as a file."""
        return item.kind == KindType.file_str

    @staticmethod
    def is_folder(item):
        """Return True when item's kind marks it as a folder."""
        return item.kind == KindType.folder_str

    @staticmethod
    def is_project(item):
        """Return True when item's kind marks it as a project."""
        return item.kind == KindType.project_str
class NoOpProgressPrinter(object):
    """Progress-printer drop-in that silently discards every progress event."""

    def transferring_item(self, item, increment_amt=1, override_msg_verb=None):
        """Ignore the item-transfer notification."""
        pass

    def increment_progress(self, increment_amt=1):
        """Ignore the progress increment."""
        pass

    def finished(self):
        """Ignore the completion notification."""
        pass
class ProgressPrinter(object):
    """
    Prints a progress bar(percentage) to the terminal, expects to have sending_item and finished called.
    Replaces the same line again and again as progress changes.
    """
    def __init__(self, total, msg_verb):
        """
        Setup printer expecting to have sending_item called total times.
        :param total: int the number of items we are expecting, used to determine progress
        :param msg_verb: str verb describing the transfer (e.g. 'sending')
        """
        self.total = total
        self.cnt = 0
        self.max_width = 0
        self.waiting = False
        self.msg_verb = msg_verb
        self.progress_bar = ProgressBar()
        self.transferred_bytes = 0
        self.total_bytes = 0

    def transferring_item(self, item, increment_amt=1, override_msg_verb=None, transferred_bytes=0):
        """
        Update progress that item is about to be transferred.
        :param item: LocalFile, LocalFolder, or LocalContent(project) that is about to be sent.
        :param increment_amt: int amount to increase our count(how much progress have we made)
        :param override_msg_verb: str: overrides msg_verb specified in constructor
        :param transferred_bytes: int bytes moved for this item, added to the running total
        """
        self.increment_progress(increment_amt)
        # Percent formula kept exactly as before to avoid float-rounding drift.
        percent_done = int(float(self.cnt) / float(self.total) * 100.0)
        details = 'project' if KindType.is_project(item) else os.path.basename(item.path)
        msg_verb = override_msg_verb or self.msg_verb
        self.transferred_bytes += transferred_bytes
        self.progress_bar.update(percent_done, self.transferred_bytes, '{} {}'.format(msg_verb, details))
        self.progress_bar.show()

    def increment_progress(self, amt=1):
        """Advance the internal counter by amt (no display update)."""
        self.cnt += amt

    def finished(self):
        """
        Must be called to print final progress label.
        """
        self.progress_bar.set_state(ProgressBar.STATE_DONE)
        self.progress_bar.show()

    def show_warning(self, message):
        """
        Shows warnings to the user.
        :param message: str: Message to display
        """
        print(message)

    def start_waiting(self):
        """
        Show waiting progress bar until done_waiting is called.
        Only has an effect if we are in waiting state.
        """
        if self.waiting:
            return
        self.waiting = True
        wait_msg = "Waiting for project to become ready for {}".format(self.msg_verb)
        self.progress_bar.show_waiting(wait_msg)

    def done_waiting(self):
        """
        Show running progress bar (only has an effect if we are in waiting state).
        """
        if not self.waiting:
            return
        self.waiting = False
        self.progress_bar.show_running()
class ProgressBar(object):
    """Renders a single updating terminal line in one of three states:
    running (percent + current item), waiting (percent + wait message),
    done (final line terminated with a newline)."""
    STATE_RUNNING = 'running'
    STATE_WAITING = 'waiting'
    STATE_DONE = 'done'

    def __init__(self):
        self.max_width = 0          # widest line printed so far (for padding over old text)
        self.percent_done = 0
        self.current_item_details = ''
        self.line = ''
        self.state = self.STATE_RUNNING
        self.wait_msg = 'Waiting'
        self.transferred_bytes = 0
        self.start_time = time.time()  # used to compute the transfer speed

    def update(self, percent_done, transferred_bytes, details):
        """Record the latest progress values without redrawing."""
        self.percent_done = percent_done
        self.current_item_details = details
        self.transferred_bytes = transferred_bytes

    def set_state(self, state):
        """Switch to one of the STATE_* values."""
        self.state = state

    def _get_line(self):
        """Build the text for the current state (speed comes from elapsed time)."""
        speed = transfer_speed_str(current_time=time.time(), start_time=self.start_time,
                                   transferred_bytes=self.transferred_bytes)
        if self.state == self.STATE_DONE:
            return 'Done: 100%{}'.format(speed)
        details = self.wait_msg if self.state == self.STATE_WAITING else self.current_item_details
        return 'Progress: {}%{} - {}'.format(self.percent_done, speed, details)

    def show(self):
        """Redraw the line in place, remembering the widest line seen."""
        text = self._get_line()
        sys.stdout.write(self.format_line(text))
        sys.stdout.flush()
        self.max_width = max(len(text), self.max_width)

    def format_line(self, line):
        """Pad to the widest previous line (erasing leftovers) and prefix a
        carriage return; the done state also terminates with a newline."""
        result = '\r{}'.format(line.ljust(self.max_width))
        if self.state == self.STATE_DONE:
            result += '\n'
        return result

    def show_running(self):
        """
        Show running progress bar
        """
        self.set_state(ProgressBar.STATE_RUNNING)
        self.show()

    def show_waiting(self, wait_msg):
        """
        Show waiting progress bar until done_waiting is called.
        Only has an effect if we are in waiting state.
        :param wait_msg: str: message describing what we are waiting for
        """
        self.wait_msg = wait_msg
        self.set_state(ProgressBar.STATE_WAITING)
        self.show()
class ProjectWalker(object):
    """
    Depth-first traversal over all nodes of a project tree.
    For use with RemoteProject and LocalProject
    """
    @staticmethod
    def walk_project(project, visitor):
        """
        Send every node in the tree (project, folders, files) to *visitor*.
        :param project: LocalProject project we want to visit all children of.
        :param visitor: object must implement visit_project, visit_folder, visit_file
        """
        ProjectWalker._visit_content(project, None, visitor)

    @staticmethod
    def _visit_content(item, parent, visitor):
        """
        Dispatch *item* to the matching visitor method, then recurse into children.
        :param item: LocalContent/LocalFolder/LocalFile we are traversing down from
        :param parent: LocalContent/LocalFolder parent or None
        :param visitor: object visiting the tree
        """
        if KindType.is_project(item):
            visitor.visit_project(item)
        elif KindType.is_folder(item):
            visitor.visit_folder(item, parent)
        else:
            visitor.visit_file(item, parent)
        # Files are leaves; everything else has a children collection.
        if KindType.is_file(item):
            return
        for child in item.children:
            ProjectWalker._visit_content(child, item, visitor)
class FilteredProject(object):
    """
    Wraps a visitor so that only items passing a filter function reach it.
    """
    def __init__(self, filter_func, visitor):
        """
        Setup to let visitor walk a project filtering out items based on a function.
        :param filter_func: function(item): returns True to let visitor see the item
        :param visitor: object: object with visit_project,visit_folder,visit_file methods
        """
        self.filter_func = filter_func
        self.visitor = visitor

    def walk_project(self, project):
        """
        Walk all nodes (RemoteProject,RemoteFolder,RemoteFile) of *project*,
        forwarding each one the filter accepts to the wrapped visitor.
        :param project: RemoteProject: project we will walk
        """
        ProjectWalker.walk_project(project, self)

    def visit_project(self, item):
        if not self.filter_func(item):
            return
        self.visitor.visit_project(item)

    def visit_folder(self, item, parent):
        if not self.filter_func(item):
            return
        self.visitor.visit_folder(item, parent)

    def visit_file(self, item, parent):
        if not self.filter_func(item):
            return
        self.visitor.visit_file(item, parent)
class ProjectDetailsList(object):
    """
    Visitor that collects one display line per project/folder/file into
    the ``details`` list property.
    """
    def __init__(self, long_format):
        # long_format adds ids (and file hashes) to each line
        self.long_format = long_format
        self.details = []
        self.id_to_path = {}  # folder id -> remote path, used to build child paths

    def walk_project(self, project):
        """
        Fill self.details with one line for each node in *project*.
        :param project: LocalProject project we will read details from.
        """
        # ProjectWalker calls back into visit_project/visit_folder/visit_file below.
        ProjectWalker.walk_project(project, self)

    def visit_project(self, item):
        if not self.long_format:
            self.details.append("Project {} Contents:".format(item.name))
        else:
            self.details.append("{} - Project {} Contents:".format(item.id, item.name))

    def visit_folder(self, item, parent):
        path = self.get_name(item, parent)
        self.id_to_path[item.id] = path
        if not self.long_format:
            self.details.append(path)
        else:
            self.details.append('{}\t{}'.format(item.id, path))

    def visit_file(self, item, parent):
        path = self.get_name(item, parent)
        if not self.long_format:
            self.details.append(path)
        else:
            self.details.append('{}\t{}\t({}:{})'.format(item.id, path, item.hash_alg, item.file_hash))

    def get_name(self, item, parent):
        """Return the remote path for *item*, built from its parent's path."""
        if parent:
            if parent.kind == KindType.project_str:
                return '{}{}'.format(REMOTE_PATH_SEP, item.name)
            parent_name = self.id_to_path.get(parent.id)
            if parent_name:
                return "{}{}{}".format(parent_name, REMOTE_PATH_SEP, item.name)
        return item.name
class ProgressQueue(object):
    """
    Thin wrapper that sends typed tuples over a queue: amount processed,
    waiting-state changes, or an error with a message.
    """
    ERROR = 'error'
    PROCESSED = 'processed'
    START_WAITING = 'start_waiting'
    DONE_WAITING = 'done_waiting'

    def __init__(self, queue):
        self.queue = queue

    def error(self, error_msg):
        """Send an error tuple carrying *error_msg*."""
        self.queue.put((self.ERROR, error_msg))

    def processed(self, amt):
        """Send an amount-processed tuple carrying *amt*."""
        self.queue.put((self.PROCESSED, amt))

    def start_waiting(self):
        """Signal that the producer has entered the waiting state."""
        self.queue.put((self.START_WAITING, None))

    def done_waiting(self):
        """Signal that the producer has left the waiting state."""
        self.queue.put((self.DONE_WAITING, None))

    def get(self):
        """
        Get the next tuple added to the queue.
        :return: (str, value): type constant plus its payload (message/amount/None).
        """
        return self.queue.get()
def wait_for_processes(processes, size, progress_queue, watcher, item):
    """
    Drain *progress_queue* until *size* chunks have been processed, then join.
    On an error message, terminate all processes and raise DDSUserException.
    :param processes: [Process]: processes we are waiting to finish downloading a file
    :param size: int: how many values we expect to be processed by processes
    :param progress_queue: ProgressQueue: queue which will receive tuples of progress or error
    :param watcher: ProgressPrinter: we notify of our progress
    :param item: object: RemoteFile/LocalFile we are transferring.
    """
    remaining = size
    while remaining > 0:
        message_type, payload = progress_queue.get()
        if message_type == ProgressQueue.PROCESSED:
            chunk_size, transferred_bytes = payload
            watcher.transferring_item(item, increment_amt=chunk_size,
                                      transferred_bytes=transferred_bytes)
            remaining -= chunk_size
        elif message_type == ProgressQueue.START_WAITING:
            watcher.start_waiting()
        elif message_type == ProgressQueue.DONE_WAITING:
            watcher.done_waiting()
        else:
            # Any other type is an error: stop every worker then surface the message.
            for process in processes:
                process.terminate()
            raise DDSUserException(payload)
    for process in processes:
        process.join()
def verify_terminal_encoding(encoding):
    """
    Raise DDSUserException when the terminal encoding is not Unicode.
    Any name containing 'UTF' (ignoring case) is accepted, as is a
    missing/empty encoding.
    :param encoding: str: encoding we want to check
    """
    if not encoding:
        return
    if "UTF" not in encoding.upper():
        raise DDSUserException(TERMINAL_ENCODING_NOT_UTF_ERROR)
def verify_file_private(filename):
    """
    Raise DDSUserException when *filename* grants group/other permissions.
    Missing files are ignored.  On Windows this never raises due to the
    implementation of stat.
    """
    if platform.system().upper() == 'WINDOWS':
        return
    expanded = os.path.expanduser(filename)
    if not os.path.exists(expanded):
        return
    if mode_allows_group_or_other(os.stat(expanded).st_mode):
        raise DDSUserException(CONFIG_FILE_PERMISSIONS_ERROR)
def mode_allows_group_or_other(st_mode):
    """
    Report whether a stat mode grants any group or other permissions.
    :param st_mode: int: bit set from a file
    :return: bool: true when group or other has some permissions
    """
    group_or_other_bits = stat.S_IRWXG | stat.S_IRWXO
    return (st_mode & group_or_other_bits) != 0
class RemotePath(object):
    """Helpers for manipulating remote (separator-delimited) paths."""
    @staticmethod
    def add_leading_slash(path):
        """Return *path* prefixed with the remote path separator."""
        return '{}{}'.format(REMOTE_PATH_SEP, path)

    @staticmethod
    def strip_leading_slash(path):
        """Return *path* with any leading remote separators removed."""
        return path.lstrip(REMOTE_PATH_SEP)

    @staticmethod
    def split(remote_path):
        """Split *remote_path* into its component names."""
        stripped = RemotePath.strip_leading_slash(remote_path)
        return stripped.split(REMOTE_PATH_SEP)
def humanize_bytes(num_bytes):
    """
    Convert a byte count into a short human readable string (B/KB/MB/GB).
    Uses decimal (1000-based) units and drops a trailing '.0'.
    :param num_bytes: int: bytes to be converted
    :return: str
    """
    factor = 1000
    units = ["B", "KB", "MB", "GB"]
    val = num_bytes
    unit_idx = 0
    # Divide down until the value fits the unit or we run out of units.
    while unit_idx < len(units) - 1 and val >= factor:
        val = val / factor
        unit_idx += 1
    formatted = "{:0.1f} {}".format(val, units[unit_idx])
    return formatted.replace(".0", "")
def plural_fmt(name, cnt):
    """
    Join *cnt* and *name*, appending an 's' to name unless cnt is exactly 1.
    :param name: str name of the item type
    :param cnt: int number items of this type
    :return: str name and cnt joined
    """
    suffix = '' if cnt == 1 else 's'
    return '{} {}{}'.format(cnt, name, suffix)
def join_with_commas_and_and(items):
    """
    Join strings English-style: 'a, b and c', 'a and b', 'a', or ''.
    :param items: [str]: values to join
    :return: str
    """
    if not items:
        return ""
    leading = ', '.join(items[:-1])
    last = items[-1]
    if not leading:
        return last
    return ' and '.join([leading, last])
def transfer_speed_str(current_time, start_time, transferred_bytes):
    """
    Format a transfer rate suffix like '@ 1.5 MB/s' for progress display.
    Returns '' when no time has elapsed or nothing was transferred.
    :param current_time: float: current time
    :param start_time: float: starting time
    :param transferred_bytes: int: bytes transferred
    :return: str: end user str
    """
    elapsed = current_time - start_time
    if elapsed <= 0 or transferred_bytes <= 0:
        return ''
    # The 0.5s pad keeps very short transfers from reporting inflated speeds.
    rate = float(transferred_bytes) / (elapsed + 0.5)
    return '@ {}/s'.format(humanize_bytes(rate))
def boolean_input_prompt(message):
    """
    Prompt the user with *message* and interpret the reply as a boolean.
    Accepts Y/YES/T/TRUE (any case) as True; everything else is False.
    :param message: str: prompt shown to the user
    :return: bool
    """
    # The old sys.version_info branch called input() on BOTH sides, so the
    # version test was dead code (py2's raw_input was presumably intended);
    # a single input() call preserves the actual behavior.
    result = input(message).upper()
    return result in ("Y", "YES", "T", "TRUE")
|
Duke-GCB/DukeDSClient
|
ddsc/core/util.py
|
Python
|
mit
| 16,145
|
[
"VisIt"
] |
045bfd7cc3f5d1b777ca0ad8860cc935804986deef7738567ac20cd0d06a2e6e
|
# -*- coding: utf-8 -*-
"""
.. note::
Functions for calculating surfaces and analyzing them.
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
import langmuir as lm
import numpy as np
try:
import scipy.special as special
import scipy.fftpack as fftpack
import scipy.ndimage as ndimage
import scipy.signal as signal
import scipy.stats as stats
import scipy.misc as misc
except ImportError:
special = None
fftpack = None
ndimage = None
signal = None
stats = None
misc = None
def make_3D(array):
    """
    Force the numpy array passed to be 3D via :py:func:`np.expand_dims`.
    1D input becomes (n, 1, 1); 2D input becomes (nx, ny, 1); 3D+ input is
    returned unchanged.
    :param array: numpy array
    :type array: :py:class:`numpy.ndarray`
    :raises RuntimeError: when the array is 0-dimensional
    """
    ndims = len(array.shape)
    if ndims >= 3:
        return array
    if ndims == 2:
        return np.expand_dims(array, 2)
    if ndims == 1:
        return np.expand_dims(np.expand_dims(array, 1), 1)
    # 'raise X, msg' is Python 2 only; the call form works in both 2 and 3.
    raise RuntimeError('can not expand dimensions')
def load_ascii(handle, square=False, cube=False, shape=None, **kwargs):
    """
    Wrapper around np.loadtxt. Forces data to be at least 3 dimensions.
    :param square: reshape as if data is NxN
    :param cube: reshape as if data is NxNxN
    :param shape: reshape data to this explicit shape
    :type square: bool
    :type cube: bool
    :type shape: list
    :raises RuntimeError: when the data can not be reshaped as requested
    """
    # load the data
    image = np.loadtxt(handle, **kwargs)
    if square:
        try:
            size = int(np.sqrt(image.size))
            image = np.reshape(image, (size, size))
        except ValueError:
            # 'raise X, msg' is Python 2 only; the call form works in both.
            raise RuntimeError('can not reshape data')
    if cube:
        try:
            size = int(image.size ** (1.0 / 3.0))
            image = np.reshape(image, (size, size, size))
        except ValueError:
            raise RuntimeError('can not reshape data')
    if shape is not None:
        image = np.reshape(image, shape)
    # force data to be 3D
    return make_3D(image)
def load_chk(handle):
    """
    Load checkpoint file and convert traps into a surface.
    Returns a 3D array of site values built from the checkpoint's traps,
    using the saved potentials when present.
    """
    chk = lm.checkpoint.load(handle)
    chk.fix_traps()
    grid = lm.grid.Grid.from_checkpoint(chk)
    if chk.potentials:
        xyzv = lm.grid.XYZV(grid, chk.traps, chk.potentials)
    else:
        # NOTE(review): both branches below are identical; the else branch was
        # probably meant to supply a default when 'trap.potential' is absent
        # from chk.parameters -- confirm against langmuir's checkpoint API.
        if 'trap.potential' in chk.parameters:
            xyzv = lm.grid.XYZV(grid, chk.traps, chk['trap.potential'])
        else:
            xyzv = lm.grid.XYZV(grid, chk.traps, chk['trap.potential'])
    return make_3D(xyzv.mv)
def load(handle, rot90=-1, **kwargs):
    """
    Load surface from file. Takes into account the file extension.
    ===== ===============================
    *ext* *func*
    ===== ===============================
    pkl   :py:meth:`common.load_pkl`
    npy   :py:func:`numpy.load`
    chk   :py:meth:`surface.load_chk`
    inp   :py:meth:`surface.load_chk`
    csv   :py:meth:`surface.load_ascii`
    txt   :py:meth:`surface.load_ascii`
    dat   :py:meth:`surface.load_ascii`
    png   :py:meth:`scipy.ndimage.imread`
    jpg   :py:meth:`scipy.ndimage.imread`
    jpeg  :py:meth:`scipy.ndimage.imread`
    ===== ===============================
    :param handle: filename
    :param rot90: number of times to rotate image by 90 degrees
    :type handle: str
    :type rot90: int
    :return: image
    :rtype: :py:class:`numpy.ndarray`
    .. warning::
        image file (png, jpg, etc) data is forced into range [0,255].
    .. warning::
        data is always made 3D via :py:func:`numpy.expand_dims`
    .. warning::
        image data is rotated by -90 degrees.
    """
    if handle.endswith('.gz'):
        # Bug fix: str.rstrip('.gz') strips *characters* from the set {., g, z}
        # and can eat part of the real extension (e.g. 'x.png.gz' -> 'x.pn');
        # slice off the literal '.gz' suffix instead.
        stub, ext = lm.common.splitext(handle[:-len('.gz')])
    else:
        stub, ext = lm.common.splitext(handle)
    if ext == '.pkl':
        return make_3D(lm.common.load_pkl(handle, **kwargs))
    if ext == '.npy':
        return make_3D(np.load(handle))
    if ext in ['.chk', '.inp']:
        return lm.surface.load_chk(handle)
    if ext in ['.csv', '.txt', '.dat']:
        return lm.surface.load_ascii(handle, **kwargs)
    if ext in ['.png', '.jpg', '.jpeg']:
        _kwargs = dict(flatten=True)
        _kwargs.update(**kwargs)
        image = ndimage.imread(handle, **_kwargs)
        image = np.rot90(image, rot90)
        return make_3D(image)
    # 'raise X, msg' is Python 2 only; the call form works in both 2 and 3.
    raise RuntimeError('ext=%s not supported' % ext)
def save_vti(handle, array, **kwargs):
    """
    Save numpy array to vtkImageData XML file. You can open it in paraview.
    :param handle: filename
    :param array: data
    :type handle: str
    :type array: :py:class:`numpy.ndarray`
    """
    image_data = lm.vtkutils.create_image_data_from_array(array, **kwargs)
    lm.vtkutils.save_image_data(handle, image_data)
def save_chk(handle, array):
    """
    Save numpy array to Langmuir checkpoint file.
    :param handle: filename
    :param array: data
    :type handle: str
    :type array: :py:class:`numpy.ndarray`
    """
    grid = lm.grid.Grid(*array.shape)
    checkpoint = lm.checkpoint.CheckPoint.from_grid(grid)
    checkpoint.traps = lm.grid.IndexMapper.map_mesh(grid, array)
    checkpoint.save(handle)
def save(handle, obj, zlevel=0, **kwargs):
    """
    Save object to a file. Takes into account the file extension.
    ===== ===================================
    *ext* *func*
    ===== ===================================
    pkl   :py:meth:`common.save_pkl`
    npy   :py:func:`numpy.save`
    vti   :py:meth:`surface.save_vti`
    csv   :py:meth:`np.savetxt`
    txt   :py:meth:`np.savetxt`
    dat   :py:meth:`np.savetxt`
    png   :py:meth:`scipy.misc.imsave`
    jpg   :py:meth:`scipy.misc.imsave`
    jpeg  :py:meth:`scipy.misc.imsave`
    ===== ===================================
    :param handle: filename
    :param obj: object to save
    :param zlevel: slice z-index
    :type handle: str
    :type array: :py:class:`numpy.ndarray`
    :type zlevel: int
    .. warning::
        image file (png, jpg, etc) data is forced into range [0,255].
    .. warning::
        if ndims is 3 and an image file is being saved, only a slice is saved.
    """
    stub, ext = lm.common.splitext(handle)
    if ext == '.pkl':
        lm.common.save_pkl(obj, handle)
        return handle
    if ext == '.npy':
        np.save(handle, obj)
        return handle
    if ext == '.vti':
        lm.surface.save_vti(handle, obj, **kwargs)
        return handle
    if ext in ['.chk', '.inp']:
        lm.surface.save_chk(handle, obj)
        return handle
    if ext in ['.csv', '.txt', '.dat']:
        # NOTE(review): np.savetxt takes 'delimiter', not 'sep'; the TypeError
        # raised for 'sep' (or for 3D input) is what routes us into the gzip
        # fallback below -- confirm intent before changing these kwargs.
        _kwargs = dict(fmt='%+.18e', sep=',')
        _kwargs.update(**kwargs)
        try:
            np.savetxt(handle, obj, **_kwargs)
        except TypeError:
            handle = lm.common.zhandle(handle, 'wb')
            # 'print >> handle' is Python 2 only; write the same line directly.
            handle.write('# ' + ' '.join([str(s) for s in obj.shape]) + '\n')
            obj = np.reshape(obj, obj.size)
            np.savetxt(handle, obj, **_kwargs)
        return handle
    if ext in ['.png', '.jpg', '.jpeg']:
        if len(obj.shape) > 2:
            obj = obj[:,:,zlevel]
        misc.imsave(handle, obj)
        return handle
    # 'raise X, msg' is Python 2 only; the call form works in both 2 and 3.
    raise RuntimeError('ext=%s not supported' % ext)
def threshold(a, v=0, v0=0, v1=1, copy=False):
    """
    Set values in array above {v} to {v1}, and values at or below {v} to {v0}.
    :param a: array to threshold (modified in place unless copy=True)
    :param v: threshold value
    :param v0: lower value
    :param v1: upper value
    :param copy: copy array
    :type v: float
    :type v0: float
    :type v1: float
    :type copy: bool
    """
    t = np.copy(a) if copy else a
    # Compute the mask before any assignment so the two writes can't interact.
    # (The old code did t[t <= v] = 0 then t[t > 0] = 1, which mislabeled
    # values in (v, 0] whenever v was negative, and indexed with deprecated
    # list-wrapped boolean masks.)
    above = t > v
    t[~above] = v0
    t[above] = v1
    return t
def linear_mapping(array, n=0.0, m=1.0):
    """
    Map values in array to fall in the range [n,m].
    :param array: array like object
    :param n: lower bound
    :param m: upper bound
    :type n: float
    :type m: float
    """
    values = np.asanyarray(array)
    lo = np.amin(values)
    hi = np.amax(values)
    normalized = (values - lo) / (hi - lo)
    return normalized * (m - n) + n
def rfunc(size=None):
    """
    Produces numbers in the range [-0.5, 0.5].
    (np.random.random samples [0, 1), so the result is actually [-0.5, 0.5).)
    :param size: shape of output
    :type size: int
    """
    return np.random.random(size) - 0.5
class WaveDimensions:
    """Compute wavelength, wavenumber, etc from an interval length (L)
    and number of waves (n).
    :param L: interval length
    :param n: number of waves in interval
    :type L: float
    :type n: int
    >>> wx = WaveDimensions(10, 2)
    >>> print(wx)
    [Wave Dimensions]
    L = 10
    n = 2
    lambda = 5.00000e+00
    nubar = 2.00000e-01
    k = 1.25664e+00
    """
    def __init__(self, L=2 * np.pi, n=1):
        self.L = L
        self.n = n
        # derived quantities are filled in by calc()
        self.k = 0.0
        self.nubar = 0.0
        self.wavelength = 0.0
        self.calc()
    def calc(self, L=None, n=None):
        """Perform calculations to compute wavelength, wavenumber, etc.
        Called automatically in constructor.
        :param L: interval length
        :param n: number of waves in interval
        :type L: float
        :type n: int
        """
        if not L is None:
            self.L = L
        if not n is None:
            self.n = n
        # wavelength = L/n, nubar = 1/lambda, k = 2*pi/lambda
        self.wavelength = float(self.L) / self.n
        self.nubar = 1 / self.wavelength
        self.k = (2 * np.pi) / self.wavelength
    def __str__(self):
        s = '[Wave Dimensions]\n'
        s += ' L = %d\n' % self.L
        s += ' n = %d\n' % self.n
        s += ' lambda = %.5e\n' % self.wavelength
        s += ' nubar = %.5e\n' % self.nubar
        s += ' k = %.5e\n' % self.k
        return s
# Axis-aligned basis functions f(x, y, z, kx, ky, kz) with plain wavenumbers.
# (Named defs instead of lambda assignments, per PEP 8 E731; behavior is
# identical.)
def f_sin_x(x, y, z, kx, ky, kz):
    return np.sin(kx * x)

def f_sin_y(x, y, z, kx, ky, kz):
    return np.sin(ky * y)

def f_sin_z(x, y, z, kx, ky, kz):
    return np.sin(kz * z)

def f_cos_x(x, y, z, kx, ky, kz):
    return np.cos(kx * x)

def f_cos_y(x, y, z, kx, ky, kz):
    return np.cos(ky * y)

def f_cos_z(x, y, z, kx, ky, kz):
    return np.cos(kz * z)

# Variants taking WaveDimensions objects (wx, wy, wz) instead of wavenumbers.
def sin_x(x, y, z, wx, wy, wz):
    return np.sin(wx.k * x)

def sin_y(x, y, z, wx, wy, wz):
    return np.sin(wy.k * y)

def sin_z(x, y, z, wx, wy, wz):
    return np.sin(wz.k * z)

def cos_x(x, y, z, wx, wy, wz):
    return np.cos(wx.k * x)

def cos_y(x, y, z, wx, wy, wz):
    return np.cos(wy.k * y)

def cos_z(x, y, z, wx, wy, wz):
    return np.cos(wz.k * z)
def f_gyroid(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) for a gyroid:
    cos(kx x) sin(ky y) + cos(ky y) sin(kz z) + cos(kz z) sin(kx x).
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_gyroid(x, y, z, w.k, w.k, w.k)
    """
    sx, sy, sz = np.sin(kx * x), np.sin(ky * y), np.sin(kz * z)
    cx, cy, cz = np.cos(kx * x), np.cos(ky * y), np.cos(kz * z)
    return cx * sy + cy * sz + cz * sx
def gyroid(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_gyroid` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_gyroid(x, y, z, kx, ky, kz)
def f_scherk_first_surface(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) for Scherk's first surface:
    exp(kx x / 2 pi) cos(ky y) - cos(kz z).
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_scherk_first_surface(x, y, z, w.k, w.k, w.k)
    """
    growth = np.exp(kx / (2.0 * np.pi) * x)
    return growth * np.cos(ky * y) - np.cos(kz * z)
def scherk_first_surface(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_scherk_first_surface` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_scherk_first_surface(x, y, z, kx, ky, kz)
def f_schwarz_p_surface(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) for the Schwarz P surface:
    cos(kx x) + cos(ky y) + cos(kz z).
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_schwarz_p_surface(x, y, z, w.k, w.k, w.k)
    """
    cx = np.cos(kx * x)
    cy = np.cos(ky * y)
    cz = np.cos(kz * z)
    return cx + cy + cz
def schwarz_p_surface(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_schwarz_p_surface` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_schwarz_p_surface(x, y, z, kx, ky, kz)
def f_schwarz_d_surface(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) for the Schwarz D surface:
    sum of the four sin/cos products with an even number of cosines.
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_schwarz_d_surface(x, y, z, w.k, w.k, w.k)
    """
    sx, cx = np.sin(kx * x), np.cos(kx * x)
    sy, cy = np.sin(ky * y), np.cos(ky * y)
    sz, cz = np.sin(kz * z), np.cos(kz * z)
    return (sx * sy * sz +
            sx * cy * cz +
            cx * sy * cz +
            cx * cy * sz)
def schwarz_d_surface(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_schwarz_d_surface` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_schwarz_d_surface(x, y, z, kx, ky, kz)
def f_bandXY(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) = sin(kx x) sin(ky y): bands running along z.
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s) (unused)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz (unused)
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_bandXY(x, y, z, w.k, w.k, w.k)
    """
    sx = np.sin(kx * x)
    sy = np.sin(ky * y)
    return sx * sy
def bandXY(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_bandXY` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_bandXY(x, y, z, kx, ky, kz)
def f_bandXZ(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) = sin(kx x) sin(kz z): bands running along y.
    :param x: x-value(s)
    :param y: y-value(s) (unused)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx
    :param ky: 2 pi ny / Ly (unused)
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_bandXZ(x, y, z, w.k, w.k, w.k)
    """
    sx = np.sin(kx * x)
    sz = np.sin(kz * z)
    return sx * sz
def bandXZ(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_bandXZ` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_bandXZ(x, y, z, kx, ky, kz)
def f_bandYZ(x, y, z, kx, ky, kz):
    """
    Surface function f(x,y,z) = sin(ky y) sin(kz z): bands running along x.
    :param x: x-value(s) (unused)
    :param y: y-value(s)
    :param z: z-value(s)
    :param kx: 2 pi nx / Lx (unused)
    :param ky: 2 pi ny / Ly
    :param kz: 2 pi nz / Lz
    >>> w = WaveDimensions(10, 2)
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_bandYZ(x, y, z, w.k, w.k, w.k)
    """
    sy = np.sin(ky * y)
    sz = np.sin(kz * z)
    return sy * sz
def bandYZ(x, y, z, wx, wy, wz):
    """
    Wrapper around :func:`f_bandYZ` taking a WaveDimensions per axis.
    """
    kx, ky, kz = wx.k, wy.k, wz.k
    return f_bandYZ(x, y, z, kx, ky, kz)
class Kernel(object):
    """
    An x, y, z, v mgrid.
    :param xmin: lower x
    :param xmax: upper x
    :param ymin: lower y
    :param ymax: upper y
    :param zmin: lower z
    :param zmax: upper z
    :param spacing: grid spacing
    :type xmin: float
    :type xmax: float
    :type ymin: float
    :type ymax: float
    :type zmin: float
    :type zmax: float
    :type spacing: float
    """
    def __init__(self, xmin, xmax, ymin, ymax, zmin, zmax, spacing=1.0):
        # Nudge the upper bounds so mgrid includes xmax/ymax/zmax themselves.
        eps = 1e-10
        self.x, self.y, self.z = np.mgrid[xmin:xmax + eps:spacing,
                                          ymin:ymax + eps:spacing,
                                          zmin:zmax + eps:spacing]
        self.v = np.zeros(self.x.shape)
        self.spacing = spacing
class FuncKernel(Kernel):
    """
    An x, y, z, v mgrid whose v is computed from the function f(x, y, z) passed.
    See Kernel for the remaining parameters.
    """
    def __init__(self, func, *args, **kwargs):
        super(FuncKernel, self).__init__(*args, **kwargs)
        self.v = func(self.x, self.y, self.z)
class SimpleKernel(FuncKernel):
    """
    An x, y, z, v mgrid. Computes v using the function = f(x, y, z) passed,
    on a symmetric domain built from a spacing and point counts.
    :param func: function of x, y, z
    :param nx: x-direction has 2*nx + 1 points
    :param ny: y-direction has 2*ny + 1 points
    :param nz: z-direction has 2*nz + 1 points
    :param spacing: grid spacing
    :type func: func
    :type nx: int
    :type ny: int
    :type nz: int
    :type spacing: double
    """
    def __init__(self, func, nx=3, ny=3, nz=3, spacing=1.0):
        half_x = spacing * nx
        half_y = spacing * ny
        half_z = spacing * nz
        FuncKernel.__init__(self, func, -half_x, half_x, -half_y, half_y,
                            -half_z, half_z, spacing)
class RandomKernel(SimpleKernel):
    """
    An x, y, z, v mgrid filled with random noise, normalized to sum to one.
    See SimpleKernel for the domain parameters.
    """
    def __init__(self, *args, **kwargs):
        def noise(x, y, z):
            return np.random.random(x.shape)
        SimpleKernel.__init__(self, noise, *args, **kwargs)
        self.v = self.v / np.sum(self.v)
class GaussianKernel(Kernel):
    """
    An x, y, z, v mgrid holding a normalized 3D Gaussian PDF; the grid size
    covers +/- 3 standard deviations along each axis.
    :param sx: sigma x
    :param sy: sigma y
    :param sz: sigma z
    :param mx: mean x
    :param my: mean y
    :param mz: mean z
    :param spacing: grid spacing
    :type sx: float
    :type sy: float
    :type sz: float
    :type mx: float
    :type my: float
    :type mz: float
    :type spacing: float
    """
    def __init__(self, sx, sy, sz, mx=0.0, my=0.0, mz=0.0, spacing=1.0):
        self.sx, self.sy, self.sz = sx, sy, sz
        Kernel.__init__(self,
                        -3.0 * sx, 3.0 * sx,
                        -3.0 * sy, 3.0 * sy,
                        -3.0 * sz, 3.0 * sz, spacing)
        # Separable PDF: product of one 1D normal per axis, then normalize.
        pdf_x = stats.norm(mx, sx).pdf
        pdf_y = stats.norm(my, sy).pdf
        pdf_z = stats.norm(mz, sz).pdf
        self.v = pdf_x(self.x) * pdf_y(self.y) * pdf_z(self.z)
        self.v = self.v / np.sum(self.v)

    def __str__(self):
        s = '[Kernel]\n'
        s += ' %-7s = %s\n' % ('shape', self.v.shape)
        s += ' %-7s = %s\n' % ('spacing', self.spacing)
        return s
class Isotropic(object):
    """
    Performs convolution of random noise with a kernel to make morphology.
    :param grid: grid
    :param kernel: Kernel instance
    :param rfunc: function that produces random numbers in range [-0.5,0.5]
    :type grid: :py:class:`surface.Grid`
    :type kernel: Kernel
    :type rfunc: func
    """
    def __init__(self, grid, kernel, rfunc=rfunc, v=0.0, mode='same',
                 verbose=False):
        # print() with a single argument works in both Python 2 and 3,
        # unlike the old 'print ...' statements.
        if verbose: print('isotropic: be patient, convolution is slow')
        self.kernel = kernel
        self.grid = grid
        if verbose: print('isotropic: creating noise')
        self.noise = rfunc(self.grid.shape)
        # kernel and grid must have the same spacing
        self.factor = self.grid.dx / self.kernel.spacing
        assert self.factor >= 1.0
        if verbose: print('isotropic: factor = %.3e' % self.factor)
        if self.factor > 1.0:
            # calculate on grid with same spacing as kernel
            self.z_grid = lm.grid.Grid(grid.lx, grid.ly, grid.lz)
            self.z_grid.refine(self.factor)
            # zoom in random noise
            if verbose: print('isotropic: interpolating noise')
            self.z_noise = ndimage.zoom(self.noise, zoom=self.factor)
            # convolve kernel and noise
            if verbose: print('isotropic: convoluting')
            self.z_image = signal.convolve(self.z_noise, self.kernel.v,
                                           mode=mode)
            # zoom out on image
            if verbose: print('isotropic: interpolating image')
            self.image = ndimage.zoom(self.z_image, zoom=1.0 / self.factor)
        else:
            # no zooming needed
            self.z_grid = self.grid
            self.z_noise = self.noise
            if verbose: print('isotropic: convoluting')
            self.z_image = signal.convolve(self.z_noise, self.kernel.v,
                                           mode=mode)
            self.image = self.z_image
        if verbose: print('')

    def __str__(self):
        s = '[Isotropic]\n'
        s += ' %-6s = %s, %s\n' % ('grid', self.grid.shape,
                                   self.z_grid.shape)
        s += ' %-6s = %s, %s\n' % ('noise', self.noise.shape,
                                   self.z_noise.shape)
        s += ' %-6s = %s, %s\n' % ('image', self.image.shape,
                                   self.z_image.shape)
        return s
def f_isotropic(x, y, z, sx, sy, sz, full=False):
    """
    Surface function f(x,y,z) for isotropic morphology according to Jake.
    :param x: x-value(s)
    :param y: y-value(s)
    :param z: z-value(s)
    :param sx: sigma x
    :param sy: sigma y
    :param sz: sigma z
    :param full: return the Isotropic object instead of just the image
    >>> x, y, z = np.mgrid[0:10:100j,0:10:100j,0:10:100j]
    >>> f_isotropic(x, y, z, 1, 1, 1)
    """
    nx = int(x.shape[0])
    ny = int(x.shape[1])
    nz = int(x.shape[2])
    kernel = GaussianKernel(sx, sy, sz, spacing=1.0)
    grid = lm.grid.Grid(nx, ny, nz)
    morphology = Isotropic(grid, kernel)
    if full:
        return morphology
    return morphology.image
def isotropic(x, y, z, wx, wy, wz, full=True):
    """
    Wrapper around :func:`f_isotropic` that uses WaveDimensions.
    Bug fix: the original passed wx.wavelength for all three sigmas,
    silently ignoring the wy and wz arguments.
    """
    return f_isotropic(x, y, z, wx.wavelength, wy.wavelength, wz.wavelength,
                       full)
class FFT(object):
    """Shared helpers for the FFT1D/FFT2D/FFT3D wrappers."""
    def __init__(self):
        self.window = None

    @staticmethod
    def _delta(a, s0, s1):
        """Spacing between elements a[s1] and a[s0]; 1.0 when out of range."""
        a = np.asanyarray(a)
        try:
            return abs(a[s1] - a[s0])
        except IndexError:
            # Single-sample axes have no measurable spacing; fall back to 1.
            return 1.0
class FFT1D(FFT):
    """
    Shifted 1D FFT of a sampled signal s(x), with power spectrum.
    :param x: sample positions (1D, evenly spaced)
    :param s: sample values, same shape as x
    :param detrend: remove linear trend before transforming
    :param window: apply a Hamming window before transforming
    :param lmap: linearly map the power spectrum into [0, 1]
    """
    def __init__(self, x, s, detrend=True, window=False, lmap=True, **kwargs):
        FFT.__init__(self)
        # r-space
        self.x = np.asanyarray(x)
        self.s = np.asanyarray(s)
        assert len(self.x.shape) == 1
        assert self.x.shape == self.s.shape
        assert self.x.size == self.s.size
        # r-space spacing
        self.dx = self._delta(self.x, np.index_exp[0], np.index_exp[1])
        # r-space samples
        self.n0 = self.x.shape[0]
        # r-space lengths
        self.lx = self.n0 * self.dx
        # k-space
        self.u = fftpack.fftshift(fftpack.fftfreq(self.n0))
        # k-space spacing -- bug fix: this was measured on self.x (giving dx
        # again) instead of self.u, unlike the matching code in FFT2D/FFT3D.
        self.du = self._delta(self.u, np.index_exp[0], np.index_exp[1])
        # k-space lengths
        self.lu = self.n0 * self.du
        # nyquist
        try:
            self.nyquist_u = 0.5/self.dx
        except ZeroDivisionError:
            self.nyquist_u = 0.0
        self.k = self.u
        if detrend:
            self.s = signal.detrend(self.s)
        if window:
            self._window()
            self.s = self.s * self.window
        self.fft = fftpack.fftshift(fftpack.fft(self.s))
        self.power = self.fft.real**2 + self.fft.imag**2
        if lmap:
            self.power = lm.surface.linear_mapping(self.power)

    def _window(self):
        # Hamming window sized to the frequency axis.
        self.window = signal.hamming(self.u.size)

    def __str__(self):
        s = '[FFT1D]\n'
        s += ' %-6s = %s\n' % ('shape', self.n0)
        return s
class FFT2D(FFT):
    # Shifted 2D FFT of a sampled signal s(x, y), with power spectrum.
    # detrend removes a linear trend, window applies a 2D (outer-product)
    # Hamming window, and lmap maps the power spectrum into [0, 1].
    def __init__(self, x, y, s, detrend=True, window=False, lmap=True, **kwargs):
        FFT.__init__(self)
        # r-space
        self.x = np.asanyarray(x)
        self.y = np.asanyarray(y)
        self.s = np.asanyarray(s)
        assert len(self.x.shape) == 2
        assert self.x.shape == self.y.shape == self.s.shape
        assert self.x.size == self.y.size == self.s.size
        # r-space spacing (measured between adjacent samples along each axis)
        self.dx = self._delta(self.x, np.index_exp[0,0], np.index_exp[1,0])
        self.dy = self._delta(self.y, np.index_exp[0,0], np.index_exp[0,1])
        # r-space samples
        self.n0 = self.x.shape[0]
        self.n1 = self.x.shape[1]
        # r-space lengths
        self.lx = self.n0 * self.dx
        self.ly = self.n1 * self.dy
        # k-space (shifted so zero frequency is centered)
        u = fftpack.fftshift(fftpack.fftfreq(self.n0))
        v = fftpack.fftshift(fftpack.fftfreq(self.n1))
        self.u, self.v = np.meshgrid(u, v, indexing='ij')
        # k-space spacing
        self.du = self._delta(self.u, np.index_exp[0,0], np.index_exp[1,0])
        self.dv = self._delta(self.v, np.index_exp[0,0], np.index_exp[0,1])
        # k-space lengths
        self.lu = self.n0 * self.du
        self.lv = self.n1 * self.dv
        # nyquist (0 when spacing is degenerate)
        try:
            self.nyquist_u = 0.5/self.dx
        except ZeroDivisionError:
            self.nyquist_u = 0.0
        try:
            self.nyquist_v = 0.5/self.dy
        except ZeroDivisionError:
            self.nyquist_v = 0.0
        # radial wavenumber magnitude
        self.k = np.sqrt(self.u**2 + self.v**2)
        if detrend:
            self.s = signal.detrend(self.s)
        if window:
            self._window()
            self.s = self.s * self.window
        self.fft = fftpack.fftshift(fftpack.fft2(self.s))
        self.power = self.fft.real**2 + self.fft.imag**2
        if lmap:
            self.power = lm.surface.linear_mapping(self.power)
    def _window(self):
        # 2D window built as the outer product of two 1D Hamming windows.
        win_u = signal.hamming(self.u.size)
        win_v = signal.hamming(self.v.size)
        self.window = np.outer(win_u, win_v)
        self.window = np.reshape(self.window, self.s.shape)
    def __str__(self):
        s = '[FFT2D]\n'
        s += ' %-6s = %s, %s\n' % ('shape', self.n0, self.n1)
        return s
class FFT3D(FFT):
    """3D FFT of a signal sampled on a regular (x, y, z) grid.

    Mirrors FFT2D: optionally detrends and windows the signal, then stores
    the fftshift'ed transform (self.fft), the power spectrum (self.power),
    and the k-space coordinate grids (self.u, self.v, self.w).
    """
    def __init__(self, x, y, z, s, detrend=True, window=False, lmap=True, **kwargs):
        """
        Parameters
        ----------
        x, y, z : 3D coordinate arrays (regular grid assumed)
        s : 3D signal array, same shape as the coordinates
        detrend : subtract the mean before the FFT (NOTE: unlike FFT2D,
            which uses scipy.signal.detrend, only the mean is removed here)
        window : multiply by a separable 3D Hamming window before the FFT
        lmap : rescale the power spectrum with lm.surface.linear_mapping
        """
        FFT.__init__(self)
        # r-space
        self.x = np.asanyarray(x)
        self.y = np.asanyarray(y)
        self.z = np.asanyarray(z)
        self.s = np.asanyarray(s)
        assert len(self.x.shape) == 3
        assert self.x.shape == self.y.shape == self.z.shape == self.s.shape
        assert self.x.size == self.y.size == self.z.size == self.s.size
        # r-space spacing
        self.dx = self._delta(self.x, np.index_exp[0,0,0], np.index_exp[1,0,0])
        self.dy = self._delta(self.y, np.index_exp[0,0,0], np.index_exp[0,1,0])
        self.dz = self._delta(self.z, np.index_exp[0,0,0], np.index_exp[0,0,1])
        # r-space samples
        self.n0 = self.x.shape[0]
        self.n1 = self.x.shape[1]
        self.n2 = self.x.shape[2]
        # r-space lengths
        self.lx = self.n0 * self.dx
        self.ly = self.n1 * self.dy
        self.lz = self.n2 * self.dz
        # k-space
        u = fftpack.fftshift(fftpack.fftfreq(self.n0))
        v = fftpack.fftshift(fftpack.fftfreq(self.n1))
        w = fftpack.fftshift(fftpack.fftfreq(self.n2))
        self.u, self.v, self.w = np.meshgrid(u, v, w, indexing='ij')
        # k-space spacing
        self.du = self._delta(self.u, np.index_exp[0,0,0], np.index_exp[1,0,0])
        self.dv = self._delta(self.v, np.index_exp[0,0,0], np.index_exp[0,1,0])
        self.dw = self._delta(self.w, np.index_exp[0,0,0], np.index_exp[0,0,1])
        # k-space lengths
        self.lu = self.n0 * self.du
        self.lv = self.n1 * self.dv
        self.lw = self.n2 * self.dw
        # nyquist (guard against degenerate zero spacing)
        try:
            self.nyquist_u = 0.5/self.dx
        except ZeroDivisionError:
            self.nyquist_u = 0.0
        try:
            self.nyquist_v = 0.5/self.dy
        except ZeroDivisionError:
            self.nyquist_v = 0.0
        try:
            self.nyquist_w = 0.5/self.dz
        except ZeroDivisionError:
            self.nyquist_w = 0.0
        self.k = np.sqrt(self.u**2 + self.v**2 + self.w**2)
        if detrend:
            self.s = self.s - np.average(self.s)
        if window:
            self._window()
            self.s = self.s * self.window
        self.fft = fftpack.fftshift(fftpack.fftn(self.s))
        self.power = self.fft.real**2 + self.fft.imag**2
        if lmap:
            self.power = lm.surface.linear_mapping(self.power)
    def _window(self):
        """Build a separable 3D Hamming window matching self.s.shape.

        BUGFIX: the original used signal.hamming(self.u.size) etc., but
        self.u / self.v / self.w are full 3D meshgrids (size n0*n1*n2 each),
        so the chained np.outer produced an array that cannot be reshaped to
        s.shape. The 1D windows must use the per-axis counts n0, n1, n2;
        the chained outer product is then (n0*n1, n2) and reshapes cleanly
        to (n0, n1, n2).
        """
        win_u = signal.hamming(self.n0)
        win_v = signal.hamming(self.n1)
        win_w = signal.hamming(self.n2)
        self.window = np.outer(np.outer(win_u, win_v), win_w)
        self.window = np.reshape(self.window, self.s.shape)
    def __str__(self):
        # Short human-readable summary: class tag plus per-axis sample counts.
        s = '[FFT3D]\n'
        s += ' %-6s = %s, %s, %s\n' % ('shape', self.n0, self.n1, self.n2)
        return s
|
LangmuirSim/langmuir
|
LangmuirPython/langmuir/surface.py
|
Python
|
gpl-2.0
| 29,896
|
[
"Gaussian",
"ParaView"
] |
914b0a957753cf3fb93331fe304930b977939db2d84c2bf286b8a596064e0b76
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
# URL configuration for the nectr project: static pages, admin, user/tutor
# apps, dashboard, search, messaging and chat, plus media serving.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='homepage.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='about_nectr.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('nectr.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^tutor/', include('nectr.tutor.urls', namespace='tutors')),
    # Dashboard Management
    url(r'^dashboard/', include('nectr.dashboard.urls', namespace='dashboard')),
    # Your stuff: custom urls includes go here
    url(r'^search/', include('nectr.search.urls')),
    # Search the Hive
    # url(r'^search_the_hive', TemplateView.as_view(template_name='look_nectr.html')),
    # Join the Hive
    url(r'^join_the_hive', TemplateView.as_view(template_name='joinpage_nectr.html'), name='join'),
    # How it Works
    url(r'^how_it_works', TemplateView.as_view(template_name='how_nectr.html'), name='how_it_works'),
    url(r'^test_profile', TemplateView.as_view(template_name='profile/base_profile.html'), name='base_profile'),
    # url(r'^test_joinpage_nectr', TemplateView.as_view(template_name='joinpage_nectr.html'), name='test5'),
    # Messaging Include
    # NOTE(review): the app_name argument to include() was deprecated and later
    # removed in newer Django versions — fine for the version this project pins,
    # but confirm before upgrading.
    url(r'^messages/', include('postman.urls', namespace='postman', app_name='postman')),
    url(r'^chat/', include('nectr.chat.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
|
nectR-Tutoring/nectr
|
config/urls.py
|
Python
|
mit
| 2,898
|
[
"VisIt"
] |
c1fde64d0961a81256b8c594cffe86a1b08773a1f298567ad44835f149f4fb03
|
import numpy as np
# Silence floating-point warnings globally (e.g. log of zero in posteriors).
# NOTE: the second call with all='ignore' supersedes the divide='ignore' call.
np.seterr(divide='ignore') #ignore errors in log division
np.seterr(all='ignore') #ignore errors in log division
import sys
import time
##########################################################################################
def BGMCMC(LogPosterior,gp,post_args,ch_len,ep,gibbs_index,chain_filenames='MCMC_chain',n_chains=0,\
    adapt_limits=(0,0,0),glob_limits=(0,0,0),thin=1,orth=0,acc=0.234):
    """
    Generalisation of the MCMC.py code to allow for blocked Gibbs sampling. See MCMC
    docstring for details. Here I've added the addition of a Gibbs index, which is just an
    array of the same size as the inputs, which includes the order in which to index. 0
    indicates the parmeters is not to vary.
    eg gibbs_index = [0,1,0,1,2,2,3] will vary parameters 1, 2 then 3 in turn, evaluating
    the log posterior at each and accepting them according to the MH rule.
    Note the logPosterior will be evaluated ch_len * no_gibbs_steps times, and is especially
    useful when using the InferGP class posterior, which only constructs/inverts the covariance
    when necessary, and stores previous results
    acc - target acceptance ratio - for infinite iid Gaussian dist -> 23.4%, for single par
    is 44%. These will be varied independently for each block, and can have a different target
    acceptance if an array is provided
    When orth=0 (ie default) be aware that parameters in different blocks may still be
    correlated. This is taken into account in the separate scaling to some degree, but
    highly correlated variables should probably be in the same block, or alternatively set
    orth = 1 for orthogonal steps (an intermediate solution is possible, but I couldn't be
    bothered coding it right now)

    NOTE(review): Python-2-only code — relies on xrange, on range() returning a
    list (item assignment to `gi` below), and on integer division for all the
    thinning / progress-bar arithmetic (ch_len/thin, ch_len/20, ...).
    """
    #first set chain filenames
    if n_chains > 0: chain_filenames = ["MCMC_chain_%d" % ch_no for ch_no in range(1,n_chains+1)]
    #print parameters
    PrintParams(chain_filenames,ch_len,LogPosterior,adapt_limits,glob_limits,gp,ep,gibbs_index)
    print ('-' * 80)
    #prep gibbs array
    gibbs = np.array(gibbs_index)
    no_steps = gibbs.max()
    # py2: range() returns a list, so the item assignment below is valid
    gi = range(no_steps)
    for q in range(1,no_steps+1):
        gi[q-1] = np.where(gibbs==q) #get index to the gibbs steps
    ####### loop over chains ###############
    for n,chain in enumerate(chain_filenames):
        #initialise parameters
        p,e = np.copy(gp),np.copy(ep)
        p_acc,L_acc = np.copy(p),-np.inf
        #arrays for storing results (thinned; relies on py2 integer division)
        ParArr = np.zeros((ch_len/thin)*len(p)).reshape(ch_len/thin,len(p))
        PostArr = np.zeros(ch_len/thin)
        AccArr = np.zeros(ch_len*no_steps).reshape(ch_len,no_steps) #acceptance rate for each Gibbs block
        #jump parameters
        #error array computed in advance - much faster to compute as a block
        G = np.zeros(no_steps) #set default G depending on no of varying parameters per block
        for q in range(1,no_steps+1): G[q-1] = (2.4**2/(gibbs==q).sum())
        Garr = np.array([G[v-1] if v>0 else 0 for v in gibbs])
        ACC = np.ones(no_steps) * acc
        K = np.diag(e**2) #create starting (diagonal) covariance matrix
        #RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
        np.random.seed()
        # NOTE(review): `np.random.np.random.multivariate_normal` resolves on the
        # old numpy this was written against; presumably np.random.multivariate_normal
        # was meant — confirm before running on modern numpy.
        RandArr = np.random.np.random.multivariate_normal(np.zeros(p.size),K,ch_len) * Garr
        #set columns to zero after too! - for large K sometimes zero variance parameters have small random scatter
        RandArr[:][:,np.where(e==0.)[0]] = 0.
        #print "Computing Chain %d: '%s' " % (n+1,chain),
        start = time.time()
        ####### individual chain ###############
        for i in xrange(ch_len):
            if i % ((ch_len)/20) == 0:
                PrintBar(n,chain,i,ch_len,AccArr,start,no_steps)
                #sys.stdout.write('#'); sys.stdout.flush();
            #Blocked Gibbs algorithm
            #cycle over Gibbs steps
            for q in range(no_steps):
                #gi = np.where(gibbs==q) #get index to the gibbs steps
                #print "step = ",q,
                p_prop = np.copy(p_acc)
                p_prop[gi[q]] += RandArr[i][gi[q]]
                #print p_prop
                L_prop = LogPosterior(p_prop,*post_args)
                #Metropolis algorithm to accept step
                if np.random.rand() < np.exp(L_prop - L_acc):
                    p_acc,L_acc = p_prop,L_prop
                    AccArr[i][q] = 1 #update acceptance array (store no. acceptances for gibbs)
                    # print "acc"
            #add new posterior and parameters to chain
            if i%thin==0: ParArr[i/thin],PostArr[i/thin] = p_acc,L_acc
            #adaptive stepsizes
            if (i <= adapt_limits[1]) and (i > adapt_limits[0]):
                if (i-adapt_limits[0]) % ((adapt_limits[1]-adapt_limits[0])/adapt_limits[2]) == 0:
                    #RA = np.random.normal(0.,1.,len(p)*ch_len).reshape(ch_len,len(p)) * e * G
                    if orth: K = np.diag(((e + 4*ParArr[adapt_limits[0]/thin:i/thin].std(axis=0))/5.)**2.) #for diagonal covariance matrix
                    else: K = (K + 4.*np.cov(ParArr[adapt_limits[0]/thin:i/thin],rowvar=0))/5.
                    K[np.where(e==0.)],K[:,np.where(e==0.)] = 0.,0. #reset error=0. values to 0.
                    RandArr[i:] = np.random.np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * Garr
                    RandArr[i:][:,np.where(e==0.)[0]] = 0. #set columns to zero after too!
            #adaptive global step size
            if (i <= glob_limits[1]) and (i > glob_limits[0]):
                if (i-glob_limits[0]) % ((glob_limits[1]-glob_limits[0])/glob_limits[2]) == 0:
                    for q in range(no_steps): #update G for each block
                        G[q] *= (1./ACC[q]) * min(0.9,max(0.1,AccArr[:,q][i-(glob_limits[1]-glob_limits[0])/glob_limits[2]:i].sum()/((glob_limits[1]-glob_limits[0])/glob_limits[2])))
                    Garr = np.array([G[v-1] if v>0 else 0 for v in gibbs])
                    RandArr[i:] = np.random.np.random.multivariate_normal(np.zeros(p.size),K,ch_len-i) * Garr
                    RandArr[i:][:,np.where(e==0.)[0]] = 0.
                    #print G
        ####### end individual chain ###########
        PrintBar(n,chain,i,ch_len,AccArr,start,no_steps); print
        np.save(chain+".npy",np.concatenate([PostArr.reshape(PostArr.size,1),ParArr],axis=1))
    ####### end loop over chains ############
    print ('-' * 80)
##########################################################################################
def PrintBar(n,chain,i,ch_len,AccArr,start,no_steps):
    """Print an in-place progress bar for one chain.

    Shows elapsed time and, after the first 20% burn-in of the chain, the
    running acceptance rate overall and per Gibbs block.

    NOTE(review): Python-2 idioms throughout — integer division (ch_len/5,
    i/(ch_len/20)) and the trailing comma in print to suppress the newline;
    np.float was removed in numpy >= 1.24.
    """
    ts = time.time()-start
    if i <= ch_len/5:
        a_str = ""
        a_str2 = ""
    else:
        # first condition of the ternary is always False here (outer if already
        # excluded i <= ch_len/5), so the acceptance string is always built
        a_str = "" if i <= ch_len/5 else ", acc = %.2f%%" % (100.*np.float(AccArr[ch_len/5:i].sum())/no_steps/(i-ch_len/5+1))
        a_str2 = "["+"".join(["%.2f%%," % (100.*np.float(AccArr[ch_len/5:i].sum(axis=0)[q])/(i-ch_len/5+1)) for q in range(no_steps)])+"\b]"
    print ("\rComputing Chain %d: '%s' %-20s t = %dm %.2fs%s" % (n+1,chain,'#'*(i/(ch_len/20)+1),ts // 60., ts % 60.,a_str),)
    print (a_str2,)
    sys.stdout.flush();
##########################################################################################
def PrintParams(ch_filenames,ch_len,posterior,adapt_limits,glob_limits,gp,ep,gibbs):
    """Print a human-readable summary of the sampler configuration.

    Lists chain count/length, adaption limits (if enabled), the posterior
    callable, and each parameter's start value, proposal size and Gibbs block.

    NOTE(review): lines printing several comma-separated items rely on
    Python-2 print-statement semantics; under Python 3 they print a tuple.
    """
    print ("Infer.BGMCMC runnning...")
    print ("Blocked Gibbs MCMC parameters:")
    print (" No Chains: %d" % len(ch_filenames))
    print (" Chain Length: %d" % ch_len)
    if(adapt_limits[2]): print (" Relative-step adaption limits: (%d,%d,%d)" % (adapt_limits[0],adapt_limits[1],adapt_limits[2]))
    if(glob_limits[2]): print (" Global-step adaption limits: (%d,%d,%d)" % (glob_limits[0],glob_limits[1],glob_limits[2]))
    print (" Computing chains:", ch_filenames)
    print (" Posterior probability function: ", posterior)
    print (" Function params <value prop_size [block]>:")
    for q in range(len(gp)):
        print (" p[%d] = %f +- %f [%d]" % (q,gp[q],ep[q],gibbs[q]))
##########################################################################################
|
nealegibson/Infer
|
src/MCMC_BGibbs.py
|
Python
|
gpl-3.0
| 7,699
|
[
"Gaussian"
] |
06fe5b2654cc54a6113623ec50c6729205e296945056ba0b5c7f75bcce6dc683
|
import datetime
from django.core.management.base import NoArgsCommand
from django.core.urlresolvers import reverse
from django.db import connection
from django.db.models import Q, F
from askbot.models import User, Post, PostRevision, Thread
from askbot.models import Activity, EmailFeedSetting
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings as django_settings
from askbot.conf import settings as askbot_settings
from django.utils.datastructures import SortedDict
from django.contrib.contenttypes.models import ContentType
from askbot import const
from askbot.utils import mail
from askbot.utils.slug import slugify
# Global debug switch: when True, alert emails are redirected to ADMINS and
# no feed/activity state is saved to the database.
DEBUG_THIS_COMMAND = False
def get_all_origin_posts(mentions):
    """Collect the distinct origin posts referenced by mention activities.

    Each mention's content_object is a post; get_origin_post() resolves it
    to its root post. Duplicates are collapsed via a set, so the order of
    the returned list is unspecified (as before).
    """
    return list({
        mention.content_object.get_origin_post() for mention in mentions
    })
#todo: refactor this as class
def extend_question_list(src, dst, cutoff_time=None, limit=False, add_mention=False, add_comment=False):
    """Merge the questions from *src* into the ordered dict *dst*.

    src is a query set with questions, or None (in which case nothing
    happens — that subscription type is simply unused). dst maps each
    question to a meta_data dict; the reporting cutoff time per question is
    raised to the latest value seen, to be more permissive about updates.

    When cutoff_time is not given it is read from src.cutoff_time (attached
    by the caller); a missing attribute raises ValueError. With limit=True
    no new questions are added once dst already holds MAX_ALERTS_PER_EMAIL
    entries. add_mention / add_comment bump the per-question counters.
    """
    if src is None:
        return
    if limit and len(dst.keys()) >= askbot_settings.MAX_ALERTS_PER_EMAIL:
        return
    if cutoff_time is None:
        # EAFP: fall back to the per-queryset cutoff attached by the caller
        try:
            cutoff_time = src.cutoff_time
        except AttributeError:
            raise ValueError('cutoff_time is a mandatory parameter')
    for question in src:
        meta_data = dst.setdefault(question, {'cutoff_time': cutoff_time})
        if cutoff_time > meta_data['cutoff_time']:
            # the latest cutoff time wins for a given question when it falls
            # into several subscription groups — makes the mailer more eager
            meta_data['cutoff_time'] = cutoff_time
        if add_mention:
            meta_data['mentions'] = meta_data.get('mentions', 0) + 1
        if add_comment:
            meta_data['comments'] = meta_data.get('comments', 0) + 1
def format_action_count(string, number, output):
    """Append the translated, %-formatted count string to *output*.

    Nothing is appended when the count is zero or negative, so the digest
    only mentions actions that actually happened.
    """
    if number <= 0:
        return
    output.append(_(string) % {'num': number})
class Command(NoArgsCommand):
    """Management command: email each user a digest of recently updated questions.

    For every user, collects questions matching their email feed settings
    (selected / asked / answered / all, plus mentions and comments), filters
    out anything already reported, and sends one HTML digest email.

    NOTE(review): Python-2 code (`except Exception, e` syntax below).
    """
    def handle_noargs(self, **options):
        """Entry point: send alerts if enabled, always closing the DB connection."""
        if askbot_settings.ENABLE_EMAIL_ALERTS:
            try:
                try:
                    self.send_email_alerts()
                # errors are printed, not re-raised, so one bad user does not
                # abort the whole cron run
                except Exception, e:
                    print e
            finally:
                connection.close()
    def get_updated_questions_for_user(self,user):
        """
        retreive relevant question updates for the user
        according to their subscriptions and recorded question
        views
        """
        user_feeds = EmailFeedSetting.objects.filter(
            subscriber=user
        ).exclude(
            frequency__in=('n', 'i')
        )
        should_proceed = False
        for feed in user_feeds:
            if feed.should_send_now() == True:
                should_proceed = True
                break
        #shortcircuit - if there is no ripe feed to work on for this user
        if should_proceed == False:
            return {}
        #these are placeholders for separate query sets per question group
        #there are four groups - one for each EmailFeedSetting.feed_type
        #and each group has subtypes A and B
        #that's because of the strange thing commented below
        #see note on Q and F objects marked with todo tag
        q_sel_A = None
        q_sel_B = None
        q_ask_A = None
        q_ask_B = None
        q_ans_A = None
        q_ans_B = None
        q_all_A = None
        q_all_B = None
        #base question query set for this user
        #basic things - not deleted, not closed, not too old
        #not last edited by the same user
        base_qs = Post.objects.get_questions().exclude(
            thread__last_activity_by=user
        ).exclude(
            thread__last_activity_at__lt=user.date_joined#exclude old stuff
        ).exclude(
            deleted=True
        ).exclude(
            thread__closed=True
        ).order_by('-thread__last_activity_at')
        if askbot_settings.ENABLE_CONTENT_MODERATION:
            base_qs = base_qs.filter(approved = True)
        #todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
        # Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
        #returns way more questions than you might think it should
        #so because of that I've created separate query sets Q_set2 and Q_set3
        #plus two separate queries run faster!
        #build two two queries based
        #questions that are not seen by the user at all
        not_seen_qs = base_qs.filter(~Q(viewed__who=user))
        #questions that were seen, but before last modification
        seen_before_last_mod_qs = base_qs.filter(
            Q(
                viewed__who=user,
                viewed__when__lt=F('thread__last_activity_at')
            )
        )
        #shorten variables for convenience
        Q_set_A = not_seen_qs
        Q_set_B = seen_before_last_mod_qs
        for feed in user_feeds:
            if feed.feed_type == 'm_and_c':
                #alerts on mentions and comments are processed separately
                #because comments to questions do not trigger change of last_updated
                #this may be changed in the future though, see
                #http://askbot.org/en/question/96/
                continue
            #each group of updates represented by the corresponding
            #query set has it's own cutoff time
            #that cutoff time is computed for each user individually
            #and stored as a parameter "cutoff_time"
            #we won't send email for a given question if an email has been
            #sent after that cutoff_time
            if feed.should_send_now():
                if DEBUG_THIS_COMMAND == False:
                    feed.mark_reported_now()
                cutoff_time = feed.get_previous_report_cutoff_time()
                if feed.feed_type == 'q_sel':
                    q_sel_A = Q_set_A.filter(thread__followed_by=user)
                    q_sel_A.cutoff_time = cutoff_time #store cutoff time per query set
                    q_sel_B = Q_set_B.filter(thread__followed_by=user)
                    q_sel_B.cutoff_time = cutoff_time #store cutoff time per query set
                elif feed.feed_type == 'q_ask':
                    q_ask_A = Q_set_A.filter(author=user)
                    q_ask_A.cutoff_time = cutoff_time
                    q_ask_B = Q_set_B.filter(author=user)
                    q_ask_B.cutoff_time = cutoff_time
                elif feed.feed_type == 'q_ans':
                    q_ans_A = Q_set_A.filter(thread__posts__author=user, thread__posts__post_type='answer')
                    q_ans_A = q_ans_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_ans_A.cutoff_time = cutoff_time
                    q_ans_B = Q_set_B.filter(thread__posts__author=user, thread__posts__post_type='answer')
                    q_ans_B = q_ans_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_ans_B.cutoff_time = cutoff_time
                elif feed.feed_type == 'q_all':
                    q_all_A = user.get_tag_filtered_questions(Q_set_A)
                    q_all_B = user.get_tag_filtered_questions(Q_set_B)
                    q_all_A = q_all_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_all_B = q_all_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_all_A.cutoff_time = cutoff_time
                    q_all_B.cutoff_time = cutoff_time
        #build ordered list questions for the email report
        q_list = SortedDict()
        #todo: refactor q_list into a separate class?
        extend_question_list(q_sel_A, q_list)
        extend_question_list(q_sel_B, q_list)
        #build list of comment and mention responses here
        #it is separate because posts are not marked as changed
        #when people add comments
        #mention responses could be collected in the loop above, but
        #it is inconvenient, because feed_type m_and_c bundles the two
        #also we collect metadata for these here
        try:
            feed = user_feeds.get(feed_type='m_and_c')
            if feed.should_send_now():
                cutoff_time = feed.get_previous_report_cutoff_time()
                comments = Post.objects.get_comments().filter(
                    added_at__lt = cutoff_time,
                ).exclude(
                    author = user
                )
                q_commented = list()
                for c in comments:
                    post = c.parent
                    if post.author != user:
                        continue
                    #skip is post was seen by the user after
                    #the comment posting time
                    q_commented.append(post.get_origin_post())
                extend_question_list(
                    q_commented,
                    q_list,
                    cutoff_time = cutoff_time,
                    add_comment = True
                )
                mentions = Activity.objects.get_mentions(
                    mentioned_at__lt = cutoff_time,
                    mentioned_whom = user
                )
                #print 'have %d mentions' % len(mentions)
                #MM = Activity.objects.filter(activity_type = const.TYPE_ACTIVITY_MENTION)
                #print 'have %d total mentions' % len(MM)
                #for m in MM:
                #    print m
                mention_posts = get_all_origin_posts(mentions)
                q_mentions_id = [q.id for q in mention_posts]
                q_mentions_A = Q_set_A.filter(id__in = q_mentions_id)
                q_mentions_A.cutoff_time = cutoff_time
                extend_question_list(q_mentions_A, q_list, add_mention=True)
                q_mentions_B = Q_set_B.filter(id__in = q_mentions_id)
                q_mentions_B.cutoff_time = cutoff_time
                extend_question_list(q_mentions_B, q_list, add_mention=True)
        except EmailFeedSetting.DoesNotExist:
            pass
        if user.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
            extend_question_list(q_all_A, q_list)
            extend_question_list(q_all_B, q_list)
        extend_question_list(q_ask_A, q_list, limit=True)
        extend_question_list(q_ask_B, q_list, limit=True)
        extend_question_list(q_ans_A, q_list, limit=True)
        extend_question_list(q_ans_B, q_list, limit=True)
        if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
            extend_question_list(q_all_A, q_list, limit=True)
            extend_question_list(q_all_B, q_list, limit=True)
        ctype = ContentType.objects.get_for_model(Post)
        EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT
        #up to this point we still don't know if emails about
        #collected questions were sent recently
        #the next loop examines activity record and decides
        #for each question, whether it needs to be included or not
        #into the report
        for q, meta_data in q_list.items():
            #this loop edits meta_data for each question
            #so that user will receive counts on new edits new answers, etc
            #and marks questions that need to be skipped
            #because an email about them was sent recently enough
            #also it keeps a record of latest email activity per question per user
            try:
                #todo: is it possible to use content_object here, instead of
                #content type and object_id pair?
                update_info = Activity.objects.get(
                    user=user,
                    content_type=ctype,
                    object_id=q.id,
                    activity_type=EMAIL_UPDATE_ACTIVITY
                )
                emailed_at = update_info.active_at
            except Activity.DoesNotExist:
                update_info = Activity(
                    user=user,
                    content_object=q,
                    activity_type=EMAIL_UPDATE_ACTIVITY
                )
                emailed_at = datetime.datetime(1970, 1, 1)#long time ago
            except Activity.MultipleObjectsReturned:
                raise Exception(
                    'server error - multiple question email activities '
                    'found per user-question pair'
                )
            cutoff_time = meta_data['cutoff_time']#cutoff time for the question
            #skip question if we need to wait longer because
            #the delay before the next email has not yet elapsed
            #or if last email was sent after the most recent modification
            if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
                meta_data['skip'] = True
                continue
            #collect info on all sorts of news that happened after
            #the most recent emailing to the user about this question
            q_rev = PostRevision.objects.question_revisions().filter(
                post=q,
                revised_at__gt=emailed_at
            )
            q_rev = q_rev.exclude(author=user)
            #now update all sorts of metadata per question
            meta_data['q_rev'] = len(q_rev)
            if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
                meta_data['q_rev'] = 0
                meta_data['new_q'] = True
            else:
                meta_data['new_q'] = False
            new_ans = Post.objects.get_answers().filter(
                thread=q.thread,
                added_at__gt=emailed_at,
                deleted=False,
            )
            new_ans = new_ans.exclude(author=user)
            meta_data['new_ans'] = len(new_ans)
            ans_rev = PostRevision.objects.answer_revisions().filter(
                # answer__question = q
                post__thread=q.thread,
                post__deleted = False,
                revised_at__gt = emailed_at
            ).distinct()
            ans_rev = ans_rev.exclude(author=user)
            meta_data['ans_rev'] = len(ans_rev)
            comments = meta_data.get('comments', 0)
            mentions = meta_data.get('mentions', 0)
            #print meta_data
            #finally skip question if there are no news indeed
            if len(q_rev) + len(new_ans) + len(ans_rev) + comments + mentions == 0:
                meta_data['skip'] = True
                #print 'skipping'
            else:
                meta_data['skip'] = False
                #print 'not skipping'
                update_info.active_at = datetime.datetime.now()
                if DEBUG_THIS_COMMAND == False:
                    update_info.save() #save question email update activity
        #q_list is actually an ordered dictionary
        #print 'user %s gets %d' % (user.username, len(q_list.keys()))
        #todo: sort question list by update time
        return q_list
    def send_email_alerts(self):
        """Build and send one digest email per user with pending updates.

        Does not change question state itself (beyond what
        get_updated_questions_for_user records); only composes and sends mail.
        """
        #does not change the database, only sends the email
        #todo: move this to template
        for user in User.objects.all():
            user.add_missing_askbot_subscriptions()
            #todo: q_list is a dictionary, not a list
            q_list = self.get_updated_questions_for_user(user)
            if len(q_list.keys()) == 0:
                continue
            num_q = 0
            # NOTE: deleting while iterating is safe here only because py2
            # items() returns a list snapshot
            for question, meta_data in q_list.items():
                if meta_data['skip']:
                    del q_list[question]
                else:
                    num_q += 1
            if num_q > 0:
                url_prefix = askbot_settings.APP_URL
                threads = Thread.objects.filter(id__in=[qq.thread_id for qq in q_list.keys()])
                tag_summary = Thread.objects.get_tag_summary_from_threads(threads)
                question_count = len(q_list.keys())
                subject_line = ungettext(
                    '%(question_count)d updated question about %(topics)s',
                    '%(question_count)d updated questions about %(topics)s',
                    question_count
                ) % {
                    'question_count': question_count,
                    'topics': tag_summary
                }
                #todo: send this to special log
                #print 'have %d updated questions for %s' % (num_q, user.username)
                text = ungettext(
                    '<p>Dear %(name)s,</p><p>The following question has been updated '
                    '%(sitename)s</p>',
                    '<p>Dear %(name)s,</p><p>The following %(num)d questions have been '
                    'updated on %(sitename)s:</p>',
                    num_q
                ) % {
                    'num':num_q,
                    'name':user.username,
                    'sitename': askbot_settings.APP_SHORT_NAME
                }
                text += '<ul>'
                items_added = 0
                items_unreported = 0
                for q, meta_data in q_list.items():
                    act_list = []
                    if meta_data['skip']:
                        continue
                    if items_added >= askbot_settings.MAX_ALERTS_PER_EMAIL:
                        items_unreported = num_q - items_added #may be inaccurate actually, but it's ok
                    else:
                        items_added += 1
                        if meta_data['new_q']:
                            act_list.append(_('new question'))
                        format_action_count('%(num)d rev', meta_data['q_rev'],act_list)
                        format_action_count('%(num)d ans', meta_data['new_ans'],act_list)
                        format_action_count('%(num)d ans rev',meta_data['ans_rev'],act_list)
                        act_token = ', '.join(act_list)
                        text += '<li><a href="%s?sort=latest">%s</a> <font color="#777777">(%s)</font></li>' \
                                    % (url_prefix + q.get_absolute_url(), q.thread.title, act_token)
                text += '</ul>'
                text += '<p></p>'
                #if len(q_list.keys()) >= askbot_settings.MAX_ALERTS_PER_EMAIL:
                #    text += _('There may be more questions updated since '
                #            'you have logged in last time as this list is '
                #            'abridged for your convinience. Please visit '
                #            'the askbot and see what\'s new!<br>'
                #        )
                link = url_prefix + reverse(
                                        'user_subscriptions',
                                        kwargs = {
                                            'id': user.id,
                                            'slug': slugify(user.username)
                                        }
                                    )
                text += _(
                    '<p>Please remember that you can always <a '
                    'href="%(email_settings_link)s">adjust</a> frequency of the email updates or '
                    'turn them off entirely.<br/>If you believe that this message was sent in an '
                    'error, please email about it the forum administrator at %(admin_email)s.</'
                    'p><p>Sincerely,</p><p>Your friendly %(sitename)s server.</p>'
                ) % {
                    'email_settings_link': link,
                    'admin_email': django_settings.ADMINS[0][1],
                    'sitename': askbot_settings.APP_SHORT_NAME
                }
                if DEBUG_THIS_COMMAND == True:
                    recipient_email = django_settings.ADMINS[0][1]
                else:
                    recipient_email = user.email
                mail.send_mail(
                    subject_line = subject_line,
                    body_text = text,
                    recipient_list = [recipient_email]
                )
|
tvenkat/askbot-devel
|
askbot/management/commands/send_email_alerts.py
|
Python
|
gpl-3.0
| 21,935
|
[
"VisIt"
] |
4545158c15f26f082960de116a7547deb1f320dce855c416662816d6322b6d42
|
import curses, curses.panel
import sys, os, math, copy
from testData import Test_DATA
class GUI():
DOWN = 1
UP = -1
LEFT = -1
RIGHT = 1
# Planet view options
IDX_PLANET = 0
    def __init__(self):
        """Initialise game/menu state, load test data, and launch the UI.

        NOTE(review): curses.wrapper(self.mainMenu) at the end means simply
        constructing a GUI starts the full-screen curses event loop — a
        significant side effect for a constructor.
        """
        # Variables for game
        self.time = 0
        # Variables for menu
        self.optionRange = 0
        self.deltaOptions = 0
        #up down variables
        self.topLineNum = 0
        self.highlightLineNum = 0
        self.bottomLineNum = 0
        #left right variables
        self.focusedPanelNum = 0
        self.horozontalLineNum = 0
        self.maxPanels = 2
        self.planetViewOptions = []
        self.panelStack = {}
        self.Test_DATA = Test_DATA()
        self.loadData()
        curses.wrapper(self.mainMenu)
    def terminalSize(self):
        # NOTE(review): dead code — shadowed by the later terminalSize()
        # definition in this class, so this version never runs. It would also
        # raise if called: getmaxyx() is a window method, not a curses
        # module-level function.
        self.screenSize = curses.getmaxyx()
        return True
    def loadData(self):
        """Populate user/planet/fleet state from the Test_DATA fixture."""
        self.user = self.Test_DATA.user
        self.planets = self.Test_DATA.generatePlanets()
        # deep copy so later mutation of user["planets"] leaves self.planets intact
        self.user["planets"] = copy.deepcopy(self.planets)
        # NOTE(review): reads `fleets` from the Test_DATA *class*, not the
        # self.Test_DATA instance used above — confirm this is intentional.
        self.fleets = Test_DATA.fleets
def createWinAndPanel(self, name, l, h, y, x):
win = curses.newwin(l, h, y, x)
win.box()
win.border(0)
panel = curses.panel.new_panel(win)
#does this do a deepcopy? Does the existing panel go into garbage?
self.panelStack[name] = panel
panel.top()
panel.show()
self.screen.refresh
win.refresh()
curses.panel.update_panels()
return win, panel
    def cursesSetup(self):
        """Configure terminal modes: no echo, cbreak input, hidden cursor.

        NOTE(review): Python-2 print statement below; the terminal-size
        check is a stub (see TODO) and its result is not acted upon.
        """
        # @ TO DO
        # PRIORITY 5
        # Check and resize terminal
        # True : correct size and continue
        # False : close
        if self.terminalSize():
            # reserved for late
            print True
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
    def displayPlanet(self, panelList):
        """Render the planet view (list, info, game, building, fleet panels).

        NOTE(review): several latent issues, flagged inline —
        * the bare name `planetViewScreens` in the try block is undefined
          (presumably self.planetViewScreens was meant); the NameError is
          swallowed by the bare except, so the window-creation branch runs on
          every call and re-creates all five windows;
        * `panels.hidden` is a bound method, not a call, so the condition is
          always truthy;
        * the unconditional tail call self.displayPlanet(panelList) recurses
          without bound — the recursion doubles as the event loop.
        Indentation of this method was reconstructed — verify against the
        original file.
        """
        self.screen.clear()
        self.menuBox.addstr(8,8,"did it work")
        if self.focusedPanelNum == 0:
            idx = self.highlightLineNum + self.topLineNum
            self.focusedPlanetNum = idx
        else:
            idx = self.focusedPlanetNum
            self.highlightLineNum = idx - self.topLineNum
        #for panel in panelList:
            #self.panelStack[panel].move(9,10)
            #self.panelStack[panel].bottom()
            #self.panelStack[panel].hide()
        #self.screen.refresh()
        #self.screen.clear()
        try:
            # NameError here (see docstring) — caught by the bare except below
            for panel in planetViewScreens:
                self.panelStack[panel].show()
        except:
            self.planetListBox, self.planetListPanel = self.createWinAndPanel("p_List", self.screenSize[0], 20, 0, 0)
            self.planetInfoBox, self.planetInfoPanel = self.createWinAndPanel("p_Info", (self.screenSize[0]/2), ((self.screenSize[1]-20)/2)+1, 0, 19)
            self.gameInfoBox, self.gameInfoPanel = self.createWinAndPanel("game_info", (self.screenSize[0]/2),((self.screenSize[1]-20)/2)+1, 0, ((self.screenSize[1]-20)/2)+19 )
            self.buildingInfoBox, self.buildingInfoPanel = self.createWinAndPanel("building_info", (self.screenSize[0]/2)+1, ((self.screenSize[1]-20)/2)+1, (self.screenSize[0]/2)-1, 19)
            self.fleetInfoBox, self.fleetInfoPanel = self.createWinAndPanel("fleet_info", (self.screenSize[0]/2)+1, ((self.screenSize[1]-20)/2)+1, (self.screenSize[0]/2)-1, ((self.screenSize[1]-20)/2)+19)
            self.planetViewScreens = [self.planetListPanel,self.planetInfoPanel, self.gameInfoPanel, self.buildingInfoPanel, self.fleetInfoPanel]
        #self.planetListPanel.top()
        #self.planetListPanel.show()
        #self.planetListPanel.top()
        #probably redundant but .refresh() is needed.
        for panels in self.planetViewScreens:
            # always truthy: `hidden` is a method object, not a call
            if panels.hidden:
                panels.show()
            self.displayPlanetListPanel(self.planetListBox)
            self.displayPlanetInfoPanel(self.planetInfoBox, idx)
            panels.window().refresh()
        #self.planetViewScreens[self.focusedPanelNum].window().addstr(1,1, "focused, %d" % self.focusedPanelNum)
        #self.planetViewScreens[self.focusedPanelNum].window().addstr(2,1, "line, %d" % self.horozontalLineNum)
        self.scrollFunc(self.planetViewScreens[self.focusedPanelNum].window(), function=[self.displayPlanetInfoPanel])
        self.displayPlanet(panelList)
    def displayFleets(self):
        # Stub (Python-2 print). NOTE(review): an identical displayFleets is
        # defined again immediately below and shadows this one.
        print True
    def displayFleets(self):
        # Stub (Python-2 print); duplicate definition — this one wins.
        print True
    def displayScanners(self):
        # Stub: scanner view not implemented yet (Python-2 print).
        print True
    def exit(self):
        # Stub: shutdown/cleanup not implemented yet (Python-2 print).
        print True
    def displayabout(self):
        # Stub: about screen not implemented yet (Python-2 print).
        print True
    def terminalSize(self):
        """Record the (rows, cols) of the main screen; always returns True.

        This definition shadows the earlier terminalSize() above. The
        commented-out code sketches a future minimum-size check.
        """
        self.screenSize = self.screen.getmaxyx()
        #if self.terminalSize[0] < 1000 and self.terminalSize[1] < 1000:
            #curses.resizeterm(20, 30)
        return True
        #else:
        #    return True
    def logmessages(message):
        # NOTE(review): missing `self` — when called on an instance, the GUI
        # object itself is bound to `message`. Likely meant to be a
        # @staticmethod or to take (self, message); left as-is because the
        # intended fix is ambiguous. Python-2 print statement.
        print message
    def scrollFunc(self, window, direction = ["up", "down", "left", "right"], **kwargs):
        """Read one keypress from *window* and dispatch it.

        Arrow keys move the highlight (updown/leftright), restricted to the
        directions listed in *direction*; Enter invokes
        kwargs["function"][highlighted line], passing [window].

        NOTE(review): the mutable default list for *direction* is shared
        across calls — harmless here since it is never mutated, but fragile.
        Any exception from the invoked function is caught and painted into
        the window rather than propagated.
        """
        window.keypad(1)
        c = window.getch()
        if c == curses.KEY_UP and "up" in direction:
            self.updown(self.UP)
            window.addstr(7,7,"%d" % self.highlightLineNum)
        elif c == curses.KEY_DOWN and "down" in direction:
            self.updown(self.DOWN)
        elif c == curses.KEY_LEFT and "left" in direction:
            self.leftright(self.LEFT)
        elif c == curses.KEY_RIGHT and "right" in direction:
            self.leftright(self.RIGHT)
        elif c == ord("\n"):
            # Pass functions for menu via kwargs.
            #try:
            try:
                kwargs["function"][self.highlightLineNum]([window])
            except Exception as e:
                window.addstr(1,1, str(e))
                pass
            #except IndexError:
            #    pass
            #    #kwargs["function"][self.focusedPanelNum](window,self.highlightLineNum)
            #except TypeError:
            #    pass
            #    #kwargs["function"][self.focusedPanelNum](window,self.highlightLineNum)
            #except AttributeError:
            #    pass
            #self.functionList[self.highlightLineNum](["menu"])
    def mainMenu(self, stdcsr):
        """curses.wrapper entry point: build the menu window and loop forever.

        Each iteration redraws the menu, then blocks in scrollFunc waiting
        for a keypress; Enter on the first entry opens the planet view.
        """
        self.screen = stdcsr
        self.cursesSetup()
        self.menuBox, menuPanel = self.createWinAndPanel("menu", 15, 30, 1, 1)
        #self.menuBox = curses.newwin(15,30,1,1)
        #self.panel1 = curses.panel.new_panel(self.menuBox)
        curses.panel.update_panels()
        self.menuBox.box()
        self.menuBox.border(0)
        # NOTE(review): only one handler is wired, but displayMenu shows six
        # options — Enter on the others raises and is swallowed by scrollFunc.
        self.functionList = [self.displayPlanet]
        while True:
            self.displayMenu()
            self.bottom = 0
            self.scrollFunc(self.menuBox, function=self.functionList)
def leftright(self, increment):
    """Move the panel focus left or right, clamped to the panel range.

    Parameters
    ----------
    increment : int
        ``self.LEFT`` or ``self.RIGHT`` (presumably -1 / +1 — TODO
        confirm against where the constants are defined).
    """
    nextPanNum = self.focusedPanelNum + increment
    self.maxPanels = len(self.planetViewScreens)
    # (A stray Python-2 debug ``print`` was removed here: it wrote straight
    # to stdout and corrupted the curses display.)
    if increment == self.LEFT and nextPanNum != -1:
        self.focusedPanelNum = nextPanNum
    elif increment == self.RIGHT and nextPanNum < self.maxPanels:
        self.focusedPanelNum = nextPanNum
def updown(self, increment):
    """Move the highlighted line up or down, scrolling at the edges.

    When the highlight is already pinned at the top (or bottom) of the
    visible window and more lines exist beyond it, the view scrolls by
    adjusting ``topLineNum``; otherwise the highlight itself moves,
    staying inside the option list.
    """
    target = self.highlightLineNum + increment
    # Top edge with lines hidden above: scroll the view up instead.
    if increment == self.UP and self.highlightLineNum == 0 and self.topLineNum != 0:
        self.topLineNum += self.UP
        return
    # Bottom edge with lines hidden below: scroll the view down instead.
    if increment == self.DOWN and target == self.optionRange[1] and self.bottom != self.nOptions:
        self.topLineNum += self.DOWN
        return
    # Otherwise just move the highlight within bounds.
    if increment == self.UP:
        if self.topLineNum != 0 or self.highlightLineNum != 0:
            self.highlightLineNum = target
    elif increment == self.DOWN:
        if self.highlightLineNum + 1 != self.nOptions and self.bottom != self.nOptions:
            self.highlightLineNum = target
def displayMenu(self):
    """Draw the main menu, highlighting the selected line in bold.

    Also records ``nOptions`` and ``optionRange`` so the shared scrolling
    code (``updown``/``scrollFunc``) knows this menu's bounds.
    """
    menuOptions = ["Planets", "Fleets", "Galaxy", "Scanners", "About", "Exit"]
    self.nOptions = len(menuOptions)
    self.optionRange = (0, self.nOptions)
    # (Unused locals ``menuFunctions``, ``top`` and ``bottom`` were
    # removed; ``bottom`` also read curses.LINES, which only exists after
    # curses.initscr() has run.)
    for index, line in enumerate(menuOptions):
        if index != self.highlightLineNum:
            self.menuBox.addstr(index + 1, 1, line)
        else:
            self.menuBox.addstr(index + 1, 1, line, curses.A_BOLD)
    self.menuBox.refresh()
def displayPlanetListPanel(self, window):
    """Render the scrolling planet list into ``window``.

    The highlighted planet is drawn bold, but only while this panel has
    focus (``focusedPanelNum == 0``).  Updates ``nOptions``, ``bottom``
    and ``optionRange`` for the shared scrolling code.
    """
    self.nOptions = len(self.planets)
    window.addstr(1, 2, "Planet List")
    y, x = window.getmaxyx()
    top = self.topLineNum
    self.bottom = self.topLineNum + y - 4  # index just past the last visible row
    bottom1 = y - 4  # number of visible rows
    self.optionRange = (top, bottom1)
    for idx, planet in enumerate(self.planets[top:self.bottom]):
        # (Removed an unused ``linenum`` local previously computed here.)
        if idx != self.highlightLineNum or self.focusedPanelNum != 0:
            window.addstr(idx + 3, 2, "%s" % (planet["name"]))
        else:
            window.addstr(idx + 3, 2, "%s" % planet["name"], curses.A_BOLD)
def displayPlanetInfoPanel(self, window, idx=0):
    # Render the info pane for the idx-th planet of the current user.
    # NOTE(review): the list panel iterates self.planets while this reads
    # self.user["planets"] — confirm the two collections stay in sync.
    planet = self.user["planets"][idx]
    name = planet["name"]
    population = planet["population"]
    # The lookups below are currently unused (only name/population are
    # drawn); presumably placeholders for a fuller display.  They also act
    # as an implicit schema check, raising KeyError on malformed planets.
    water = planet['multi']['water']
    REE = planet['multi']['REE']
    debris = planet['resource']['debris']
    waterResource = planet['resource']['water']
    foodResource = planet['resource']['food']
    REEResource = planet['resource']['REE']
    window.addstr(2,2, " Planet Info")
    window.addstr(4,2, "Name %s" % name)
    window.addstr(6,2, "Population %d" % population)
# Script entry: build the application object (GUI is defined earlier in
# this file); presumably its constructor starts the curses UI — confirm.
gui = GUI()
|
hydrius/schwarzschild-radius
|
old/SchwarzschildRadius1/main.py
|
Python
|
mit
| 10,314
|
[
"Galaxy"
] |
ec9d8454be1a211a04e6c4104d632bbac7ac625664719b3d6c6cebfe06d155e4
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Andrew Dykstra <andrew.r.dykstra@gmail.com>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
import numpy as np
import warnings
from .baseline import rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin,
equalize_channels)
from .filter import resample, detrend, FilterMixin
from .fixes import in1d
from .utils import check_fname, logger, verbose, object_hash, _time_mask
from .viz import (plot_evoked, plot_evoked_topomap, plot_evoked_field,
plot_evoked_image, plot_evoked_topo)
from .viz.evoked import _plot_evoked_white
from .externals.six import string_types
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.tree import dir_tree_find
from .io.pick import channel_type, pick_types
from .io.meas_info import read_meas_info, write_meas_info
from .io.proj import ProjMixin
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_string, write_float_matrix,
write_id)
from .io.base import ToDataFrameMixin
# Map public aspect names to FIFF aspect constants, plus the reverse map
# (keyed by str(constant)) used when translating the aspect kind read back
# from a file into a human-readable name.
_aspect_dict = {'average': FIFF.FIFFV_ASPECT_AVERAGE,
                'standard_error': FIFF.FIFFV_ASPECT_STD_ERR}
_aspect_rev = {str(FIFF.FIFFV_ASPECT_AVERAGE): 'average',
               str(FIFF.FIFFV_ASPECT_STD_ERR): 'standard_error'}
class Evoked(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
ToDataFrameMixin):
"""Evoked data
Parameters
----------
fname : string
Name of evoked/average FIF file to load.
If None no data is loaded.
condition : int, or str
Dataset ID number (int) or comment/name (str). Optional if there is
only one data set in file.
baseline : tuple or list of length 2, or None
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used. If None, no correction is applied.
proj : bool, optional
Apply SSP projection vectors
kind : str
Either 'average' or 'standard_error'. The type of data to read.
Only used if 'condition' is a str.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
info : dict
Measurement info.
ch_names : list of string
List of channels' names.
nave : int
Number of averaged epochs.
kind : str
Type of data, either average or standard_error.
first : int
First time sample.
last : int
Last time sample.
comment : string
Comment on dataset. Can be the condition.
times : array
Array of time instants in seconds.
data : array of shape (n_channels, n_times)
Evoked response.
verbose : bool, str, int, or None.
See above.
"""
@verbose
def __init__(self, fname, condition=None, baseline=None, proj=True,
             kind='average', verbose=None):
    """Read one evoked dataset from a FIF file (see class docstring)."""
    if fname is None:
        raise ValueError('No evoked filename specified')
    self.verbose = verbose
    logger.info('Reading %s ...' % fname)
    f, tree, _ = fiff_open(fname)
    with f as fid:
        if not isinstance(proj, bool):
            raise ValueError(r"'proj' must be 'True' or 'False'")
        # Read the measurement info
        info, meas = read_meas_info(fid, tree)
        info['filename'] = fname
        # Locate the data of interest
        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
        if len(processed) == 0:
            raise ValueError('Could not find processed data')
        evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED)
        if len(evoked_node) == 0:
            raise ValueError('Could not find evoked data')
        # Resolve a string condition (comment) + kind into a dataset index
        if isinstance(condition, string_types):
            if kind not in _aspect_dict.keys():
                raise ValueError('kind must be "average" or '
                                 '"standard_error"')
            comments, aspect_kinds, t = _get_entries(fid, evoked_node)
            goods = np.logical_and(in1d(comments, [condition]),
                                   in1d(aspect_kinds,
                                        [_aspect_dict[kind]]))
            found_cond = np.where(goods)[0]
            if len(found_cond) != 1:
                raise ValueError('condition "%s" (%s) not found, out of '
                                 'found datasets:\n %s'
                                 % (condition, kind, t))
            condition = found_cond[0]
        elif condition is None:
            if len(evoked_node) > 1:
                _, _, conditions = _get_entries(fid, evoked_node)
                raise TypeError("Evoked file has more than one "
                                "conditions, the condition parameters "
                                "must be specified from:\n%s" % conditions)
            else:
                condition = 0
        if condition >= len(evoked_node) or condition < 0:
            raise ValueError('Data set selector out of range')
        my_evoked = evoked_node[condition]
        # Identify the aspects
        aspects = dir_tree_find(my_evoked, FIFF.FIFFB_ASPECT)
        if len(aspects) > 1:
            logger.info('Multiple aspects found. Taking first one.')
        my_aspect = aspects[0]
        # Walk the evoked block's directory to collect metadata tags
        nchan = 0
        sfreq = -1
        chs = []
        comment = None
        for k in range(my_evoked['nent']):
            my_kind = my_evoked['directory'][k].kind
            pos = my_evoked['directory'][k].pos
            if my_kind == FIFF.FIFF_COMMENT:
                tag = read_tag(fid, pos)
                comment = tag.data
            elif my_kind == FIFF.FIFF_FIRST_SAMPLE:
                tag = read_tag(fid, pos)
                first = int(tag.data)
            elif my_kind == FIFF.FIFF_LAST_SAMPLE:
                tag = read_tag(fid, pos)
                last = int(tag.data)
            elif my_kind == FIFF.FIFF_NCHAN:
                tag = read_tag(fid, pos)
                nchan = int(tag.data)
            elif my_kind == FIFF.FIFF_SFREQ:
                tag = read_tag(fid, pos)
                sfreq = float(tag.data)
            elif my_kind == FIFF.FIFF_CH_INFO:
                tag = read_tag(fid, pos)
                chs.append(tag.data)
        if comment is None:
            comment = 'No comment'
        # Local channel information overrides what came from meas info
        if nchan > 0:
            if chs is None:
                raise ValueError('Local channel information was not found '
                                 'when it was expected.')
            if len(chs) != nchan:
                raise ValueError('Number of channels and number of '
                                 'channel definitions are different')
            info['chs'] = chs
            info['nchan'] = nchan
            logger.info(' Found channel information in evoked data. '
                        'nchan = %d' % nchan)
            if sfreq > 0:
                info['sfreq'] = sfreq
        nsamp = last - first + 1
        logger.info(' Found the data of interest:')
        logger.info(' t = %10.2f ... %10.2f ms (%s)'
                    % (1000 * first / info['sfreq'],
                       1000 * last / info['sfreq'], comment))
        if info['comps'] is not None:
            logger.info(' %d CTF compensation matrices available'
                        % len(info['comps']))
        # Read the data in the aspect block
        nave = 1
        epoch = []
        for k in range(my_aspect['nent']):
            kind = my_aspect['directory'][k].kind
            pos = my_aspect['directory'][k].pos
            if kind == FIFF.FIFF_COMMENT:
                tag = read_tag(fid, pos)
                comment = tag.data
            elif kind == FIFF.FIFF_ASPECT_KIND:
                tag = read_tag(fid, pos)
                aspect_kind = int(tag.data)
            elif kind == FIFF.FIFF_NAVE:
                tag = read_tag(fid, pos)
                nave = int(tag.data)
            elif kind == FIFF.FIFF_EPOCH:
                tag = read_tag(fid, pos)
                epoch.append(tag)
        logger.info(' nave = %d - aspect type = %d'
                    % (nave, aspect_kind))
        nepoch = len(epoch)
        if nepoch != 1 and nepoch != info['nchan']:
            raise ValueError('Number of epoch tags is unreasonable '
                             '(nepoch = %d nchan = %d)'
                             % (nepoch, info['nchan']))
        if nepoch == 1:
            # Only one epoch
            all_data = epoch[0].data.astype(np.float)
            # May need a transpose if the number of channels is one
            if all_data.shape[1] == 1 and info['nchan'] == 1:
                all_data = all_data.T.astype(np.float)
        else:
            # Put the old style epochs together
            all_data = np.concatenate([e.data[None, :] for e in epoch],
                                      axis=0).astype(np.float)
        if all_data.shape[1] != nsamp:
            raise ValueError('Incorrect number of samples (%d instead of '
                             ' %d)' % (all_data.shape[1], nsamp))
        # Calibrate
        cals = np.array([info['chs'][k]['cal'] *
                         info['chs'][k].get('scale', 1.0)
                         for k in range(info['nchan'])])
        all_data *= cals[:, np.newaxis]
        times = np.arange(first, last + 1, dtype=np.float) / info['sfreq']
        self.info = info
        # Put the rest together all together
        self.nave = nave
        self._aspect_kind = aspect_kind
        self.kind = _aspect_rev.get(str(self._aspect_kind), 'Unknown')
        self.first = first
        self.last = last
        self.comment = comment
        self.times = times
        # bind info, proj, data to self so apply_proj can be used
        # (a duplicate ``self.data = all_data`` assignment was removed here)
        self.data = all_data
        if proj:
            self.apply_proj()
        # Run baseline correction
        self.data = rescale(self.data, times, baseline, 'mean', copy=False)
def save(self, fname):
    """Write this evoked dataset to disk.

    Parameters
    ----------
    fname : string
        Path of the destination file.
    """
    write_evokeds(fname, self)
def __repr__(self):
    """Summarise comment, time span, nave and data shape."""
    n_chan, n_times = self.data.shape
    parts = ["comment : '%s'" % self.comment,
             "time : [%f, %f]" % (self.times[0], self.times[-1]),
             "n_epochs : %d" % self.nave,
             "n_channels x n_times : %s x %s" % (n_chan, n_times)]
    return "<Evoked | %s>" % ", ".join(parts)
@property
def ch_names(self):
    """List of channel names, read from ``self.info['ch_names']``."""
    return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
    """Restrict the data to the time window [tmin, tmax].

    Parameters
    ----------
    tmin : float | None
        Start of the window in seconds (None keeps the current start).
    tmax : float | None
        End of the window in seconds (None keeps the current end).
    copy : bool
        If False, crop in place; otherwise crop (and return) a copy.
    """
    target = self.copy() if copy else self
    keep = _time_mask(target.times, tmin, tmax)
    target.times = target.times[keep]
    # Keep first/last sample indices consistent with the new time axis.
    target.first = int(target.times[0] * target.info['sfreq'])
    target.last = len(target.times) + target.first - 1
    target.data = target.data[:, keep]
    return target
def shift_time(self, tshift, relative=True):
    """Shift the time axis of the evoked data (in place).

    Parameters
    ----------
    tshift : float
        Amount of shift in seconds when ``relative`` is True (positive
        moves the data forward, negative backward); otherwise the new
        first time point.
    relative : bool
        Whether ``tshift`` is applied relative to the current start.

    Notes
    -----
    Maximum accuracy of time shift is 1 / evoked.info['sfreq'].
    """
    n_times = len(self.times)
    sfreq = self.info['sfreq']
    base = self.first if relative else 0
    self.first = int(tshift * sfreq) + base
    self.last = self.first + n_times - 1
    self.times = np.arange(self.first, self.last + 1,
                           dtype=np.float) / sfreq
def plot(self, picks=None, exclude='bads', unit=True, show=True, ylim=None,
         xlim='tight', proj=False, hline=None, units=None, scalings=None,
         titles=None, axes=None, gfp=False, window_title=None):
    """Butterfly plot of the evoked data (delegates to ``plot_evoked``).

    Left-clicking a trace shows its channel name; selecting a span plots
    a topographic map of the selection.  Bad channels are shown in red
    unless excluded.

    All arguments are forwarded unchanged to ``mne.viz.plot_evoked``:
    ``picks``/``exclude`` select channels; ``unit`` scales by channel (SI)
    unit; ``show`` calls ``pyplot.show()``; ``ylim``/``xlim`` set axis
    limits; ``proj`` applies SSP (or, with 'interactive', shows toggles);
    ``hline`` draws horizontal lines at the given values; ``units``,
    ``scalings`` and ``titles`` are per-channel-type dicts for labels,
    scale factors and titles; ``axes`` supplies target axes; ``gfp``
    additionally (or with 'only', exclusively) plots global field power;
    ``window_title`` titles the figure window.
    """
    return plot_evoked(self, picks=picks, exclude=exclude, unit=unit,
                       show=show, ylim=ylim, proj=proj, xlim=xlim,
                       hline=hline, units=units, scalings=scalings,
                       titles=titles, axes=axes, gfp=gfp,
                       window_title=window_title)
def plot_image(self, picks=None, exclude='bads', unit=True, show=True,
               clim=None, xlim='tight', proj=False, units=None,
               scalings=None, titles=None, axes=None, cmap='RdBu_r'):
    """Image plot of the evoked data (delegates to ``plot_evoked_image``).

    All arguments are forwarded unchanged: ``picks``/``exclude`` select
    channels; ``unit`` scales by channel (SI) unit; ``show`` calls
    ``pyplot.show()``; ``clim``/``xlim`` set colour and x-axis limits;
    ``proj`` applies SSP (or shows interactive toggles); ``units``,
    ``scalings`` and ``titles`` are per-channel-type dicts; ``axes``
    supplies target axes; ``cmap`` is the matplotlib colormap.
    """
    return plot_evoked_image(self, picks=picks, exclude=exclude, unit=unit,
                             show=show, clim=clim, proj=proj, xlim=xlim,
                             units=units, scalings=scalings,
                             titles=titles, axes=axes, cmap=cmap)
def plot_topo(self, layout=None, layout_scale=0.945, color=None,
              border='none', ylim=None, scalings=None, title=None,
              proj=False, vline=[0.0], fig_facecolor='k',
              fig_background=None, axis_facecolor='k', font_color='w',
              show=True):
    """2D sensor-topography plot (delegates to ``plot_evoked_topo``).

    Clicking an individual sensor subplot opens a new figure with that
    sensor's evoked response.  All arguments are forwarded unchanged:
    ``layout``/``layout_scale`` control sensor placement; ``color``,
    ``border``, ``ylim`` and ``scalings`` style the traces; ``proj``
    applies SSP; ``vline`` marks time points; ``title``,
    ``fig_facecolor``, ``fig_background``, ``axis_facecolor`` and
    ``font_color`` style the figure; ``show`` displays it.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        Images of evoked responses at sensor locations.

    .. versionadded:: 0.10.0
    """
    return plot_evoked_topo(self, layout=layout, layout_scale=layout_scale,
                            color=color, border=border, ylim=ylim,
                            scalings=scalings, title=title, proj=proj,
                            vline=vline, fig_facecolor=fig_facecolor,
                            fig_background=fig_background,
                            axis_facecolor=axis_facecolor,
                            font_color=font_color, show=show)
def plot_topomap(self, times="auto", ch_type=None, layout=None, vmin=None,
                 vmax=None, cmap='RdBu_r', sensors=True, colorbar=True,
                 scale=None, scale_time=1e3, unit=None, res=64, size=1,
                 cbar_fmt="%3.1f", time_format='%01d ms', proj=False,
                 show=True, show_names=False, title=None, mask=None,
                 mask_params=None, outlines='head', contours=6,
                 image_interp='bilinear', average=None, head_pos=None,
                 axes=None):
    """Topographic maps at specific time points (delegates to
    ``plot_evoked_topomap``).

    All arguments are forwarded unchanged: ``times`` selects the instants
    to plot ("auto", "peaks", a float or an array of floats); ``ch_type``
    the sensor type ('mag', 'grad' with RMS pairing, 'planar1',
    'planar2', 'eeg', or None for the first available); ``layout`` and
    ``head_pos`` the sensor geometry; ``vmin``/``vmax``/``cmap``/
    ``contours``/``image_interp`` the colour mapping; ``sensors``,
    ``show_names``, ``mask``, ``mask_params`` and ``outlines`` the
    overlays; ``scale``, ``scale_time``, ``unit``, ``cbar_fmt`` and
    ``time_format`` the labelling; ``res`` and ``size`` the raster and
    subplot sizes; ``proj`` SSP application (or interactive toggles);
    ``average`` a temporal averaging window in seconds; ``title``,
    ``axes``, ``colorbar`` and ``show`` the figure handling.  See
    ``mne.viz.plot_evoked_topomap`` for the full parameter reference.
    """
    return plot_evoked_topomap(self, times=times, ch_type=ch_type,
                               layout=layout, vmin=vmin,
                               vmax=vmax, cmap=cmap, sensors=sensors,
                               colorbar=colorbar, scale=scale,
                               scale_time=scale_time,
                               unit=unit, res=res, proj=proj, size=size,
                               cbar_fmt=cbar_fmt, time_format=time_format,
                               show=show, show_names=show_names,
                               title=title, mask=mask,
                               mask_params=mask_params,
                               outlines=outlines, contours=contours,
                               image_interp=image_interp,
                               average=average, head_pos=head_pos,
                               axes=axes)
def plot_field(self, surf_maps, time=None, time_label='t = %0.0f ms',
               n_jobs=1):
    """Plot MEG/EEG field maps on head surface and helmet in 3D.

    Parameters
    ----------
    surf_maps : list
        Surface mapping information obtained with make_field_map.
    time : float | None
        Time point to display; if None, the average peak latency across
        sensor types is used.
    time_label : str
        Format string describing the visualized time instant.
    n_jobs : int
        Number of jobs to run in parallel.

    Returns
    -------
    fig : instance of mlab.Figure
        The mayavi figure.
    """
    return plot_evoked_field(self, surf_maps, time=time,
                             time_label=time_label, n_jobs=n_jobs)
def plot_white(self, noise_cov, show=True):
    """Plot the whitened evoked response and whitened GFP.

    With a single covariance object, the GFP panel (bottom) distinguishes
    sensor types.  With a list of covariances, the evoked traces use the
    whitener from the covariance with the highest log-likelihood, one GFP
    is shown per estimator and sensor type, and the estimated ranks are
    reported via the logger.  See Engemann & Gramfort (2015), NeuroImage
    108, 328-342 ("Automated model selection in covariance estimation and
    spatial whitening of MEG and EEG signals").

    Parameters
    ----------
    noise_cov : list | instance of Covariance | str
        The noise covariance as computed by ``mne.cov.compute_covariance``.
    show : bool
        Whether to show the figure or not. Defaults to True.

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    return _plot_evoked_white(self, noise_cov=noise_cov, scalings=None,
                              rank=None, show=show)
def as_type(self, ch_type='grad', mode='fast'):
    """Compute a virtual evoked with fields interpolated into mag/grad
    channels.

    .. Warning:: Using virtual evokeds to compute an inverse can yield
       unexpected results.  Virtual channel names get a `'_virtual'`
       suffix to emphasize that their data are interpolated.

    Parameters
    ----------
    ch_type : str
        The destination channel type: 'mag' or 'grad'.
    mode : str
        Either `'accurate'` or `'fast'`, the quality of the Legendre
        polynomial expansion; `'fast'` should suffice for most uses.

    Returns
    -------
    evoked : instance of mne.Evoked
        The transformed evoked object containing only virtual channels.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Local import — presumably to avoid a circular dependency on .forward.
    from .forward import _as_meg_type_evoked
    return _as_meg_type_evoked(self, ch_type=ch_type, mode=mode)
def resample(self, sfreq, npad=100, window='boxcar'):
    """Resample the evoked data in place to a new sampling rate.

    Parameters
    ----------
    sfreq : float
        New sample rate to use.
    npad : int
        Amount to pad the start and end of the data.
    window : string or tuple
        Window to use in resampling. See scipy.signal.resample.
    """
    old_rate = self.info['sfreq']
    self.data = resample(self.data, sfreq, old_rate, npad, -1, window)
    # Keep the time-axis bookkeeping consistent with the new rate.
    self.info['sfreq'] = sfreq
    self.times = (np.arange(self.data.shape[1], dtype=np.float) / sfreq +
                  self.times[0])
    self.first = int(self.times[0] * self.info['sfreq'])
    self.last = len(self.times) + self.first - 1
def detrend(self, order=1, picks=None):
    """Detrend the data in place.

    Parameters
    ----------
    order : int
        Either 0 or 1: 0 removes a constant (DC) offset, 1 a linear
        trend.
    picks : array-like of int | None
        Channels to detrend; if None, MEG, EEG and SEEG channels are
        selected (excluding bads and stim/EOG/ECG/EMG).
    """
    if picks is None:
        picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
                           stim=False, eog=False, ecg=False, emg=False,
                           seeg=True, exclude='bads')
    self.data[picks] = detrend(self.data[picks], order, axis=-1)
def copy(self):
    """Return a deep copy of this Evoked instance.

    Returns
    -------
    evoked : instance of Evoked
    """
    return deepcopy(self)
def __add__(self, evoked):
    """Combine with another evoked, weighting by number of epochs."""
    combined = combine_evoked([self, evoked])
    combined.comment = self.comment + " + " + evoked.comment
    return combined
def __sub__(self, evoked):
    """Subtract another evoked, weighting by number of epochs."""
    # Negate a copy of the operand and reuse the weighted combination.
    negated = deepcopy(evoked)
    negated.data *= -1.
    out = combine_evoked([self, negated])
    # A missing comment on either operand would make the combined comment
    # meaningless, so warn and fall back to 'unknown'.
    if self.comment is None or negated.comment is None:
        warnings.warn('evoked.comment expects a string but is None')
        out.comment = 'unknown'
    else:
        out.comment = self.comment + " - " + negated.comment
    return out
def __hash__(self):
    """Hash computed over the measurement info and the data array."""
    state = dict(info=self.info, data=self.data)
    return object_hash(state)
def get_peak(self, ch_type=None, tmin=None, tmax=None, mode='abs',
             time_as_index=False):
    """Find the channel and latency of the peak amplitude.

    Parameters
    ----------
    ch_type : {'mag', 'grad', 'eeg', 'seeg', 'misc', None}
        Sensor type to search; must be given explicitly when more than
        one sensor type is present in the data.
    tmin : float | None
        Start of the search window in seconds (None = start of data).
    tmax : float | None
        End of the search window in seconds (None = end of data).
    mode : {'pos', 'neg', 'abs'}
        Consider only positive, only negative, or absolute values.
        Defaults to 'abs'.
    time_as_index : bool
        Return the time index instead of the latency in seconds.

    Returns
    -------
    ch_name : str
        The channel exhibiting the maximum response.
    latency : float | int
        The time point of the maximum response, either latency in
        seconds or index.
    """
    supported = ('mag', 'grad', 'eeg', 'seeg', 'misc', 'None')
    data_picks = pick_types(self.info, meg=True, eeg=True, seeg=True,
                            ref_meg=False)
    types_used = set([channel_type(self.info, idx) for idx in data_picks])
    if str(ch_type) not in supported:
        raise ValueError('Channel type must be `{supported}`. You gave me '
                         '`{ch_type}` instead.'
                         .format(ch_type=ch_type,
                                 supported='` or `'.join(supported)))
    if ch_type is not None and ch_type not in types_used:
        raise ValueError('Channel type `{ch_type}` not found in this '
                         'evoked object.'.format(ch_type=ch_type))
    if len(types_used) > 1 and ch_type is None:
        raise RuntimeError('More than one sensor type found. `ch_type` '
                           'must not be `None`, pass a sensor type '
                           'value instead')
    picks = None
    if ch_type is not None:
        # Translate the requested type into pick_types() keyword flags;
        # 'mag'/'grad' are passed through the ``meg`` argument itself.
        flags = dict(meg=False, eeg=False, misc=False, seeg=False)
        if ch_type in ('mag', 'grad'):
            flags['meg'] = ch_type
        else:
            flags[ch_type] = True
        picks = pick_types(self.info, ref_meg=False, **flags)
    data = self.data if picks is None else self.data[picks]
    ch_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
    return (self.ch_names[ch_idx],
            time_idx if time_as_index else self.times[time_idx])
class EvokedArray(Evoked):
    """Evoked object from numpy array

    Parameters
    ----------
    data : array of shape (n_channels, n_times)
        The channels' evoked response.
    info : instance of Info
        Info dictionary. Consider using ``create_info`` to populate
        this structure.
    tmin : float
        Start time before event.
    comment : string
        Comment on dataset. Can be the condition. Defaults to ''.
    nave : int
        Number of averaged epochs. Defaults to 1.
    kind : str
        Type of data, either average or standard_error. Defaults to 'average'.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
        Defaults to raw.verbose.

    See Also
    --------
    EpochsArray, io.RawArray, create_info
    """
    @verbose
    def __init__(self, data, info, tmin, comment='', nave=1, kind='average',
                 verbose=None):
        # Preserve complex-valued input; everything else becomes float64.
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)
        if data.ndim != 2:
            raise ValueError('Data must be a 2D array of shape (n_channels, '
                             'n_samples)')
        if len(info['ch_names']) != np.shape(data)[0]:
            raise ValueError('Info (%s) and data (%s) must have same number '
                             'of channels.' % (len(info['ch_names']),
                                               np.shape(data)[0]))
        self.data = data
        # XXX: this should use round and be tested
        self.first = int(tmin * info['sfreq'])
        self.last = self.first + np.shape(data)[-1] - 1
        # Use builtin ``float``: ``np.float`` was only an alias of the
        # builtin, was deprecated in NumPy 1.20 and removed in 1.24.
        self.times = np.arange(self.first, self.last + 1,
                               dtype=float) / info['sfreq']
        self.info = info
        self.nave = nave
        self.kind = kind
        self.comment = comment
        self.picks = None
        self.verbose = verbose
        self._projector = None
        if self.kind == 'average':
            self._aspect_kind = _aspect_dict['average']
        else:
            self._aspect_kind = _aspect_dict['standard_error']
def _get_entries(fid, evoked_node):
    """Collect the comment and aspect kind of every evoked set in the file.

    Returns the comments, the aspect kinds, and a human-readable listing of
    the available datasets (used in error messages).
    """
    comments = []
    aspect_kinds = []
    for ev in evoked_node:
        # Scan the directory entries of this evoked block for its comment.
        for entry in ev['directory'][:ev['nent']]:
            if entry.kind == FIFF.FIFF_COMMENT:
                comments.append(read_tag(fid, entry.pos).data)
        # The aspect sub-block carries the kind (average / standard error).
        aspect = dir_tree_find(ev, FIFF.FIFFB_ASPECT)[0]
        for entry in aspect['directory'][:aspect['nent']]:
            if entry.kind == FIFF.FIFF_ASPECT_KIND:
                aspect_kinds.append(int(read_tag(fid, entry.pos).data))
    comments = np.atleast_1d(comments)
    aspect_kinds = np.atleast_1d(aspect_kinds)
    if len(comments) != len(aspect_kinds) or len(comments) == 0:
        fid.close()
        raise ValueError('Dataset names in FIF file '
                         'could not be found.')
    kind_names = [_aspect_rev.get(str(a), 'Unknown') for a in aspect_kinds]
    listing = ['"%s" (%s)' % (c, kn) for kn, c in zip(kind_names, comments)]
    t = ' ' + '\n '.join(listing)
    return comments, aspect_kinds, t
def _get_evoked_node(fname):
    """Return the FIFFB_EVOKED nodes of an evoked FIF file."""
    fobj, tree, _ = fiff_open(fname)
    # Close the file as soon as the measurement tree has been read.
    with fobj as fid:
        _, meas = read_meas_info(fid, tree)
        return dir_tree_find(meas, FIFF.FIFFB_EVOKED)
def grand_average(all_evoked, interpolate_bads=True):
    """Make grand average of a list evoked data

    The function interpolates bad channels based on `interpolate_bads`
    parameter. If `interpolate_bads` is True, the grand average
    file will contain good channels and the bad channels interpolated
    from the good MEG/EEG channels.

    The grand_average.nave attribute will be equal the number
    of evoked datasets used to calculate the grand average.

    Note: Grand average evoked shall not be used for source localization.

    Parameters
    ----------
    all_evoked : list of Evoked data
        The evoked datasets.
    interpolate_bads : bool
        If True, bad MEG and EEG channels are interpolated.

    Returns
    -------
    grand_average : Evoked
        The grand average data.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Reject anything that is not evoked data.
    if not all(isinstance(e, Evoked) for e in all_evoked):
        raise ValueError("Not all the elements in list are evoked data")
    # Work on copies so the caller's datasets are left untouched.
    evokeds = [e.copy() for e in all_evoked]
    if interpolate_bads:
        evokeds = [e.interpolate_bads() if len(e.info['bads']) > 0 else e
                   for e in evokeds]
    equalize_channels(evokeds)  # apply equalize_channels
    # Equal weighting across datasets yields the grand average.
    ga = combine_evoked(evokeds, weights='equal')
    ga.nave = len(evokeds)
    ga.comment = "Grand average (n = %d)" % ga.nave
    return ga
def combine_evoked(all_evoked, weights='nave'):
    """Merge evoked data by weighted addition

    Data should have the same channels and the same time instants.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).

    Parameters
    ----------
    all_evoked : list of Evoked
        The evoked datasets.
    weights : list of float | str
        The weights to apply to the data of each evoked instance.
        Can also be ``'nave'`` to weight according to evoked.nave,
        or ``"equal"`` to use equal weighting (each weighted as ``1/N``).

    Returns
    -------
    evoked : Evoked
        The new evoked data.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    evoked = all_evoked[0].copy()
    if isinstance(weights, string_types):
        if weights not in ('nave', 'equal'):
            raise ValueError('weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_evoked], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_evoked)] * len(all_evoked)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_evoked):
        raise ValueError('weights must be the same size as all_evoked')
    ch_names = evoked.ch_names
    for e in all_evoked[1:]:
        # Validate with explicit raises: ``assert cond, ValueError(...)`` is
        # stripped under ``python -O`` and, when it did fire, raised an
        # AssertionError (with a ValueError *instance* as the message)
        # instead of the intended ValueError.
        if e.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels" % (evoked, e))
        if np.max(np.abs(e.times - evoked.times)) >= 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (evoked, e))
    # use union of bad channels
    bads = list(set(evoked.info['bads']).union(*(ev.info['bads']
                                                 for ev in all_evoked[1:])))
    evoked.info['bads'] = bads
    evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
    # Effective nave of a weighted sum; clip at 1 to stay valid.
    evoked.nave = max(int(1. / sum(w ** 2 / e.nave
                                   for w, e in zip(weights, all_evoked))), 1)
    return evoked
@verbose
def read_evokeds(fname, condition=None, baseline=None, kind='average',
                 proj=True, verbose=None):
    """Read evoked dataset(s)

    Parameters
    ----------
    fname : string
        The file name, which should end with -ave.fif or -ave.fif.gz.
    condition : int or str | list of int or str | None
        The index or list of indices of the evoked dataset to read. FIF files
        can contain multiple datasets. If None, all datasets are returned as a
        list.
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction. If None do not apply
        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used and if b is None then
        b is set to the end of the interval. If baseline is equal to
        (None, None) all the time interval is used.
    kind : str
        Either 'average' or 'standard_error', the type of data to read.
    proj : bool
        If False, available projectors won't be applied to the data.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    evoked : Evoked (if condition is int or str) or list of Evoked (if
        condition is None or list)
        The evoked dataset(s).

    See Also
    --------
    write_evokeds
    """
    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
    # A scalar condition means the caller wants a single Evoked back.
    single = condition is not None and not isinstance(condition, list)
    if condition is None:
        # Read every dataset stored in the file.
        condition = range(len(_get_evoked_node(fname)))
    elif single:
        condition = [condition]
    evokeds = [Evoked(fname, c, baseline=baseline, kind=kind, proj=proj,
                      verbose=verbose) for c in condition]
    return evokeds[0] if single else evokeds
def write_evokeds(fname, evoked):
    """Write an evoked dataset to a file
    Parameters
    ----------
    fname : string
        The file name, which should end with -ave.fif or -ave.fif.gz.
    evoked : Evoked instance, or list of Evoked instances
        The evoked dataset, or list of evoked datasets, to save in one file.
        Note that the measurement info from the first evoked instance is used,
        so be sure that information matches.
    See Also
    --------
    read_evokeds
    """
    check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz'))
    # Normalize to a list so single Evoked and lists are handled alike.
    if not isinstance(evoked, list):
        evoked = [evoked]
    # Create the file and save the essentials
    with start_file(fname) as fid:
        start_block(fid, FIFF.FIFFB_MEAS)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        if evoked[0].info['meas_id'] is not None:
            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info['meas_id'])
        # Write measurement info
        write_meas_info(fid, evoked[0].info)
        # One or more evoked data sets
        start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        for e in evoked:
            start_block(fid, FIFF.FIFFB_EVOKED)
            # Comment is optional
            if e.comment is not None and len(e.comment) > 0:
                write_string(fid, FIFF.FIFF_COMMENT, e.comment)
            # First and last sample
            write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first)
            write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last)
            # The epoch itself
            start_block(fid, FIFF.FIFFB_ASPECT)
            write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind)
            write_int(fid, FIFF.FIFF_NAVE, e.nave)
            # Divide out each channel's calibration factor (and optional
            # 'scale') so the matrix is stored in uncalibrated units.
            decal = np.zeros((e.info['nchan'], 1))
            for k in range(e.info['nchan']):
                decal[k] = 1.0 / (e.info['chs'][k]['cal'] *
                                  e.info['chs'][k].get('scale', 1.0))
            write_float_matrix(fid, FIFF.FIFF_EPOCH, decal * e.data)
            end_block(fid, FIFF.FIFFB_ASPECT)
            end_block(fid, FIFF.FIFFB_EVOKED)
        end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
        end_block(fid, FIFF.FIFFB_MEAS)
        end_file(fid)
def _get_peak(data, times, tmin=None, tmax=None, mode='abs'):
"""Get feature-index and time of maximum signal from 2D array
Note. This is a 'getter', not a 'finder'. For non-evoked type
data and continuous signals, please use proper peak detection algorithms.
Parameters
----------
data : instance of numpy.ndarray (n_locations, n_times)
The data, either evoked in sensor or source space.
times : instance of numpy.ndarray (n_times)
The times in seconds.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
Returns
-------
max_loc : int
The index of the feature with the maximum value.
max_time : int
The time point of the maximum response, index.
"""
modes = ('abs', 'neg', 'pos')
if mode not in modes:
raise ValueError('The `mode` parameter must be `{modes}`. You gave '
'me `{mode}`'.format(modes='` or `'.join(modes),
mode=mode))
if tmin is None:
tmin = times[0]
if tmax is None:
tmax = times[-1]
if tmin < times.min():
raise ValueError('The tmin value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmax > times.max():
raise ValueError('The tmin value is out of bounds. It must be '
'within {0} and {1}'.format(times.min(), times.max()))
if tmin >= tmax:
raise ValueError('The tmin must be smaller than tmax')
time_win = (times >= tmin) & (times <= tmax)
mask = np.ones_like(data).astype(np.bool)
mask[:, time_win] = False
maxfun = np.argmax
if mode == 'pos':
if not np.any(data > 0):
raise ValueError('No positive values encountered. Cannot '
'operate in pos mode.')
elif mode == 'neg':
if not np.any(data < 0):
raise ValueError('No negative values encountered. Cannot '
'operate in neg mode.')
maxfun = np.argmin
masked_index = np.ma.array(np.abs(data) if mode == 'abs' else data,
mask=mask)
max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape)
return max_loc, max_time
|
yousrabk/mne-python
|
mne/evoked.py
|
Python
|
bsd-3-clause
| 53,456
|
[
"Mayavi"
] |
292412df39610e31b2ba97ceff611b23d76f39b76b7cfa7c92631cbefda2894f
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
  """Graph visitor that tallies how many times each AST node is visited."""

  def __init__(self, graph):
    super(CountingVisitor, self).__init__(graph)
    self.counts = {}

  def init_state(self, _):
    # This visitor keeps no per-node state.
    return None

  def visit_node(self, node):
    key = node.ast_node
    self.counts[key] = self.counts.get(key, 0) + 1
    return False  # visit each node only once
class GraphVisitorTest(test.TestCase):
  """Checks that GraphVisitor reaches the expected nodes in each direction."""

  def _build_cfg(self, fn):
    # Parse `fn` and build one CFG per function found in it.
    node, _ = parser.parse_entity(fn, future_features=())
    cfgs = cfg.build(node)
    return cfgs, node

  def test_basic_coverage_forward(self):

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_forward()
    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # The return node should be unreachable in forward direction.
    self.assertNotIn(node.body[0].body[2], visitor.counts)
    self.assertEqual(visitor.counts[node.body[1]], 1)

  def test_basic_coverage_reverse(self):

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_reverse()
    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # Bug fix: was assertTrue(x, 1), where the 1 is silently taken as the
    # failure *message* and only truthiness is checked; assertEqual performs
    # the intended comparison against 1.
    self.assertEqual(visitor.counts[node.body[0].body[2]], 1)
    self.assertEqual(visitor.counts[node.body[1]], 1)
class AstToCfgTest(test.TestCase):
  def _build_cfg(self, fn):
    # Parse `fn` into an AST and return the mapping from functions to CFGs.
    node, _ = parser.parse_entity(fn, future_features=())
    cfgs = cfg.build(node)
    return cfgs
  def _repr_set(self, node_set):
    # Order-independent, comparable representation of a set of CFG nodes.
    return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
  def assertGraphMatches(self, graph, edges):
    """Tests whether the CFG contains the specified edges."""
    # Each entry of `edges` is (prev, node_repr, next): the repr of a node
    # together with the expected reprs of its predecessor/successor sets.
    for prev, node_repr, next_ in edges:
      matched = False
      for cfg_node in graph.index.values():
        if repr(cfg_node) == node_repr:
          if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
              self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
            matched = True
            break
      if not matched:
        self.fail(
            'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
  def assertStatementEdges(self, graph, edges):
    """Tests whether the CFG contains the specified statement edges."""
    for prev_node_reprs, node_repr, next_node_reprs in edges:
      matched = False
      partial_matches = []
      # stmt_next and stmt_prev must index the same statement nodes.
      self.assertSetEqual(
          frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
      for stmt_ast_node in graph.stmt_next:
        # Statements are matched by '<NodeClass>:<line number>'.
        ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
                              stmt_ast_node.lineno)
        if ast_repr == node_repr:
          actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
          actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
          # Remember near-misses so failures print what was actually found.
          partial_matches.append((actual_prev, node_repr, actual_next))
          if (self._as_set(prev_node_reprs) == actual_prev and
              self._as_set(next_node_reprs) == actual_next):
            matched = True
            break
      if not matched:
        self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
  def test_straightline(self):
    # A linear body yields a single chain of edges ending at the return.

    def test_fn(a):
      a += 1
      a = 2
      a = 3
      return

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'a += 1'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', 'return'),
            ('a = 3', 'return', None),
        ),
    )

  def test_straightline_no_return(self):
    # Without an explicit return, the last statement has no successor.

    def test_fn(a, b):
      a = b + 1
      a += max(a)

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a, b', 'a = b + 1'),
            ('a = b + 1', 'a += max(a)', None),
        ),
    )

  def test_unreachable_code(self):
    # Code after a return appears in the graph but has no predecessors.

    def test_fn(a):
      return
      a += 1  # pylint:disable=unreachable

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'return'),
            ('a', 'return', None),
            (None, 'a += 1', None),
        ),
    )

  def test_if_straightline(self):
    # An if/else forks the condition node into two terminal branches.

    def test_fn(a):
      if a > 0:
        a = 1
      else:
        a += -1

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('(a > 0)', 'a = 1', None),
            ('(a > 0)', 'a += -1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_branch_nested(self):
    # Nested if/else: each condition forks into two leaf assignments.

    def test_fn(a):
      if a > 0:
        if a > 1:
          a = 1
        else:
          a = 2
      else:
        if a > 2:
          a = 3
        else:
          a = 4

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
            ('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', None),
            ('(a > 1)', 'a = 2', None),
            ('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
            ('(a > 2)', 'a = 3', None),
            ('(a > 2)', 'a = 4', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'If:2', None),
            ('(a > 0)', 'If:3', None),
            ('(a > 0)', 'If:8', None),
        ),
    )

  def test_branch_straightline_semi(self):
    # An if without an else: the false branch falls off the function.

    def test_fn(a):
      if a > 0:
        a = 1

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', 'a = 1'),
            ('(a > 0)', 'a = 1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )

  def test_branch_return(self):
    # A return in one branch: only the else branch reaches the trailing code.

    def test_fn(a):
      if a > 0:
        return
      else:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('return', 'a = 1')),
            ('(a > 0)', 'a = 1', 'a = 2'),
            ('(a > 0)', 'return', None),
            ('a = 1', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', 'a = 2'),),
    )

  def test_branch_return_minimal(self):
    # An if whose body is only a return: both outcomes terminate.

    def test_fn(a):
      if a > 0:
        return

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'return'),
            ('(a > 0)', 'return', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_while_straightline(self):
    # The loop test has two predecessors (entry, loop body) and two exits.

    def test_fn(a):
      while a > 0:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 2'),),
    )

  def test_while_else_straightline(self):
    # The else clause runs when the loop test fails normally.

    def test_fn(a):
      while a > 0:
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 3'),),
    )

  def test_while_else_continue(self):
    # continue jumps back to the loop test; the else clause still applies.

    def test_fn(a):
      while a > 0:
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', '(a > 0)'),
            ('a = 0', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
        ),
    )
  def test_while_else_break(self):
    # break skips the else clause and jumps past the loop.

    def test_fn(a):
      while a > 0:
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )

  def test_while_else_return(self):
    # A return inside the loop terminates the function entirely.

    def test_fn(a):
      while a > 0:
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', 'a = 1'),
        ),
    )

  def test_while_nested_straightline(self):
    # The inner loop test cycles with its body; its exit feeds the outer loop.

    def test_fn(a):
      while a > 0:
        while a > 1:
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
        ),
    )
  def test_while_nested_continue(self):
    # continue in the inner loop targets the inner loop test only.

    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 3:
            continue
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
            ('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
            ('(a > 3)', 'continue', '(a > 1)'),
            ('(a > 3)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
        ),
    )

  def test_while_nested_break(self):
    # break in the inner loop exits only the inner loop.

    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 2:
            break
          a = 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(graph, (
        (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
        (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
        ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
        ('(a > 2)', 'break', 'a = 2'),
        ('(a > 2)', 'a = 1', '(a > 1)'),
        (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
        ('(a > 0)', 'a = 3', None),
    ))
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
        ),
    )
  def test_for_straightline(self):
    # The iterator expression plays the role of the loop test node.

    def test_fn(a):
      for a in range(0, a):
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 2'),),
    )

  def test_for_else_straightline(self):
    # The else clause runs when the iterator is exhausted normally.

    def test_fn(a):
      for a in range(0, a):
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 3'),),
    )

  def test_for_else_continue(self):
    # continue jumps back to the iterator; the else clause still applies.

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', 'range(0, a)'),
            ('(a > 1)', 'a = 0', 'a = 1'),
            ('a = 0', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
        ),
    )
  def test_for_else_break(self):
    # break skips the for-else clause and jumps past the loop.

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )

  def test_for_else_return(self):
    # A return inside the loop terminates the function entirely.

    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', 'a = 1'),
        ),
    )

  def test_for_nested_straightline(self):
    # The inner iterator cycles with its body; its exit feeds the outer loop.

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
            ('range(1, a)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
        ),
    )
  def test_for_nested_continue(self):
    # continue in the inner loop targets only the inner iterator.

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 3:
            continue
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
             ('(a > 3)', 'a = 2')),
            ('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
            ('(a > 3)', 'continue', 'range(1, a)'),
            ('(a > 3)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
        ),
    )

  def test_for_nested_break(self):
    # break in the inner loop exits only the inner loop.

    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 2:
            break
          b += 1
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
            ('range(1, a)', '(a > 2)', ('break', 'b += 1')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', 'b += 1', 'range(1, a)'),
            (('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
        ),
    )
  def test_complex(self):
    # Combined while/for/if with break, continue and return in one function.

    def test_fn(a):
      b = 0
      while a > 0:
        for b in range(0, a):
          if a > 2:
            break
          if a > 3:
            if a > 4:
              continue
            else:
              max(a)
              break
          b += 1
        else:  # for b in range(0, a):
          return a
        a = 2
      for a in range(1, a):
        return b
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
            (
                ('(a > 0)', 'continue', 'b += 1'),
                'range(0, a)',
                ('(a > 2)', 'return a'),
            ),
            ('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
            ('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
            ('(a > 4)', 'max(a)', 'break'),
            ('max(a)', 'break', 'a = 2'),
            ('(a > 4)', 'continue', 'range(0, a)'),
            ('(a > 3)', 'b += 1', 'range(0, a)'),
            ('range(0, a)', 'return a', None),
            ('break', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
            ('range(1, a)', 'return b', None),
            ('range(1, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('b = 0', 'While:3', 'range(1, a)'),
            ('(a > 0)', 'For:4', 'a = 2'),
            ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
            ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
            ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
            ('(a > 0)', 'For:17', 'a = 3'),
        ),
    )
  def test_finally_straightline(self):
    # The finally block sits on the normal path between try body and exit.

    def test_fn(a):
      try:
        a += 1
      finally:
        a = 2
      a = 3

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a += 1', 'a = 2'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )

  def test_return_finally(self):
    # finally runs on the way out; code after the try is unreachable.

    def test_fn(a):
      try:
        return a
      finally:
        a = 1
      a = 2

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'return a', 'a = 1'),
            ('return a', 'a = 1', None),
            (None, 'a = 2', None),
        ),
    )

  def test_break_finally(self):
    # break routes through the finally block before exiting the loop.

    def test_fn(a):
      while a > 0:
        try:
          break
        finally:
          a = 1

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'break'),
            ('(a > 0)', 'break', 'a = 1'),
            ('break', 'a = 1', None),
        ),
    )

  def test_continue_finally(self):
    # continue routes through the finally block before the loop test.

    def test_fn(a):
      while a > 0:
        try:
          continue
        finally:
          a = 1

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', 'continue'),
            ('(a > 0)', 'continue', 'a = 1'),
            ('continue', 'a = 1', '(a > 0)'),
        ),
    )

  def test_with_straightline(self):
    # The context-manager expression precedes the with body.

    def test_fn(a):
      with max(a) as b:
        a = 0
        return b

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'max(a)', 'a = 0'),
            ('max(a)', 'a = 0', 'return b'),
            ('a = 0', 'return b', None),
        ),
    )

  def test_lambda_basic(self):
    # The lambda body is opaque: the assignment is a single CFG node.

    def test_fn(a):
      a = lambda b: a + b
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = lambda b: a + b', 'return a'),
            ('a = lambda b: a + b', 'return a', None),
        ),
    )

  def test_pass(self):
    # pass is a real node between the arguments and the implicit exit.

    def test_fn(a):  # pylint:disable=unused-argument
      pass

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'pass', None),
        ),
    )
  def test_try_finally(self):
    # Normal flow passes through the finally block.

    def test_fn(a):
      try:
        a = 1
      finally:
        a = 2
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 1', 'a = 2', 'return a'),
            ('a = 2', 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
        ),
    )

  def test_try_except_single_bare(self):
    # A bare except: handler entry is reachable from inside the try body.

    def test_fn(a):
      try:
        a = 1
        a = 2
      except:  # pylint:disable=bare-except
        a = 3
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 2', 'a = 3', 'return a'),
            (('a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 2', 'ExceptHandler:5', 'return a'),
        ),
    )

  def test_try_except_single(self):
    # A single typed handler joins the try body at the common successor.

    def test_fn(a):
      try:
        a = 1
        a = 2
      except Exception1:  # pylint:disable=undefined-variable
        a = 3
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 2', 'a = 3', 'return a'),
            (('a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 2', 'ExceptHandler:5', 'return a'),
        ),
    )

  def test_try_except_single_aliased(self):
    # 'except ... as e' does not change the handler's edges.

    def test_fn(a):
      try:
        a = 1
      except Exception1 as e:  # pylint:disable=undefined-variable,unused-variable
        a = 2
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'return a')),
            (('a = 1', 'a = 2'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
        ),
    )

  def test_try_except_single_tuple_aliased(self):
    # A tuple of exception types behaves like a single handler.

    def test_fn(a):
      try:
        a = 1
      except (Exception1, Exception2) as e:  # pylint:disable=undefined-variable,unused-variable
        a = 2
      return a

    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'return a')),
            (('a = 1', 'a = 2'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
        ),
    )
def test_try_except_multiple(self):
    """Each handler of a multi-except Try gets its own branch off the try body."""
    def test_fn(a):
        try:
            a = 1
        except Exception1:  # pylint:disable=undefined-variable
            a = 2
        except Exception2:  # pylint:disable=undefined-variable
            a = 3
        return a

    graph, = self._build_cfg(test_fn).values()

    # Triples are (predecessors, node, successors), keyed by node source text.
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'a = 3', 'return a')),
            (('a = 1', 'a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
            ('a = 1', 'ExceptHandler:6', 'return a'),
        ),
    )
def test_try_except_finally(self):
    """With both except and finally, every path funnels through the finally body."""
    def test_fn(a):
        try:
            a = 1
        except Exception1:  # pylint:disable=undefined-variable
            a = 2
        except Exception2:  # pylint:disable=undefined-variable
            a = 3
        finally:
            a = 4
        return a

    graph, = self._build_cfg(test_fn).values()

    # Triples are (predecessors, node, successors), keyed by node source text.
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'a = 3', 'a = 4')),
            (('a = 1', 'a = 2', 'a = 3'), 'a = 4', 'return a'),
            ('a = 4', 'return a', None),
        ),
    )
    # Handlers exit into the finally body ('a = 4'), not directly into the return.
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'a = 4'),
            ('a = 1', 'ExceptHandler:6', 'a = 4'),
        ),
    )
def test_try_in_if(self):
    """An `if` nested in a `try` exits either into the handler or past the Try."""
    def test_fn(a):
        try:
            if a > 0:
                a = 1
            else:
                a = 2
        except Exception1:  # pylint:disable=undefined-variable
            a = 3
        a = 4

    graph, = self._build_cfg(test_fn).values()

    # Triples are (predecessors, node, successors), keyed by node source text.
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', ('a = 3', 'a = 4')),
            ('(a > 0)', 'a = 2', ('a = 3', 'a = 4')),
            (('a = 1', 'a = 2'), 'a = 3', 'a = 4'),
            (('a = 1', 'a = 2', 'a = 3'), 'a = 4', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'a = 4'),
            ('a', 'If:3', ('a = 3', 'a = 4')),
            (('a = 1', 'a = 2'), 'ExceptHandler:7', 'a = 4'),
        ),
    )
def test_try_in_if_all_branches_exit(self):
    """When both branches raise/return, the handler node has no predecessors."""
    def test_fn(a, b):
        try:
            if a > 0:
                raise b
            else:
                return 0
        except b:
            return 1

    graph, = self._build_cfg(test_fn).values()

    # TODO(mdan): raise and return should have an edge to the except blocks.
    # Triples are (predecessors, node, successors), keyed by node source text.
    self.assertGraphMatches(
        graph,
        (
            ('a, b', '(a > 0)', ('raise b', 'return 0')),
            ('(a > 0)', 'raise b', None),
            ('(a > 0)', 'return 0', None),
            (None, 'return 1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a, b', 'Try:2', None),
            ('a, b', 'If:3', None),
            (None, 'ExceptHandler:7', None),
        ),
    )
def test_list_comprehension(self):
    """A list comprehension is treated as a single opaque CFG node."""
    def test_fn(a):
        c = [b for b in a]
        return c

    graph, = self._build_cfg(test_fn).values()

    # Triples are (predecessors, node, successors), keyed by node source text.
    self.assertGraphMatches(
        graph,
        (
            ('a', 'c = [b for b in a]', 'return c'),
            ('c = [b for b in a]', 'return c', None),
        ),
    )
# Allow running this test file directly.
if __name__ == '__main__':
    test.main()
|
alsrgv/tensorflow
|
tensorflow/python/autograph/pyct/cfg_test.py
|
Python
|
apache-2.0
| 31,083
|
[
"VisIt"
] |
3696c2e8f50e42db984df5c6149e32d50115eb8cfcbff1bf672f6a413364a153
|
""" Class to solve the creation, serialisation etc. of NetCDF files used
in the BioVel Biome-BGC project. The module is used from the
:mod:`ResultEvaluator`.
"""
# Copyright (c) 2014 MTA Centre for Ecological Research
# Distributed under the GNU GPL v3. For full terms see the file LICENSE.
from Scientific.IO import NetCDF
class BBGCNetCDF:
    """ Container of the result data of a MonteCarlo Biome-BGC
        experiment

        Thin writer wrapper around a `Scientific.IO.NetCDF` file: each
        `insert_*` call creates the dimensions/variables it needs and
        assigns the data; `close` flushes the file to disk.
    """

    #-----------------------------------------------------------------------
    def __init__(self, file_name, repeat_num):
        """
        BBGC NetCDF output.

        :param file_name: Name of the netcdf file.
        :type file_name: str.
        :param repeat_num: Number of Monte-Carlo repetitions; stored as the
                           'repeatNum' dimension shared by all result arrays.
        :type repeat_num: int.
        """
        #self.project_dir = project_dir
        #self.project_name = project_name
        # 'w' truncates any existing file with the same name.
        self.netcdf = NetCDF.NetCDFFile(file_name, 'w')
        #print('RepeatNum: '+str(repeat_num))
        self.netcdf.createDimension('repeatNum', repeat_num)

    #-----------------------------------------------------------------------
    def insert_rand_input_params(self, param_names, param_values):
        """
        Insert the values into a matrix and the names of randomised
        input variables into a 2D character array, where width
        of the array is the length of the longest name.

        :param param_names: List of the randomised input parameter names.
        :type param_names: List of strings.
        :param param_values: Matrix of the input parameters * repeat num.
        :type param_values: List of float lists.
        """
        # parameter_names matrix
        max_name_len = max(map(len, param_names))
        name_list = []
        for name in param_names:
            # Right-pad every name with spaces to the common width so it fits
            # the fixed-width character matrix.
            name_list.append(list(name.encode('ascii', 'ignore')) + ([' '] * (max_name_len - len(name)) ))
        #print(name_list)
        self.netcdf.createDimension('inputParamNum', len(param_names))
        self.netcdf.createDimension('inputParamMaxLen', max_name_len)
        self.netcdf.createVariable('inputParamNames','c',('inputParamNum','inputParamMaxLen'))
        tmp_var = self.netcdf.variables['inputParamNames']
        # Character variables are filled element-wise; bulk assignValue is
        # left commented out below.
        for i in range(0,len(param_names)):
            for j in range(0,max_name_len):
                #print(name_list[i][j])
                tmp_var[i,j] = name_list[i][j]
        #tmp_var.assignValue(name_list)

        # randomised input parameter matrix
        self.netcdf.createVariable('inputParams', 'f', ('repeatNum','inputParamNum'))
        tmp_var = self.netcdf.variables['inputParams']
        tmp_var.assignValue(param_values)

    #-----------------------------------------------------------------------
    def insert_outputs(self, annual_variables, daily_variables,
                       annout=None, monavgout=None, annavgout=None, dayout=None):
        """ Insert the output variables into the NetCDF file

        :param annual_variables: Ids of annual output variables.
        :type daily_variables: List of ints.
        :param daily_variables: Ids of daily output variables.
        :type daily_variables: List of ints.
        :param annout: Values of annual output. Repeat num x var num. x years.
        :type annout: 3D float List.
        :param monavgout: Monthly-average daily output (repeat x month x var),
                          optional.
        :param annavgout: Annual-average daily output (repeat x year x var),
                          optional.
        :param dayout: Daily output (repeat x day x var), optional.
        """
        # Tracks whether the shared 'yearNum' dimension was already created
        # (it is used by both annout and annavgout).
        year_num_dim = False

        ann_var_dim_name = 'annualVarNum'
        ann_var_ids_name = 'annualVarIds'
        self.netcdf.createDimension('annualVarNum',len(annual_variables))
        self.netcdf.createVariable('annualVarIds', 'i', ('annualVarNum',))
        self.netcdf.variables['annualVarIds'].assignValue(annual_variables)

        day_var_dim_name = 'dailyVarNum'
        day_var_ids_name = 'dailyVarIds'
        self.netcdf.createDimension(day_var_dim_name,len(daily_variables))
        self.netcdf.createVariable(day_var_ids_name, 'i', (day_var_dim_name,))
        self.netcdf.variables[day_var_ids_name].assignValue(daily_variables)

        if annout:
            self.netcdf.createDimension('yearNum',len(annout[0]))
            year_num_dim = True
            self.netcdf.createVariable('annout', 'f', ('repeatNum' ,'yearNum' ,ann_var_dim_name))
            self.netcdf.variables['annout'].assignValue(annout)
        if monavgout:
            self.netcdf.createDimension('monthNum',len(monavgout[0]))
            self.netcdf.createVariable('monavgout', 'f', ('repeatNum','monthNum' ,day_var_dim_name))
            self.netcdf.variables['monavgout'].assignValue(monavgout)
        if annavgout:
            # NOTE(review): assumes annavgout spans the same number of years
            # as annout when both are given — confirm with callers.
            if not year_num_dim:
                self.netcdf.createDimension('yearNum',len(annavgout[0]))
            self.netcdf.createVariable('annavgout', 'f', ('repeatNum','yearNum' ,day_var_dim_name))
            self.netcdf.variables['annavgout'].assignValue(annavgout)
        if dayout:
            self.netcdf.createDimension('dayNum',len(dayout[0]))
            self.netcdf.createVariable('dayout', 'f', ('repeatNum','dayNum' ,day_var_dim_name))
            self.netcdf.variables['dayout'].assignValue(dayout)

    #-----------------------------------------------------------------------
    def close(self):
        """Flush and close the underlying NetCDF file."""
        self.netcdf.close()
|
icce-p/bbgc-ws
|
BBGCNetCDF.py
|
Python
|
gpl-3.0
| 5,082
|
[
"NetCDF"
] |
b18eddcf8b1fee20e2ef883e29992c193a78e9e311304f7949b7e442577a2d39
|
#!/usr/bin/python
#FILE DESCRIPTION=======================================================
#~ Python script used for post-processing automatization of flow on an
#~ inclined (?textured?) plate (minimal postprocessing to test the results)
#~
#~ NOTES:
#~ - still unfinished BUT improvement
#~ USAGE:
#~ paraFoam --script=./postProcMinimal.py
#LICENSE================================================================
# postProcMinimal.py
#
# Copyright 2015 Martin Isoz <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# PARAMETERS============================================================
xAll = 0.025  # horizontal position (normalized viewport coords) of on-screen annotations
ySkip= 0.07   # NOTE(review): appears unused in this script — confirm before removing
# POSTPROCESSING INITIATION=============================================
import glob

# ParaView's paraview.simple names (GetActiveSource, Show, Render, ...) are
# injected by paraFoam when run via --script; they are not imported here.
mainCase = glob.glob('./*.OpenFOAM')  #works only for the cases with 1 foam file

paraview.simple._DisableFirstRenderCameraReset()

activeSource_OpenFOAM = GetActiveSource()

# enable all available fields
activeSource_OpenFOAM.VolumeFields = ['alpha.liquid', 'p_rgh', 'U']

# show all for internal mesh
activeSource_OpenFOAM.MeshParts = ['internalMesh']

# I dont want to see the main mesh / I do want to see it as transparent wireframe
allIntMeshRepresentation = GetDisplayProperties( activeSource_OpenFOAM )
allIntMeshRepresentation.Visibility = 0
allIntMeshRepresentation.Representation = 'Wireframe'
allIntMeshRepresentation.Opacity = 0.1

# set up black background (seems prettier)
RenderView1 = GetRenderView()
RenderView1.UseTexturedBackground = 0
RenderView1.Background = [0.0, 0.0, 0.0]

activeSource_OpenFOAM = FindSource( mainCase[0] )

# CREATE A SCALAR CLIP - SHOW ONLY THE RIVULET==========================
# Keep only the cells where alpha.liquid >= 0.5 (the liquid phase).
liqOnly = Clip( ClipType="Scalar", guiName="liqOnly" )
liqOnly.Scalars = ['POINTS', 'alpha.liquid']
liqOnly.Value = 0.5

liqOnlyRepresentation = Show()
liqOnlyRepresentation.Representation = 'Surface'
liqOnlyRepresentation.Visibility = 0

# COLOR THE FILM BY FILM THICKNESS (CALCULATOR+PROPER COLORING)=========
# The film thickness is taken as the z coordinate of the clipped surface.
fThCalc = Calculator( guiName="fThCalc" )
fThCalc.Function = 'coordsZ'
fThCalc.ResultArrayName = 'hFun'

fThCalcRepresentation = Show()
fThCalcRepresentation.Visibility = 1

# SCALAR BAR============================================================
# ADD CASE TITLE========================================================
# ADD ANOTATE TIME SOURCE===============================================
annotTime = AnnotateTime()
annotTimeRepresentation = Show()
annotTime.Format = '$\mathrm{Time:\,%5.2f\,s}$'
annotTimeRepresentation.FontFamily = 'Courier'
annotTimeRepresentation.Position = [xAll, 0.025]
annotTimeRepresentation.Visibility = 1

Render()

# POST RUNNING MODIFICATIONS============================================
# Jump to the final time step so the color range is taken from the end state.
AnimationScene1 = GetAnimationScene()
AnimationScene1.GoToLast()

Render()

# SCALAR BAR============================================================
source = fThCalc  #where to get the data
data = source.GetPointDataInformation()

#get the array and the respective min-max
array = data.GetArray('hFun')
dataRange = array.GetRange(0)  #-1 for magnitude

colorObjectRepresentation = fThCalcRepresentation  #what object will be colored

a0_hFun_PVLookupTable = GetLookupTableForArray( "hFun", 0,
    #~ RGBPoints=[0.0, 0.0, 0.0, 0.0, dataRange[1], 1.0, 1.0, 1.0], #grayscale coloring
    #~ ColorSpace='RGB',
    RGBPoints=[0.0, 0.0, 0.0, 1.0, dataRange[1], 1.0, 0.0, 0.0], #classical rainbow coloring
    ColorSpace='HSV',
    ScalarRangeInitialized=1.0 )

a0_hFun_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )

colorObjectRepresentation.Representation = 'Surface'
colorObjectRepresentation.ColorArrayName = ('POINT_DATA', 'hFun')
colorObjectRepresentation.LookupTable = a0_hFun_PVLookupTable
a0_hFun_PVLookupTable.ScalarOpacityFunction = a0_hFun_PiecewiseFunction

Render()

ScalarBarWidgetRepresentation = CreateScalarBar( Title='$h(x,y),[\mathrm{m}]$',
    ComponentTitle = '',
    LabelFontSize = 12,
    Enabled = 1,
    LookupTable = a0_hFun_PVLookupTable,
    TitleFontSize = 14,
    AutomaticLabelFormat = 0,
    LabelFormat = '$%-#5.2e$',
    RangeLabelFormat = '$%-#5.2e$',
    )
RenderView1.Representations.append(ScalarBarWidgetRepresentation)

Render()

# SET PROPER CAMERA POSITION============================================
ResetCamera()
RenderView1 = GetRenderView()
RenderView1.CameraViewUp = [-0.9, 0.1, 0.4]
RenderView1.CameraPosition = [0.06,0.14,0.09]

Render()

# LOAD THE CASE AGAIN TO DISPLAY THE CHANNEL============================
# A second reader instance so the walls can be shown independently of the
# internal-mesh pipeline above.
showWalls = PV4FoamReader(FileName=mainCase[0], guiName='showWalls')
showWalls.MeshParts = ['wall - group']
showWalls.VolumeFields = []

showWallsRepresentation = Show()
showWallsRepresentation.Representation = 'Surface'
showWallsRepresentation.Visibility = 1
showWallsRepresentation.DiffuseColor = [0.5529411764705883, 0.5529411764705883, 0.5529411764705883]

Render()

# ANIMATION SAVING (PURE IMAGES, NOT BLENDER)===========================
#~ eTime = float("%s"%AnimationScene1.GetProperty('Duration'))
#~
#~ AnimationScene1.GoToFirst()
#~ k = 0;
#~ cTime = float("%s"%AnimationScene1.GetProperty('AnimationTime'))
#~
#~ while (cTime < eTime):
#~     Render()
#~     #x3dExporter=exporters.X3DExporter(FileName='./x3dFiles/rivulet_%03d.x3d'% (k))
#~     #x3dExporter.SetView(GetActiveView()) # <===== NEW LINE
#~     #x3dExporter.Write()
#~     WriteImage('pvAnimation/plate_%03d.png'%k)
#~     AnimationScene1.GoToNext()
#~     k = k+1
#~     cTime = float("%s"%AnimationScene1.GetProperty('AnimationTime'))
|
MartinIsoz/CFD_oF
|
03_texturedPlate/03_pyrTexture/00_Scripts/postProcMinimalV2.py
|
Python
|
gpl-2.0
| 6,470
|
[
"ParaView"
] |
3c16fb4de454e170d220ddf1fc34893a2c8fb4d5bc22d4dc6feb5ee4a1b6a65d
|
from collections import OrderedDict
from numpy import argmax, append, exp, zeros, pi, asarray, argsort, diff, concatenate, int8, where, array, sqrt, square, ones, power, sum, mean, std, linspace, max, round, median, polyfit, vstack, random, greater, less, searchsorted, sort
from numpy import float as npfloat, log as nplog, round as np_round, around, float32, invert
from numpy.linalg import lstsq
from pymc import deterministic, stochastic, Normal, Uniform, MCMC, Bernoulli, stochastic_from_dist
from scipy.integrate import simps
from scipy.signal import argrelextrema
from uncertainties import ufloat
from bces_script import bces
from linfit_script import linfit
from lnr_script import kelly
from lmfit import Parameters, minimize as lmfit_minimize, fit_report
from pyneb import Atom
from lmfit.models import GaussianModel
class Fitting_Gaussians():
    """Gaussian emission/absorption line fitting via lmfit.

    All state for the line currently being measured is kept in the
    `Fitting_dict` ordered dict (normalized data, lmfit parameters, fit
    results, plotting resamples). Supports single lines, blended groups,
    an optional wide Halpha component, and Monte-Carlo error estimation.
    NOTE(review): written for Python 2 (`dict.keys() + list`,
    `del` on `range(...)` results) — confirm interpreter before porting.
    """

    def __init__(self):
        self.Combined = None
        self.MontecarloCheck = True
        self.MC_Iterations = 10
        self.NComps = 0
        self.GaussianSampling = 101

        # Per-line working state; reset/overwritten for each measured line.
        self.Fitting_dict = OrderedDict()
        self.Fitting_dict['Deblend check'] = False          #This logic is true when a blended group is observed
        self.Fitting_dict['Fitting method'] = None
        self.Fitting_dict['start treatment'] = False        #This logic is true to force the deblending process
        self.Fitting_dict['line group'] = None              #This integer provides the index for the group of lines describes the blended lines
        self.Fitting_dict['line number'] = None
        self.Fitting_dict['line label'] = None              #This string describes the label for the current blended line we are mesuring: H1_6563A
        self.Fitting_dict['blended label'] = None           #This string should be used for the blended label for the lines log
        self.Fitting_dict['blended number'] = None          #This integer describes the number of blended lines. Currently it must be equal to the number in the blended list elements.
        self.Fitting_dict['blended wavelengths']= None      #This list contains the wavelengths of the blended lines
        self.Fitting_dict['blended index'] = None           #This integer provides the index of the current line in a blended group
        self.Fitting_dict['kmpfit_dict'] = None             #This dictionaries containes the parameters for kmpfit
        self.Fitting_dict['Wide component'] = False         #This keyword informs if there is a wide component on the emission line
        self.Fitting_dict['line type'] = None               #Spectral feature type string: 'Absorption', 'Emission'
        self.Fitting_dict['peak_waves'] = None              #This list contains the wavelenghts of all the peaks
        self.Fitting_dict['y_scaler'] = None                #This is the magnitude used to scale the y flux (normaly the line peak or line higher peak)
        self.Fitting_dict['x_scaler'] = None                #This is the magnitude used to scale the wavelength values (normaly the line middle wavelength)
        self.Fitting_dict['x_resample'] = None              #x Gaussian resampling of the line required for plotting
        self.Fitting_dict['y_resample'] = None              #y Gaussian resampling of the line required for plotting
        self.Fitting_dict['y_resample_total'] = None        #y Gaussian resampling of the blended group line required for plotting
        self.Fitting_dict['ContinuumFlux'] = None           #Continuum intensity across the whole plotting region
        self.Fitting_dict['m_Continuum'] = None             #Assuming a line this is the gradient (m)
        self.Fitting_dict['n_Continuum'] = None             #Assuming a line this is the y axis interception point (n)
        self.Fitting_dict['zerolev_resample'] = None        #Continuum level resampling at the line
        self.Fitting_dict['zerolev_median'] = None          #Mean level at the line center
        self.Fitting_dict['zerolev_sigma'] = None           #Continuum dispersion assuming linear shape
        self.Fitting_dict['ContinuumWidth'] = None          #This is the number of pixels manually selected
        self.Fitting_dict['x_norm'] = None
        self.Fitting_dict['y_norm'] = None
        self.Fitting_dict['zerolev_norm'] = None
        self.Fitting_dict['sig_zerolev_norm'] = None
        self.Fitting_dict['lmfit_params'] = None            #lmfit parameters dict
        self.Fitting_dict['lmfit_params_wide'] = None       #lmfit parameters dict
        self.Fitting_dict['MC_iteration'] = None            #Number of iterations for calculation (1 for normal 1000 for MC)
        self.Fitting_dict['lmfit_output'] = None
        self.Fitting_dict['FluxI_N_vector'] = None          #This vectors holds all the fluxes calculated for the case of a gaussian fit
        self.Fitting_dict['parameters_list'] = None         #This list contains all the parameters which are fitted for a line: A1, mu1, sigma1, A2, mu2, sigma2 ...
        self.Fitting_dict['FluxI'] = None
        self.Fitting_dict['Add_wideComponent'] = None       #Extra step to procced to the wide component calculation
        self.Fitting_dict['WC_theowavelength'] = None       #Gaussian_Coefficients coefficients
        self.Fitting_dict['wide mask'] = None
        self.Fitting_dict['Maxima_Waves'] = None            #To store the data from the best peaks location
        self.Fitting_dict['Maxima_peaks'] = None

        # Gauss-Hermite expansion coefficients.
        self.GHcoeffs = {}
        self.GHcoeffs['c0'] = sqrt(6.0) / 4.0
        self.GHcoeffs['c1'] = -sqrt(3.0)
        self.GHcoeffs['c2'] = -sqrt(6.0)
        self.GHcoeffs['c3'] = 2.0 * sqrt(3.0) / 3.0
        self.GHcoeffs['c4'] = sqrt(6.0) / 3.0

        self.skeness_limit = {'fixed':(False)}
        self.kutorsis_limit = {'fixed':(False)}

        self.skeness_Glimit = {'fixed':(True)}
        self.kutorsis_Glimit = {'fixed':(True)}

        # Theoretical [NII] 6584/6548 emissivity ratio from pyneb, used to tie
        # the nitrogen doublet fluxes in blended Halpha fits.
        N2 = Atom('N', 2)
        N2_6548A = N2.getEmissivity(tem=10000, den=100, wave=6548)
        N2_6584A = N2.getEmissivity(tem=10000, den=100, wave=6584)
        self.N2_Ratio = N2_6584A / N2_6548A

        self.sqrt2pi = sqrt(2*pi)

    def lmfit_gaussian_Residual(self, params, x, y, zerolev, Ncomps, err):
        """Error-weighted residual of the narrow multi-Gaussian model."""
        return (self.gaussian_curve_SingleMixture(params.valuesdict(), x, zerolev, Ncomps) - y) / err

    def lmfit_gaussian_Residual_wide(self, params, x, y, zerolev, Ncomps, err):
        """Error-weighted residual of the wide-component model."""
        return (self.gaussian_curve_SingleMixture_wide(params.valuesdict(), x, zerolev, Ncomps) - y) / err

    def gaussian_curve_SingleMixture(self, params, x, zerolev, Ncomps):
        """Sum of `Ncomps` Gaussians (indices 0..Ncomps-1) plus continuum."""
        y_model = 0.0
        for i in range(Ncomps):
            index = str(i)
            A = params['A' + index]
            mu = params['mu' + index]
            sigma = params['sigma' + index]
            y_model = y_model + A * exp(-(x-mu)*(x-mu)/(2*sigma*sigma))
        return y_model + zerolev

    def gaussian_curve_SingleMixture_wide(self, params, x, zerolev, Ncomps):
        """Like gaussian_curve_SingleMixture, but Ncomps is a list of index
        strings (e.g. ['3'] for the wide component)."""
        y_model = 0.0
        for index in Ncomps:
            A = params['A' + index]
            mu = params['mu' + index]
            sigma = params['sigma' + index]
            y_model = y_model + A * exp(-(x-mu)*(x-mu)/(2*sigma*sigma))
        return y_model + zerolev

    def gaussian_curve_SingleMixture_from_dict(self, dict, x, zerolev, Ncomps):
        """Evaluate the mixture from a dict of `uncertainties` ufloats (uses
        nominal values only). Ncomps is a list of index strings.
        NOTE(review): parameter name `dict` shadows the builtin."""
        y_model = 0.0
        for comps in Ncomps:
            A = dict['A' + comps].nominal_value
            mu = dict['mu' + comps].nominal_value
            sigma = dict['sigma' + comps].nominal_value
            y_model = y_model + A * exp(-(x-mu)*(x-mu)/(2*sigma*sigma))
        return y_model + zerolev

    def Load_lmfit_parameters(self, x, y, zerolev, err_zerolev, n_comps, wide_component = False, A_limits = 0.30, mu_precission = 1, sigma_limit = 5):
        """Normalize the line data and build the lmfit Parameters objects.

        Scales x by the wavelength of the flux maximum and y by the peak
        flux, locates the component peaks, and stores the resulting
        parameter sets in Fitting_dict ('lmfit_params', and
        'lmfit_params_wide' when a wide component is requested).
        """
        # Scale parameters
        ind_max = argmax(y)
        self.Fitting_dict['x_scaler'] = x[ind_max]
        self.Fitting_dict['y_scaler'] = y[ind_max]

        # Scale the range
        self.Fitting_dict['x_norm'] = x - self.Fitting_dict['x_scaler']
        self.Fitting_dict['y_norm'] = y / self.Fitting_dict['y_scaler']
        self.Fitting_dict['zerolev_norm'] = zerolev / self.Fitting_dict['y_scaler']
        self.Fitting_dict['sig_zerolev_norm'] = err_zerolev / self.Fitting_dict['y_scaler']

        # Get line maxima and minima
        peak_wave, peak_flux, minima_wave, minima_flux = self.get_lines_peaks(ind_max, n_comps)

        # Store peaks location for log
        self.Fitting_dict['peak_waves'] = peak_wave + self.Fitting_dict['x_scaler']
        self.Fitting_dict['peak_Maxima'] = peak_flux * self.Fitting_dict['y_scaler']

        # Lmfit dictionary
        params = Parameters()
        for i in range(n_comps):
            index = str(i)
            params.add('A' + index, value = peak_flux[i] - mean(self.Fitting_dict['zerolev_norm']), min = 0.0)
            params.add('mu' + index, value = peak_wave[i], min = peak_wave[i] - mu_precission, max = peak_wave[i] + mu_precission)
            params.add('sigma' + index, value = 1, min = 0)
            #params.add('fwhm' + index, expr = '2.354820045 * {sigma}'.format(sigma = 'sigma' + index))
            params.add('FluxG' + index, expr = '{A} * {sigma} * {sqrt2pi}'.format(A = 'A' + index, sigma = 'sigma' + index, sqrt2pi = self.sqrt2pi))
            #params.add('FluxG' + index, expr = '({A}*{fwhm})/(2.35*0.3989)'.format(A = 'A' + index, fwhm = 'fwhm' + index))

        # For blended components we set the same wavelength:
        if n_comps > 1:
            Highest_index = argmax(self.Fitting_dict['peak_Maxima'])
            # Py2: range() returns a list, so del works here.
            small_components = range(n_comps)
            del small_components[Highest_index]

            for indx in small_components:
                # We set the same sigma (scaled by the wavelength ratio)
                index_small = str(indx)
                expresion = 'sigma{index_big} * ( (mu{index_small} + {scaller}) / (mu{index_big} + {scaller}) )'.format(index_big = Highest_index, index_small = index_small, scaller = self.Fitting_dict['x_scaler'])
                params['sigma' + index_small].set(expr = expresion)

                # We force the theoretical - biggest mu
                #expresion = '{mu_small} - mu{index_big}'.format(mu_small = self.Fitting_dict['blended wavelengths'][indx] - self.Fitting_dict['x_scaler'], index_big = Highest_index)
                #params['mu' + index_small].set(expr = expresion)

        # Special condition: Wide componentine in Halpha
        Wide_params_list = []
        if self.Fitting_dict['Add_wideComponent']:
            # Additional fitter
            params_W = Parameters()

            # TRICK TO ADD AN ADDITIONAL VALUE
            n_nindex = str(n_comps)
            params_W.add('A' + n_nindex, value = 0.2, min = 0)
            params_W.add('mu' + n_nindex, value = 0.0)
            params_W.add('sigma' + n_nindex, value = 6, min = 3, max = 20.0)
            #params_W.add('fwhm' + n_nindex, expr = '2.354820045 * {sigma}'.format(sigma = 'sigma' + n_nindex))
            #params_W.add('FluxG'+ n_nindex, expr = '({A}*{fwhm})/(2.35*0.3989)'.format(A = 'A' + n_nindex, fwhm = 'fwhm' + n_nindex))
            params_W.add('FluxG' + n_nindex, expr = '{A} * {sigma} * {sqrt2pi}'.format(A = 'A' + n_nindex, sigma = 'sigma' + n_nindex, sqrt2pi = self.sqrt2pi))
            Wide_params_list = params_W.keys()

            # Update for Nitrogen relation: Mode 1 adjuxt the fluxes
            # NOTE(review): hard-codes 2.94 instead of self.N2_Ratio — confirm.
            params['FluxG0'].set(expr = 'FluxG2 / {N2_ratio}'.format(N2_ratio = 2.94))

            # Update for Nitrogen relation: Mode 2 adjust the amplitudes
            # params['A0'].set(expr = '(A2*sigma2) / ({N2_ratio}*sigma0) '.format(N2_ratio = 2.94))

            self.Fitting_dict['lmfit_params_wide'] = params_W

        # Store the data
        self.Fitting_dict['lmfit_params'] = params
        self.Fitting_dict['parameters_list'] = array(params.keys() + Wide_params_list)

        return

    def get_lines_peaks(self, ind_max, Ncomps):
        """Locate the peak (and minima) positions/fluxes in normalized units.

        Single lines use the global maximum; blended groups match local
        extrema against the expected blended wavelengths.
        """
        target_wavelengths = None

        # Get feature geometry:
        #-- Single line
        if self.Fitting_dict['Deblend check'] == False:
            peak_flux = array([self.Fitting_dict['y_norm'][ind_max]])
            peak_wave = array([self.Fitting_dict['x_norm'][ind_max]])
            minima_wave = 0.0
            minima_flux = 0.0

        #--Blended line
        else:
            max_index, min_index = argrelextrema(self.Fitting_dict['y_norm'], greater)[0], argrelextrema(self.Fitting_dict['y_norm'], less)[0]
            List_blended_lines = self.Fitting_dict['Blended list'][1][self.Fitting_dict['line group']]

            # With wide component #ONLY WORKS FOR THE BLENDED HALPHA SCHEME
            if self.Fitting_dict['Add_wideComponent'] == False:
                target_wavelengths = array(List_blended_lines) - self.Fitting_dict['x_scaler']
            else:
                # Duplicate the central (Halpha) wavelength as the wide-component target.
                target_wavelengths = array(List_blended_lines + [List_blended_lines[1]]) - self.Fitting_dict['x_scaler']

            maxima_wavelengths = sort(self.Fitting_dict['x_norm'][max_index])
            minima_wavelengths = sort(self.Fitting_dict['x_norm'][min_index])

            if len(max_index) == Ncomps:
                peak_flux, minima_flux = self.Fitting_dict['y_norm'][max_index], self.Fitting_dict['y_norm'][min_index]
                peak_wave, minima_wave = maxima_wavelengths, minima_wavelengths
            else:
                # Fewer/more extrema than components: pick the maxima closest
                # to the expected wavelengths.
                closest_indeces = self.search_targets_in_array(maxima_wavelengths, target_wavelengths)
                peak_wave, peak_flux = self.Fitting_dict['x_norm'][max_index][closest_indeces], self.Fitting_dict['y_norm'][max_index][closest_indeces]

            if self.Fitting_dict['Add_wideComponent']:
                if len(peak_wave) == len(target_wavelengths) - 1:
                    # Seed the missing wide component at the line center.
                    peak_wave = append(peak_wave, [0])
                    peak_flux = append(peak_flux, [0.1])

            minima_wave, minima_flux = self.Fitting_dict['x_norm'][min_index], self.Fitting_dict['y_norm'][min_index]

        self.Fitting_dict['Maxima_Waves'] = peak_wave + self.Fitting_dict['x_scaler']
        self.Fitting_dict['Maxima_peaks'] = peak_flux * self.Fitting_dict['y_scaler']

        return peak_wave, peak_flux, minima_wave, minima_flux

    def search_targets_in_array(self, known_array, test_array):
        # This function gives the indeces of the closest values within a sorted array
        index_sorted = argsort(known_array)
        known_array_sorted = known_array[index_sorted]
        known_array_middles = known_array_sorted[1:] - diff(known_array_sorted.astype('f'))/2
        idx1 = searchsorted(known_array_middles, test_array)
        indices = index_sorted[idx1]
        return indices

    def fit_line(self, x, y, zero_lev, err_continuum, Ncomps, fitting_parameters, fitting_parameters_wide, iterations):
        """Fit the line `iterations` times (1 = plain fit, >1 = Monte Carlo
        with Gaussian noise added), storing normalized results in Fitting_dict.

        When 'Add_wideComponent' is set, a wide component is fitted on a
        masked wavelength range and subtracted before refitting the narrow
        components (Halpha-specific scheme).
        """
        # Number of x points in the spectral line
        x_Grid_Length = len(x)

        # Generate empty containers to store the data
        self.Fitting_dict['FluxI_N_vector'] = zeros(iterations)
        for key in self.Fitting_dict['parameters_list']:
            self.Fitting_dict[key + '_norm'] = zeros(iterations)
            self.Fitting_dict[key + '_norm_er'] = zeros(iterations)

        # Loop through the iterations (Only 1 if it is not a bootstrap)
        for i in range(iterations):

            # Fit narrow component
            if i == 0:
                y_new = y
            else:
                noise_array = random.normal(0.0, err_continuum, x_Grid_Length).astype(float32)
                y_new = y + noise_array

            fit_Output = lmfit_minimize(self.lmfit_gaussian_Residual, fitting_parameters, args=(x, y_new, zero_lev, Ncomps, err_continuum))
            output_params = fit_Output.params

            # Case with a wide component
            if self.Fitting_dict['Add_wideComponent']:
                # Case for halpha: mask out the narrow [NII] and Halpha cores
                # so only the wings constrain the wide component.
                sigma_limit = fit_Output.params['sigma1'].value
                limit_0 = 6548.05 - self.Fitting_dict['x_scaler'] - sigma_limit * 1.5
                limit_1 = 6548.05 - self.Fitting_dict['x_scaler'] + sigma_limit * 1.5
                limit_2 = 0 - sigma_limit * 4
                limit_3 = 0 + sigma_limit * 4
                limit_4 = 6583.46 - self.Fitting_dict['x_scaler'] - sigma_limit * 3
                limit_5 = 6583.46 - self.Fitting_dict['x_scaler'] + sigma_limit * 3

                indeces = ((x >= limit_0) & (x <= limit_1)) + ((x >= limit_2) & (x <= limit_3)) + ((x >= limit_4) & (x <= limit_5))
                mask = invert(indeces)
                self.Fitting_dict['wide mask'] = mask

                x_wide = x[mask]
                y_wide = y_new[mask]
                zero_wide = zero_lev[mask]
                Ncomps_wide = ['3']

                fit_Output_wide = lmfit_minimize(self.lmfit_gaussian_Residual_wide, fitting_parameters_wide, args=(x_wide, y_wide, zero_wide, Ncomps_wide, err_continuum))

                # Subtract the wide component and refit the narrow lines on
                # the "clean" emission.
                y_wide = self.gaussian_curve_SingleMixture_wide(fit_Output_wide.params.valuesdict(), x, zero_lev, Ncomps_wide)
                y_pure_emission = y_new - y_wide + zero_lev
                self.Fitting_dict['emis_limpio'] = y_pure_emission

                fit_Output_emission = lmfit_minimize(self.lmfit_gaussian_Residual, fitting_parameters, args=(x, y_pure_emission, zero_lev, Ncomps, err_continuum))
                output_params = fit_Output_emission.params + fit_Output_wide.params

            # Store the integrated flux
            self.Fitting_dict['FluxI_N_vector'][i] = simps(y_new, x) - simps(zero_lev, x)

            # Store the fitting parameters
            for key in self.Fitting_dict['parameters_list']:
                self.Fitting_dict[key + '_norm'][i] = output_params[key].value
                self.Fitting_dict[key + '_norm_er'][i] = output_params[key].stderr

        # Store the output fit (Only used for single line output)
        self.Fitting_dict['lmfit_output'] = fit_Output

        # Finally increase the number of components in case of a wide component
        if self.Fitting_dict['Add_wideComponent']:
            self.Fitting_dict['blended number'] = self.Fitting_dict['blended number'] + 1

        return

    # def fit_line_together(self, x, y, zero_lev, err_continuum, Ncomps, fitting_parameters, fitting_parameters_wide, iterations):
    #
    #     #Number of x points in the spectral line
    #     x_Grid_Length = len(x)
    #
    #     #Generate empty containers to store the data
    #     self.Fitting_dict['FluxI_N_vector'] = zeros(iterations)
    #     for key in self.Fitting_dict['parameters_list']:
    #         self.Fitting_dict[key + '_norm'] = zeros(iterations)
    #         self.Fitting_dict[key + '_norm_er'] = zeros(iterations)
    #
    #     #Loop through the iterations (Only 1 if it is not a bootstrap)
    #     for i in range(iterations):
    #
    #         #Fit narrow component
    #         if i == 0:
    #             y_new = y
    #         else:
    #             noise_array = random.normal(0.0, err_continuum, x_Grid_Length).astype(float32)
    #             y_new = y + noise_array
    #
    #         fit_Output = lmfit_minimize(self.lmfit_gaussian_Residual, fitting_parameters, args=(x, y_new, zero_lev, Ncomps, err_continuum))
    #         output_params = fit_Output.params
    #
    #         #Store the integrated flux
    #         self.Fitting_dict['FluxI_N_vector'][i] = simps(y_new, x) - simps(zero_lev, x)
    #
    #         #Store the fitting parameters
    #         for key in self.Fitting_dict['parameters_list']:
    #             self.Fitting_dict[key + '_norm'][i] = output_params[key].value
    #             self.Fitting_dict[key + '_norm_er'][i] = output_params[key].stderr
    #
    #     #Store the output fit (Only used for single line output)
    #     self.Fitting_dict['lmfit_output'] = fit_Output
    #
    #     return

    def scale_lmfit_params(self, line_wave, line_flux, fit_output, Ncomps, x_scale, y_scale, fitting_method):
        """Undo the x/y normalization on the fitted parameters (as ufloats)
        and build the resampled curves used for plotting.

        Simple fits take errors from lmfit's stderr; MC fits take the mean
        and std of the per-iteration values.
        """
        # Scale the integrated flux (The same for all schemes)
        flux_I_N = mean(self.Fitting_dict['FluxI_N_vector'])

        # Scale the gaussian parameters (divided since they have different error calculation)
        #--Simple fitting
        if 'MC' not in fitting_method:
            # For simple fitting the integrated flux has no error, but the gaussian components do
            self.Fitting_dict['FluxI'] = ufloat(flux_I_N, 0.0) * y_scale
            for i in range(Ncomps):
                index = str(i)
                self.Fitting_dict['A' + index] = ufloat(self.Fitting_dict['A' + index + '_norm'], self.Fitting_dict['A' + index + '_norm_er']) * y_scale
                self.Fitting_dict['mu' + index] = ufloat(self.Fitting_dict['mu' + index + '_norm'], self.Fitting_dict['mu' + index + '_norm_er']) + x_scale
                self.Fitting_dict['sigma' + index] = ufloat(self.Fitting_dict['sigma' + index + '_norm'], self.Fitting_dict['sigma' + index + '_norm_er'])
                self.Fitting_dict['FluxG' + index] = ufloat(self.Fitting_dict['FluxG' + index + '_norm'], self.Fitting_dict['FluxG' + index + '_norm_er']) * y_scale

        #--Bootstrap
        else:
            # For MC fitting the integrated flux has error
            self.Fitting_dict['FluxI'] = ufloat(flux_I_N, std(self.Fitting_dict['FluxI_N_vector'])) * y_scale
            for i in range(Ncomps):
                index = str(i)
                self.Fitting_dict['A' + index] = ufloat(mean(self.Fitting_dict['A' + index + '_norm']), std(self.Fitting_dict['A' + index + '_norm'])) * y_scale
                self.Fitting_dict['mu' + index] = ufloat(mean(self.Fitting_dict['mu' + index + '_norm']), std(self.Fitting_dict['mu' + index + '_norm'])) + x_scale
                self.Fitting_dict['sigma' + index] = ufloat(mean(self.Fitting_dict['sigma' + index + '_norm']), std(self.Fitting_dict['sigma' + index + '_norm']))
                self.Fitting_dict['FluxG' + index] = ufloat(mean(self.Fitting_dict['FluxG' + index + '_norm']), std(self.Fitting_dict['FluxG' + index + '_norm'])) * y_scale
            # NOTE(review): duplicate of the 'FluxI' assignment above — redundant.
            self.Fitting_dict['FluxI'] = ufloat(flux_I_N, std(self.Fitting_dict['FluxI_N_vector'])) * y_scale

        # Calculate the gaussian curve for plotting
        self.Fitting_dict['x_resample'] = linspace(line_wave[0], line_wave[-1], 50 * Ncomps)
        self.Fitting_dict['zerolev_resample'] = self.Fitting_dict['m_Continuum'] * self.Fitting_dict['x_resample'] + self.Fitting_dict['n_Continuum']
        # First entry is the combined profile; individual components follow.
        self.Fitting_dict['y_resample'] = [self.gaussian_curve_SingleMixture_from_dict(self.Fitting_dict, self.Fitting_dict['x_resample'], self.Fitting_dict['zerolev_resample'], Ncomps = map(str, range(Ncomps)))]

        # We store all the gaussians for plotting alongside the big component
        for j in range(Ncomps):
            Comp = [str(j)]
            self.Fitting_dict['y_resample'].append(self.gaussian_curve_SingleMixture_from_dict(self.Fitting_dict, self.Fitting_dict['x_resample'], self.Fitting_dict['zerolev_resample'], Ncomps = Comp))

        return
class Bayesian_regressions():
    """PyMC (v2) model factories for Bayesian linear regressions.

    Each model method returns locals() so the caller can hand the dictionary
    of stochastic/deterministic nodes straight to pymc.MCMC.
    """

    def __init__(self):
        # Regression scheme identifier (set externally by the caller)
        self.Methodology = None
        # Posterior inlier probability below which a point is flagged as an outlier
        self.prob_threshold = 0.40

    def lr_ChiSq(self, x_array, y_array, m_0, n_0):
        """Chi-squared straight-line model y = m*x + n with Normal priors on
        slope/intercept centred on (m_0, n_0) and a Uniform sigma prior.

        NOTE(review): the x_array/y_array arguments are unused -- the observed
        node below reads self.x_array and self.y_error instead; self.y_array
        looks like the intended observable, confirm before relying on this.
        """
        m = Normal('m', m_0, 0.01)
        n = Normal('n', n_0, 0.01)
        sigma = Uniform('sigma', 0.0, 5.0)

        @stochastic(observed=True)
        def model(value = self.y_error, x_values = self.x_array, m = m, n = n, sigma = sigma):
            # Log-likelihood: -chi2/2 of the straight-line prediction
            value_theo = m*x_values + n
            chi_sq = sum(square(value - value_theo) / square(sigma))
            log_ChiSq = - chi_sq / 2.0
            return log_ChiSq

        # locals() hands every node defined above to the MCMC sampler
        return locals()

    def inference_outliers(self, x_array, y_array, m_0, n_0, spread_vector):
        """Linear regression with outlier detection: the data likelihood is a
        mixture of an inlier gaussian (width spread_vector) and an outlier
        gaussian (mean_outliers, spread_outliers); returns locals() for MCMC.
        """
        # Mixture weight and outlier-population hyper-parameters
        outlier_points = Uniform('outlier_points', 0, 1.0, value=0.1)
        mean_outliers = Uniform('mean_outliers', -100, 100, value=0)
        spread_outliers = Uniform('spread_outliers', -100, 100, value=0)

        @stochastic
        def slope_and_intercept(slope = m_0):
            # Uninformative prior on the line angle (flat in arctan(slope))
            # NOTE(review): this node's logp depends on the slope only, yet
            # model_ below unpacks its value as (slope, intercept) -- verify
            # the node value really is the 2-vector expected downstream.
            prob_slope = nplog(1. / (1. + slope ** 2))
            return prob_slope

        @deterministic
        def model_(x=x_array, slope_and_intercept=slope_and_intercept):
            # Deterministic straight-line prediction for the current sample
            slope, intercept = slope_and_intercept
            fit = slope * x + intercept
            return fit

        # Per-point inlier/outlier latent flags
        inlier = Bernoulli('inlier', p=1 - outlier_points, value=zeros(x_array.size))

        def log_posterior_likelihood_of_outlier(y_with_outlier, mu, spread_vector, inlier, mean_outliers, spread_outliers):
            # Mixture log-likelihood: inlier term uses the per-point spread,
            # outlier term widens it by spread_outliers around mean_outliers
            inlier_posterior = sum(inlier * (nplog(2 * pi * spread_vector ** 2) + (y_with_outlier - mu) ** 2 / (spread_vector ** 2)))
            outlier_posterior = sum((1 - inlier) * (nplog(2 * pi * ((spread_vector ** 2) + (spread_outliers ** 2))) + (y_with_outlier - mean_outliers) ** 2 / ((spread_vector ** 2) + (spread_outliers ** 2))))
            return -0.5 * (inlier_posterior + outlier_posterior)

        # Wrap the custom likelihood as a multivariate pymc distribution and
        # bind it to the observed y values
        outlier_distribution = stochastic_from_dist('outlier_distribution', logp=log_posterior_likelihood_of_outlier, dtype=npfloat, mv=True)

        outlier_dist = outlier_distribution('outlier_dist', mu=model_, spread_vector=spread_vector, mean_outliers=mean_outliers, spread_outliers=spread_outliers, inlier=inlier, value=y_array, observed=True)

        return locals()
class Linear_Regressions(Bayesian_regressions):
    """Driver for the frequentist and Bayesian linear-regression schemes
    applied to the loaded observational data (x/y arrays and their errors).

    Each *_regression method returns an OrderedDict with at least the keys
    'methodology', 'm', 'n', 'm_error', 'n_error'.
    """

    def __init__(self):
        Bayesian_regressions.__init__(self)
        # Observed data and associated 1-sigma uncertainties
        self.x_array = None
        self.x_error = None
        self.y_array = None
        self.y_error = None

    def load_obs_data(self, x_values, y_values, x_errors = None, y_errors = None):
        """Store the observed data; errors default to None when unavailable."""
        #Default case we input all the values manually
        self.x_array = x_values
        self.x_error = x_errors
        self.y_array = y_values
        self.y_error = y_errors

    def perform_regression(self, Methodology):
        """Dispatch to the requested regression scheme and return its fit dict.

        Raises ValueError for an unrecognized methodology (previously an
        unknown name crashed with an UnboundLocalError on fit_dict).
        """
        if Methodology == 'bces':
            fit_dict = self.bces_regression()
        elif Methodology == 'Max_Likelihood':
            # NOTE(review): max_likelihood_regression is not defined in this
            # class -- presumably provided elsewhere; confirm before use.
            fit_dict = self.max_likelihood_regression()
        elif Methodology == 'linfit':
            fit_dict = self.linfit_regression()
        elif Methodology == 'scipy':
            fit_dict = self.scipy_regression()
        elif Methodology == 'kmpfit':
            fit_dict = self.kmpfit_regression()
        elif Methodology == 'kelly':
            fit_dict = self.kellyBces_regression()
        elif 'Inference' in Methodology:
            fit_dict = self.inference_model(Methodology)
        else:
            raise ValueError('Unknown regression methodology: ' + str(Methodology))
        return fit_dict

    def inference_model(self, Methodology):
        """Dispatch between the Bayesian inference schemes."""
        if Methodology == 'Inference - ChiSq':
            # NOTE(review): inference_ChiSq is not defined in this class; the
            # chi-squared scheme appears to live in bayesian_regression --
            # confirm the intended mapping.
            Inf_dict = self.inference_ChiSq()
        elif Methodology == 'Inference - Outliers':
            Inf_dict = self.Outliers_Krough()
        else:
            raise ValueError('Unknown inference methodology: ' + str(Methodology))
        return Inf_dict

    def linfit_regression(self):
        """Weighted least-squares fit via the linfit package."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = 'Linfit'
        Regression_Fit, Uncertainty_Matrix, fit_dict['red_ChiSq'], fit_dict['residuals'] = linfit(x_true = self.x_array, y = self.y_array, sigmay = self.y_error, relsigma = False, cov = True, chisq = True, residuals = True)
        # Parameter errors are the square roots of the covariance diagonal
        m_n_Matrix = [sqrt(Uncertainty_Matrix[t,t]) for t in range(2)]
        fit_dict['R_factor'] = Uncertainty_Matrix[0,1]/(m_n_Matrix[0]*m_n_Matrix[1])
        fit_dict['m'], fit_dict['m_error'] = Regression_Fit[0], m_n_Matrix[0]
        fit_dict['n'], fit_dict['n_error'] = Regression_Fit[1], m_n_Matrix[1]
        return fit_dict

    def bces_regression(self, cov = None):
        """BCES regression (Rodrigo Nemmen, http://goo.gl/8S1Oo).

        cov -- covariance between the x/y measurements; when None, errors are
        assumed independent between measurements.
        """
        fit_dict = OrderedDict()
        # 'is None' rather than '== None': the latter is ambiguous/elementwise
        # when an ndarray covariance vector is supplied
        if cov is None:
            cov = zeros(len(self.x_array))
        fit_dict['methodology'] = (r'OLS(Y|X)$_{bces}$', r'OLS(X|Y)$_{bces}$', r'bisector$_{bces}$', r'Orthogonal$_{bces}$')
        fit_dict['m'],fit_dict['n'],fit_dict['m_error'],fit_dict['n_error'],fit_dict['cov'] = bces(self.x_array, self.x_error, self.y_array, self.y_error, cov)
        return fit_dict

    def scipy_regression(self):
        """Orthogonal distance regression (ODR) via SciPy."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = r'ODR$_{ScIPy}$'
        beta_0 = (0, 1)
        # NOTE(review): self.y_array is passed twice; the third argument looks
        # like it should be self.x_error -- confirm against the scipy_ODR
        # signature before changing.
        fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'], fit_dict['cov'], fit_dict['chiSq'], fit_dict['red_ChiSq'] = scipy_ODR(self.x_array, self.y_array, self.y_array, self.y_error, beta_0)
        return fit_dict

    def kmpfit_regression(self):
        """Effective-variance fit via kmpfit, seeded by the scipy ODR result."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = r'Effective Variance$_{kmpfit}$'
        scipy_guess_dict = self.scipy_regression()
        beta_0 = (scipy_guess_dict['n'], scipy_guess_dict['m'])
        fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'], fit_dict['cov'], fit_dict['chiSq'], fit_dict['red_ChiSq'] = kmpfit_effectivevariance(self.x_array, self.y_array, self.x_error, self.y_error, beta_0)
        return fit_dict

    def bayesian_regression(self, Methodology):
        """Chi-squared Bayesian linear regression sampled with PyMC MCMC."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = r'Inference $\chi^{2}$ model'
        # A least-squares polyfit seeds the chains
        Np_lsf = polyfit(self.x_array, self.y_array, 1)
        m_0, n_0 = Np_lsf[0], Np_lsf[1]
        MCMC_dict = self.lr_ChiSq(self.x_array, self.y_array, m_0, n_0)
        myMCMC = MCMC(MCMC_dict)
        myMCMC.sample(iter=10000, burn=1000)
        fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'] = myMCMC.stats()['m']['mean'], myMCMC.stats()['n']['mean'], myMCMC.stats()['m']['standard deviation'], myMCMC.stats()['n']['standard deviation']
        return fit_dict

    def kellyBces_regression(self):
        """Kelly (2007) Bayesian regression with errors on both axes."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = (r'Inferences$_{bces}$')
        n_tuple, m_tuple, cov = kelly(x1=self.x_array, x2=self.y_array, x1err=self.x_error, x2err=self.y_error)
        fit_dict['m'],fit_dict['n'],fit_dict['m_error'],fit_dict['n_error'],fit_dict['cov'] = m_tuple[0], n_tuple[0], m_tuple[1], n_tuple[1], cov
        return fit_dict

    def Outliers_Krough(self):
        """Linear regression with MCMC outlier detection (Krough-style mixture)."""
        fit_dict = OrderedDict()
        fit_dict['methodology'] = r'Outliers Krough'
        # A BCES fit seeds the sampler; index 0 picks the OLS(Y|X) scheme
        Bces_guess = self.bces_regression()
        m_0, n_0 = Bces_guess['m'][0], Bces_guess['n'][0]
        Spread_vector = ones(len(self.x_array))
        # Build and sample the outlier-detection model
        Outliers_dect_dict = self.inference_outliers(self.x_array, self.y_array, m_0, n_0, Spread_vector)
        mcmc = MCMC(Outliers_dect_dict)
        mcmc.sample(100000, 20000)
        # Points whose posterior inlier probability falls below the threshold
        # are flagged as outliers
        probability_of_points = mcmc.trace('inlier')[:].astype(float).mean(0)
        fit_dict['x_coords_outliers'] = self.x_array[probability_of_points < self.prob_threshold]
        fit_dict['y_coords_outliers'] = self.y_array[probability_of_points < self.prob_threshold]
        return fit_dict
def Python_linfit(x_true, y, y_err, errors_output = True):
    """Weighted straight-line fit via the linfit routine.

    Returns (m, m_error, n, n_error) when errors_output is True, otherwise
    just (m, n), for the model y = m*x + n.
    """
    fit_coeffs, cov_matrix, red_chi_sq, residuals = linfit(x_true, y, y_err, cov=True, relsigma=False, chisq=True, residuals=True)
    # Parameter errors come from the covariance-matrix diagonal
    diag_errors = [sqrt(cov_matrix[i, i]) for i in (0, 1)]
    r_factor = cov_matrix[0, 1] / (diag_errors[0] * diag_errors[1])
    slope, intercept = fit_coeffs[0], fit_coeffs[1]
    if not errors_output:
        return slope, intercept
    return slope, diag_errors[0], intercept, diag_errors[1]
def NumpyRegression(x, y):
    """Ordinary least-squares line fit y = m*x + n using numpy's lstsq.

    Returns the (m, n) pair of the best-fitting straight line.
    """
    # Design matrix with a column of x values and a column of ones
    design_matrix = vstack([x, ones(len(x))]).T
    solution = lstsq(design_matrix, y)[0]
    slope, intercept = solution[0], solution[1]
    return slope, intercept
# from collections import OrderedDict
#
# from kapteyn import kmpfit
# from numpy import exp, zeros, asarray, argsort, diff, concatenate, int8, where, array, sqrt, square, ones, power, sum, mean, std, linspace, max, round, median, polyfit
# from numpy import float as npfloat
# from numpy import log as nplog
# from numpy import pi, sum, zeros
# from numpy.linalg import lstsq
# from pymc import deterministic, stochastic, Normal, Uniform, MCMC, Bernoulli, stochastic_from_dist
# from scipy.integrate import simps
# from scipy.interpolate import interp1d
# from scipy.optimize import minimize, curve_fit, leastsq
# from uncertainties import unumpy
#
# from bces_script import bces
# from kmpfit_script import kmpfit_effectivevariance, scipy_ODR
# from linfit_script import linfit
# from lnr_script import kelly
# import random as random
#
#
# class Fitting_Gaussians():
# '''This class contains the methods to measure gaussins in a scheme similar to IRAF'''
#
# def __init__(self):
#
# self.x_Norm = None
# self.y_Norm = None
#
# self.A_Norm = None
# self.mu_Norm = None
# self.sigma_Norm = None
#
# self.zerolev_Norm = None
# self.sigma_zerolev_Norm = None
#
# self.Combined = None
# self.MontecarloCheck = True
# self.Iterations = 1000
#
# self.A_Vector = None
# self.mu_Vector = None
# self.sigma_Vector = None
# self.y_New = None
# self.Area_Norm = None
#
# self.Kmpfit_Dictionary = []
#
# self.Fitting_dict = OrderedDict()
# self.Fitting_dict['Deblend check'] = False #This logic is true when a blended group is observed
# self.Fitting_dict['start deblend'] = False #This logic is true when a blended group is observed
# self.Fitting_dict['line group'] = None #This integer provides the index for the group of lines describes the blended lines
# self.Fitting_dict['line number'] = None #This integer describes which line from the current blended group we are measuring. It starts at 0
# self.Fitting_dict['line label'] = None #This string describes the label for the current blended line we are mesuring: H1_6563A
# self.Fitting_dict['blended label'] = None #This string should be used for the blended label for the lines log
# self.Fitting_dict['blended number'] = None #This integer describes the number of blended lines. Currently it must be equal to the number in the blended list elements.
# self.Fitting_dict['p1_norm'] = {} #List with the normalized estimations for the gaussian fitting: [A, mu, sigma]
# self.Fitting_dict['kmpfit_dict'] = {} #This dictionaries containes the parameters for kmpfit
# self.Fitting_dict['p0'] = None
# self.Fitting_dict['p1'] = None
# self.Fitting_dict['p_1_std'] = None
# self.Fitting_dict['p_1_Area'] = None
# self.Fitting_dict['p_1_Area_Std'] = None
#
# self.NComps = 0
#
# self.GaussianSampling = 101
#
# self.Max_y = None
# self.Mean_x = None
#
# def Calculate_EW(self, SubWave, SubInt, Flux_Brute, No_StellarContribution, Current_Wavelength, TableAddress, TableHeaderSize):
#
# #Case in which we are measuring the line intensity from a zero level continuum (that means without the stellar continuum)
# if No_StellarContribution == True:
# LocalMedian = float(self.GetDataInTable("TheoWavelength", "Continuum_Median",Current_Wavelength, TableAddress,TableHeaderSize))
# else:
# LocalMedian = self.LocalMedian
#
# #Single line
# if self.Fitting_dict['Deblend check'] == False:
# self.EqW = Flux_Brute / LocalMedian
# self.Classic_EmLine_Error(SubWave, SubInt, self.Flux_Brute, self.EqW)
# self.SigmaEW_MCMC = 0.0
#
# #Blended line
# else:
# self.Fitting_dict['Eqw'] = zeros(self.Fitting_dict['blended number'])
# for i in range(len(self.Fitting_dict['blended number'])):
# self.Fitting_dict['Eqw'][i] = self.Fitting_dict['Flux_Gauss'][i] / LocalMedian
#
# return
#
#
#
# def SingleGaussian_Cont(self, Ind_Variables, A, mu, sigma):
#
# #In order to increase somehow the speed we simplify the code by no asigning many variables
#
# #x_true = Ind_Variables[0]
# #continuum = Ind_Variables[1]
#
# #y = A * exp(-(x_true-mu)*(x_true-mu)/(2.0*sigma*sigma)) + continuum
#
# return A * exp(-(Ind_Variables[0]-mu)*(Ind_Variables[0]-mu)/(2.0*sigma*sigma)) + Ind_Variables[1]
#
# def MixtureGaussian_Cont_Short(self, Ind_Variables, p, NComps):
# y = 0.0
# for i in range(NComps):
# A, mu, sigma = p[i*3:(i+1)*3]
# y += A * exp(-(Ind_Variables[0]-mu)*(Ind_Variables[0]-mu)/(2.0*sigma*sigma))
#
# return y + Ind_Variables[1]
#
# def MixtureGaussian_Cont(self, Ind_Variables, p_0):
#
# y = 0.0
# x_true = Ind_Variables[0]
# zerolev = Ind_Variables[1]
#
# N_Comps = int(len(p_0) / 3)
#
# for i in range(N_Comps):
# A, mu, sigma = p_0[i*3:(i+1)*3]
# y += A * exp(-(x_true-mu)*(x_true-mu)/(2.0*sigma*sigma))
#
# return y + zerolev
#
# def Residuals_Gaussian(self, p, data):
# x_true, y, zerolev, sigma_zerolev = data[0], data[1], data[2], data[3]
# return (y - (self.SingleGaussian_Cont((x_true, zerolev), p[0], p[1], p[2]))) / sigma_zerolev
#
# def Residuals_Gaussian_MCMC(self, p, x_true, y, zerolev):
# return y - (self.SingleGaussian_Cont((x_true, zerolev), p[0], p[1], p[2]))
#
# def Residuals_GaussianMixture(self, p, data):
# x_true, y, zerolev, sigma_zerolev = data[0], data[1], data[2], data[3]
# return (y - (self.MixtureGaussian_Cont_v2((x_true, zerolev), p))) / sigma_zerolev
#
# def MixtureGaussian_Cont_v2(self, Ind_Variables, p):
# y = 0.0
# for i in range( int(len(p) / 3)):
# A, mu, sigma = p[i*3:(i+1)*3]
# y += A * exp(-(Ind_Variables[0]-mu)*(Ind_Variables[0]-mu)/(2.0*sigma*sigma))
# return y + Ind_Variables[1]
#
# def Chi2(self, p_0, x_true, y, cont_Vector, sig_Cont):
#
# #In order to increase somehow the speed we simplify the code by no asigning many variables
# # Chi2 = sum(power((y - self.SingleGaussian_Cont((x_true, cont_Vector), p_0[0], p_0[1], p_0[2]))/sig_Cont, 2))
#
# return sum(power((y - self.SingleGaussian_Cont((x_true, cont_Vector), p_0[0], p_0[1], p_0[2]))/sig_Cont, 2))
#
# def Scale_Parameters(self, x_true, y, p_0, zerolev, sigma_zerolev, SingleLine = True):
#
# x_new = zeros(len(x_true))
# x_list = x_true.tolist()
#
# #This rounding is necesary but how does this affect the precission?
# for i in range(len(x_list)):
# x_new[i] = round(x_list[i], 3)
#
# if SingleLine:
#
# p_0[1] = round(p_0[1], 3)
#
# self.x_Norm = x_new - p_0[1]
# self.y_Norm = y / p_0[0]
# self.A_Norm = 1.0
# self.mu_Norm = 0.0
# self.sigma_Norm = round(p_0[2], 3)
# self.zerolev_Norm = zerolev / p_0[0]
# self.sigma_zerolev_Norm = sigma_zerolev / p_0[0]
#
# else:
#
# self.Max_y = max(p_0[0])
# self.Mean_x = round(median(p_0[1]), 3)
#
# self.x_Norm = round(x_new - self.Mean_x, 3)
# self.y_Norm = round(y / self.Max_y, 4)
# self.A_Norm = round(p_0[0] / self.Max_y, 4)
# self.mu_Norm = round(p_0[1] - self.Mean_x, 3)
# self.sigma_Norm = round(p_0[2] , 3)
# self.zerolev_Norm = round(zerolev / self.Max_y, 4)
# self.sigma_zerolev_Norm = round(sigma_zerolev / self.Max_y, 4)
#
# return
#
# def Calculate_G_Parameters(self, x_true, y, p_0, zerolev, sigma_zerolev, Method, line_type = True):
#
# #Check for the line type
# if line_type == 'Absorption':
# y = 2 * zerolev - y
#
# #Normalize the parameters to improve the fitting quality
# self.Scale_Parameters(x_true, y, p_0, zerolev, sigma_zerolev)
#
# if Method == 'Min_Chi2':
# Minimize_Output = minimize(self.Chi2, x0 = [self.A_Norm, self.mu_Norm, self.sigma_Norm], args=(self.x_Norm, self.y_Norm, self.zerolev_Norm, self.sigma_zerolev_Norm), method = 'Nelder-Mead')
# self.Fitting_dict['p1_norm'] = Minimize_Output['x_true']
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
#
# elif Method == 'CurveFit':
# self.Fitting_dict['p1_norm'], conv_curFit = curve_fit(self.SingleGaussian_Continuum, (self.x_Norm, self.zerolev_Norm), self.y_Norm, [self.A_Norm, self.mu_Norm, self.sigma_Norm])
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
#
# elif Method == 'leastsqr':
# self.Fitting_dict['p1_norm'], conv = leastsq(self.Residuals_Gaussian, [self.A_Norm, self.mu_Norm, self.sigma_Norm], args=(self.x_Norm, self.y_Norm, self.zerolev_Norm, [self.sigma_zerolev_Norm] * len(self.x_Norm)))
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
#
# elif Method == 'kmpfit':
# fitobj = kmpfit.Fitter(residuals=self.Residuals_Gaussian, data=([self.x_Norm, self.y_Norm, self.zerolev_Norm, self.sigma_zerolev_Norm]))
# fitobj.parinfo = self.Kmpfit_Dictionary
# fitobj.fit(params0 = [self.A_Norm, self.mu_Norm, self.sigma_Norm])
# self.Fitting_dict['p1_norm'] = fitobj.params
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
#
# elif 'MCMC' in Method:
# self.MCMC_Scheme()
#
# #De normalize the fitting parameters to the physical units
# self.ReScale_Parameters(self.Fitting_dict['p1_norm'], p_0, self.Fitting_dict['p_1_std_Norm'], self.Fitting_dict['p_1_Area_Norm']) # ADAPT THE RESCALING FOR AN ABSORPTION LINE
#
# return
#
# def Calculate_GM_Parameters_v3(self, p_0, subWave, zerolev, Method):
#
# #Reshaping the initial guesses vector to: [A_0, A_1, A_2..., mu_0, mu_1, mu_2..., sigma_0, sigma_1, sigma_2...]
# p_O_Reshape = zeros(self.Fitting_dict['blended number'] * 3)
# for i in range(self.Fitting_dict['blended number']):
# p_O_Reshape[i*3:(i+1)*3] = self.A_Norm[i], self.mu_Norm[i], self.sigma_Norm[i]
#
# #Perfom fit
# if Method == 'leastsqr':
# self.Fitting_dict['p1_norm'], conv = leastsq(self.Residuals_GaussianMixture, args=([self.x_Norm, self.y_Norm, self.zerolev_Norm, self.sigma_zerolev_Norm]), x0 = p_O_Reshape)
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
#
#
# if Method == 'kmpfit':
# fitobj = kmpfit.Fitter(residuals=self.Residuals_GaussianMixture, data=([self.x_Norm, self.y_Norm, self.zerolev_Norm, self.sigma_zerolev_Norm]))
# fitobj.parinfo = self.Kmpfit_Dictionary
# fitobj.fit(params0 = p_O_Reshape)
# self.Fitting_dict['p1_norm'] = fitobj.params
# self.Fitting_dict['p_1_std_Norm'] = None
# self.Fitting_dict['p_1_Area_Norm'] = None
# self.Fitting_dict['p_1_Area_Std_Norm'] = None
#
# elif 'MCMC' in Method:
# self.MCMC_Scheme_Blend(Method)
#
# #De normalize the fitting parameters to the physical units
# self.ReScale_Parameters(self.Fitting_dict['p1_norm'], p_0, self.Fitting_dict['p_1_std_Norm'], self.Fitting_dict['p_1_Area_Norm'], SingleLine=False)
#
# return
#
# def ReScale_Parameters(self, p_1_Norm, p_0, p_1_stdev_Norm = None, p_1_Area_Norm = None, p_1_Area_std_Norm=None, SingleLine = True):
#
# #Single Line
# if SingleLine:
# A = p_1_Norm[0] * p_0[0]
# mu = p_1_Norm[1] + p_0[1]
# sigma = p_1_Norm[2]
#
# p_1 = [A, mu[0], sigma]
#
# if (p_1_stdev_Norm == None) and (p_1_Area_Norm == None):
# self.Fitting_dict['p1'] = p_1
# return
#
# else:
# p_1_Area = p_1_Area_Norm * p_0[0]
# p_1_stdev = [p_1_stdev_Norm[0] * p_0[0], p_1_stdev_Norm[1], p_1_stdev_Norm[2]]
#
# self.Fitting_dict['p1'] = p_1
# self.Fitting_dict['p_1_std'] = p_1_stdev
# self.Fitting_dict['p_1_Area'] = p_1_Area
# return
#
# #Blended line
# else:
# p_1 = ones(len(p_1_Norm))
#
# for i in range(self.Fitting_dict['blended number']):
# A, mu, sigma = p_1_Norm[i*3:(i+1)*3]
# p_1[i*3:(i+1)*3] = A * self.Max_y, mu + self.Mean_x, sigma
#
# if (p_1_stdev_Norm == None) and (p_1_Area_Norm == None):
# self.Fitting_dict['p1'] = p_1
# return
#
# else:
# p_1_stdev = zeros(len(p_1_stdev_Norm))
#
# for i in range(self.Fitting_dict['blended number']):
# A_std, mu_std, sigma_std = p_1_stdev_Norm[i*3:(i+1)*3]
# p_1_stdev[i*3:(i+1)*3] = A_std * self.Max_y, mu_std, sigma_std
#
# p_1_Area = p_1_Area_Norm * self.Max_y
# p_1_Area_Std = p_1_Area_std_Norm * self.Max_y
#
# self.Fitting_dict['p1'] = p_1
# self.Fitting_dict['p_1_std'] = p_1_stdev
# self.Fitting_dict['p_1_Area'] = p_1_Area
# self.Fitting_dict['p_1_Area_Std'] = p_1_Area
#
# return
#
# def MCMC_Scheme(self):
#
# #Number of x points in the spectral line
# x_Grid_Length = len(self.x_Norm)
#
# #Define vector to store MCMC predictions
# A_Vector = zeros(self.MC_Iterations)
# mu_Vector = zeros(self.MC_Iterations)
# sigma_Vector = zeros(self.MC_Iterations)
# AreaB_Norm = zeros(self.MC_Iterations)
# AreaG_Norm = zeros(self.MC_Iterations)
#
# #Resample x grid for the gaussian flux
# self.x_Norm_Resample = linspace(self.x_Norm[0], self.x_Norm[-1], self.GaussianSampling, endpoint=True)
#
# #Resampling the linear continuum for the fitting #CONFLITH WITH MARTAS APPROACH?
# Interpolation = interp1d(self.x_Norm, self.zerolev_Norm, kind = 'slinear')
# self.zerolev_Resample = Interpolation(self.x_Norm_Resample)
#
# #Choose MC Gaussian scheme
# if self.Fitting_dict['Fitting method'] == 'MCMC_kmpfit':
#
# #Loop through the bootstrap method
# for i in range(self.MC_Iterations):
#
# #Array of simulated line
# y_New_Norm = self.sigma_zerolev_Norm * random.randn(x_Grid_Length) + self.y_Norm
#
# #Run line Gaussian fitting
# fitobj = kmpfit.Fitter(residuals=self.Residuals_GaussianMixture, data=([self.x_Norm, y_New_Norm, self.zerolev_Norm, self.sigma_zerolev_Norm]))
# fitobj.parinfo = self.Fitting_dict['kmpfit_dict']
# fitobj.fit(params0 = [self.A_Norm, self.mu_Norm, self.sigma_Norm])
# p_1_i = fitobj.params
#
# #Save parameters fitting
# A_Vector[i] = p_1_i[0]
# mu_Vector[i] = p_1_i[1]
# sigma_Vector[i] = p_1_i[2]
#
# #Calculate Gaussian flux
# y_resample = self.SingleGaussian_Cont((self.x_Norm_Resample, self.zerolev_Resample), p_1_i[0], p_1_i[1], p_1_i[2])
# AreaG_Norm[i] = self.Integrate_Gaussian(self.x_Norm_Resample, y_resample, self.zerolev_Resample)
#
# #Calculate Brute flux
# AreaB_Norm[i] = self.Integrate_Gaussian(self.x_Norm, y_New_Norm, self.zerolev_Norm)
#
# #Case the fitting method is not recognized
# else:
# print 'WARNING: MCMC could not determine gaussian of emission line'
# print '--Method', self.Methodology
# return
#
# #Store the mean values from the bootstrap into the dictionary
# self.Fitting_dict['p1_norm'] = [mean(A_Vector), mean(mu_Vector), mean(sigma_Vector)]
# self.Fitting_dict['p1_std_Norm'] = [std(A_Vector), std(mu_Vector), std(sigma_Vector)]
# self.Fitting_dict['p1_Area_Norm'] = mean(AreaG_Norm)
# self.Fitting_dict['p1_Area_Std_Norm'] = std(AreaG_Norm)
# self.Fitting_dict['p1_AreaB_Norm'] = mean(AreaB_Norm)
# self.Fitting_dict['p1_AreaB_Std_Norm'] = std(AreaB_Norm)
#
# return
#
# def MCMC_Scheme_Blend(self, Method):
#
# p_1_Norm = []
# p_1_std_Norm = []
# p_1_Area_Norm = []
# p_1_Area_std_Norm = []
#
# self.A_Vector = zeros((self.Fitting_dict['blended number'], self.Iterations))
# self.mu_Vector = zeros((self.Fitting_dict['blended number'], self.Iterations))
# self.sigma_Vector = zeros((self.Fitting_dict['blended number'], self.Iterations))
#
# self.Area_Norm = zeros((self.Fitting_dict['blended number'], self.Iterations))
# Length_y = len(self.x_Norm)
# self.y_New = zeros(Length_y)
#
# #We put this here to improve speed
# self.x_Norm_Resample = linspace(self.x_Norm[0], self.x_Norm[-1], self.GaussianSampling, endpoint=True)
# Interpolation = interp1d(self.x_Norm, self.x_Norm, kind = 'slinear')
# self.zerolev_Resample = Interpolation(self.x_Norm_Resample)
#
# p_O_Reshape = zeros(self.Fitting_dict['blended number'] * 3)
# for i in range(self.Fitting_dict['blended number']):
# p_O_Reshape[i*3:(i+1)*3] = self.A_Norm[i], self.mu_Norm[i], self.sigma_Norm[i]
#
# if Method == 'MCMC_leastsqr':
# for i in range(self.Iterations):
# for z in range(Length_y):
# self.y_New[z] = random.gauss(self.y_Norm[z], self.sigma_zerolev_Norm)
#
# p_1_i, conv_mcmc_leastsqr = leastsq(self.Residuals_GaussianMixture, args=([self.x_Norm, self.y_New, self.zerolev_Norm, self.sigma_zerolev_Norm]), x0 = p_O_Reshape)
#
# y_resample_c = zeros(Length_y)
# for j in range(self.Fitting_dict['blended number']):
# self.A_Vector[j][i], self.mu_Vector[j][i], self.sigma_Vector[j][i] = p_1_i[j*3:(j+1)*3]
# y_resample_c = self.A_Vector[j][i] * exp(-(self.x_Norm_Resample-self.mu_Vector[j][i])*(self.x_Norm_Resample-self.mu_Vector[j][i])/(2.0*self.sigma_Vector[j][i]*self.sigma_Vector[j][i])) + self.zerolev_Resample
# self.Area_Norm[j][i] = self.Integrate_Gaussian(self.x_Norm_Resample, y_resample_c, self.zerolev_Resample)
#
# if Method == 'MCMC_kmpfit':
# for i in range(self.Iterations):
# for z in range(Length_y):
# self.y_New[z] = random.gauss(self.y_Norm[z], self.sigma_zerolev_Norm)
#
# fitobj = kmpfit.Fitter(residuals=self.Residuals_GaussianMixture, data=([self.x_Norm, self.y_New, self.zerolev_Norm, self.sigma_zerolev_Norm]))
# fitobj.parinfo = self.Kmpfit_Dictionary
# fitobj.fit(params0 = p_O_Reshape)
#
# p_1_i = fitobj.params
#
# y_resample_c = zeros(Length_y)
# for j in range(self.Fitting_dict['blended number']):
# self.A_Vector[j][i], self.mu_Vector[j][i], self.sigma_Vector[j][i] = p_1_i[j*3:(j+1)*3]
# y_resample_c = self.A_Vector[j][i] * exp(-(self.x_Norm_Resample-self.mu_Vector[j][i])*(self.x_Norm_Resample-self.mu_Vector[j][i])/(2.0*self.sigma_Vector[j][i]*self.sigma_Vector[j][i])) + self.zerolev_Resample
# self.Area_Norm[j][i] = self.Integrate_Gaussian(self.x_Norm_Resample, y_resample_c, self.zerolev_Resample)
#
#
# else:
# print 'WARNING: MCMC could not determine gaussian of emission line'
# print '--Method', self.Methodology
# self.Fitting_dict['p1_norm'] = None
# return
#
# for j in range(self.Fitting_dict['blended number']):
#
# p_1_Norm.append(mean(self.A_Vector[j]))
# p_1_Norm.append(mean(self.mu_Vector[j]))
# p_1_Norm.append(mean(self.sigma_Vector[j]))
#
# p_1_std_Norm.append(std(self.A_Vector[j]))
# p_1_std_Norm.append(std(self.mu_Vector[j]))
# p_1_std_Norm.append(std(self.sigma_Vector[j]))
#
# p_1_Area_Norm.append(mean(self.Area_Norm[j]))
# p_1_Area_std_Norm.append(std(self.Area_Norm[j]))
#
# self.Fitting_dict['p1_norm'] = array(p_1_Norm)
# self.Fitting_dict['p_1_std_Norm'] = array(p_1_std_Norm)
# self.Fitting_dict['p_1_Area_Norm'] = array(p_1_Area_Norm)
# self.Fitting_dict['p_1_Area_Std_Norm'] = array(p_1_Area_std_Norm)
#
# return
#
# def Resample_Gaussian(self, x_true, zerolev, sampling = None, Emission_Gaussian = True, SingleLine = True):
#
# #Case of a single line
# if SingleLine == True:
# if sampling == None:
# sampling = self.GaussianSampling
#
# x_resample = linspace(x_true[0], x_true[-1], self.GaussianSampling, endpoint=True)
#
# #THIS interpolation is kind of stupid
# Interpolation = interp1d(x_true, zerolev, kind = 'slinear')
# zerolev_resample = Interpolation(x_resample)
#
# if Emission_Gaussian == 'Emission':
# y_resample = self.SingleGaussian_Cont((x_resample, zerolev_resample), self.Fitting_dict['p1'][0], self.Fitting_dict['p1'][1], self.Fitting_dict['p1'][2])
# self.Fitting_dict['y_plotting'] = self.SingleGaussian_Cont((x_resample, zerolev_resample), self.Fitting_dict['p1'][0], self.Fitting_dict['p1'][1], self.Fitting_dict['p1'][2])
#
# else:
# y_resample = 2 * zerolev_resample - self.SingleGaussian_Cont((x_resample, zerolev_resample), self.Fitting_dict['p1'][0], self.Fitting_dict['p1'][1], self.Fitting_dict['p1'][2])
# self.Fitting_dict['y_plotting'] = self.SingleGaussian_Cont((x_resample, zerolev_resample), self.Fitting_dict['p1'][0], self.Fitting_dict['p1'][1], self.Fitting_dict['p1'][2])
#
# self.Fitting_dict['x_resample'] = x_resample
# self.Fitting_dict['y_resample'] = y_resample
#
# #Case of a blended line
# else:
# #UPDATE THIS FOR ABSORPTIONS
# x_resample_complete = linspace(x_true[0], x_true[-1], self.GaussianSampling * self.NComps, endpoint=True)
# self.zerolev_Resample = self.Continuum_Gradient * x_resample_complete + self.Continuum_n
# y_resample_Complete = self.MixtureGaussian_Cont_Short((x_resample, self.zerolev_Resample), self.p_1, self.NComps)
#
# self.Fitting_dict['x_resample'] = [x_resample_complete]
# self.Fitting_dict['y_resample'] = [y_resample_Complete]
# self.Fitting_dict['y_plotting'] = [y_resample_Complete]
#
# #Individual regions
# x_resample = linspace(x_true[0], x_true[-1], self.GaussianSampling, endpoint=True)
# for i in range(len(self.Fitting_dict['blended number'])):
# A_i, mu_i, sigma_i = self.Fitting_dict['p1'][i*3:(i+1)*3]
# y_resample_i = self.SingleGaussian_Cont((x_resample, zerolev_resample), A_i, mu_i, sigma_i)
#
# self.Fitting_dict['x_resample'].append(x_resample_complete)
# self.Fitting_dict['y_resample'].append(y_resample_i)
# self.Fitting_dict['y_plotting'].append(y_resample_i)
#
# return
#
# def Resample_Gaussian_MCMC(self, x_true, A, mu, sigma, zerolev):
#
# return self.SingleGaussian_Continuum((x_true, zerolev), A, mu, sigma)
#
# def Integrate_Gaussian(self, x_true, y, zerolev):
#
# return simps(y, x_true) - simps(zerolev,x_true)
#
# def Integrate_EmissionLine(self, x_true, y, zerolev):
#
# return simps(y, x_true) - simps(zerolev,x_true)
#
# def FindMaxima(self, xval, yval, MinLevel, ListLines, Deblend_Check):
#
# xval = asarray(xval)
# yval = asarray(yval)
# GroupLines = asarray(ListLines)
#
# sort_idx = argsort(xval)
# yval = yval[sort_idx]
# gradient = diff(yval)
# maxima = diff((gradient > 0).view(int8))
# ListIndeces = concatenate((([0],) if gradient[0] < 0 else ()) + (where(maxima == -1)[0] + 1,) + (([len(yval)-1],) if gradient[-1] > 0 else ()))
# self.X_Maxima, self.Y_Maxima = [], []
#
# for index in ListIndeces:
# if yval[index] > MinLevel:
# self.X_Maxima.append(xval[index])
# self.Y_Maxima.append(yval[index])
#
# A_List = []
# mu_List = []
#
# for i in range(len(GroupLines)):
# TheoWave = GroupLines[i]
# Closest_Index = abs(self.X_Maxima-TheoWave).argmin()
# A_List.append(self.Y_Maxima[Closest_Index])
# mu_List.append(self.X_Maxima[Closest_Index])
#
# if len(ListIndeces) == 1:
# Deblend_Check = True #Not sure what this is supposed to mean
# A_List = list(set(A_List))
# mu_List = list(set(mu_List))
# self.Fitting_dict['blended number'] = 1
#
# A_max = max(A_List)
# Mean_x = round(median(mu_List))
#
# for i in range(self.Fitting_dict['blended number']):
# if A_List[i] < 0.40 * A_max:
# self.Kmpfit_Dictionary.append({})
# self.Kmpfit_Dictionary.append({'limits':(mu_List[i]-Mean_x-0.25,mu_List[i]-Mean_x+0.25)})
# self.Kmpfit_Dictionary.append({'limits':(0,1.2)})
# else:
# self.Kmpfit_Dictionary.append({})
# self.Kmpfit_Dictionary.append({})
# self.Kmpfit_Dictionary.append({'limits':(0,10)})
#
# return array(mu_List), array(A_List), Deblend_Check
#
# class Bayesian_regressions():
#
# def __init__(self):
#
# self.Methodology = None
# self.prob_threshold = 0.40
#
# def lr_ChiSq(self, x_array, y_array, m_0, n_0):
#
# m = Normal('m', m_0, 0.01)
# n = Normal('n', n_0, 0.01)
# sigma = Uniform('sigma', 0.0, 5.0)
#
# @stochastic(observed=True)
# def model(value = self.y_error, x_values = self.x_array, m = m, n = n, sigma = sigma):
#
# value_theo = m*x_values + n
# chi_sq = sum(square(value - value_theo) / square(sigma))
# log_ChiSq = - chi_sq / 2.0
# return log_ChiSq
#
# return locals()
#
# def inference_outliers(self, x_array, y_array, m_0, n_0, spread_vector):
#
# print m_0
#
# outlier_points = Uniform('outlier_points', 0, 1.0, value=0.1)
# mean_outliers = Uniform('mean_outliers', -100, 100, value=0)
# spread_outliers = Uniform('spread_outliers', -100, 100, value=0)
#
# @stochastic
# def slope_and_intercept(slope = m_0):
# prob_slope = nplog(1. / (1. + slope ** 2))
# print 'y este', prob_slope
# return prob_slope
#
# @deterministic
# def model_(x=x_array, slope_and_intercept=slope_and_intercept):
# slope, intercept = slope_and_intercept
# fit = slope * x + intercept
# return fit
#
# inlier = Bernoulli('inlier', p=1 - outlier_points, value=zeros(x_array.size))
#
# def log_posterior_likelihood_of_outlier(y_with_outlier, mu, spread_vector, inlier, mean_outliers, spread_outliers):
# inlier_posterior = sum(inlier * (nplog(2 * pi * spread_vector ** 2) + (y_with_outlier - mu) ** 2 / (spread_vector ** 2)))
# outlier_posterior = sum((1 - inlier) * (nplog(2 * pi * ((spread_vector ** 2) + (spread_outliers ** 2))) + (y_with_outlier - mean_outliers) ** 2 / ((spread_vector ** 2) + (spread_outliers ** 2))))
# return -0.5 * (inlier_posterior + outlier_posterior)
#
# outlier_distribution = stochastic_from_dist('outlier_distribution', logp=log_posterior_likelihood_of_outlier, dtype=npfloat, mv=True)
#
# outlier_dist = outlier_distribution('outlier_dist', mu=model_, spread_vector=spread_vector, mean_outliers=mean_outliers, spread_outliers=spread_outliers, inlier=inlier, value=y_array, observed=True)
#
# return locals()
#
# class Linear_Regressions(Bayesian_regressions):
#
# def __init__(self):
#
# Bayesian_regressions.__init__(self)
#
# self.x_array = None
# self.x_error = None
#
# self.y_array = None
# self.y_error = None
#
# def load_obs_data(self, x_values, y_values, x_errors = None, y_errors = None):
#
# #Default case we input all the values manually
# self.x_array = x_values
# self.x_error = x_errors
#
# self.y_array = y_values
# self.y_error = y_errors
#
# def perform_regression(self, Methodology):
#
# if Methodology == 'bces':
# fit_dict = self.bces_regression()
#
# elif Methodology == 'Max_Likelihood':
# fit_dict = self.max_likelihood_regression()
#
# elif Methodology == 'linfit':
# fit_dict = self.linfit_regression()
#
# elif Methodology == 'scipy':
# fit_dict = self.scipy_regression()
#
# elif Methodology == 'kmpfit':
# fit_dict = self.kmpfit_regression()
#
# elif Methodology == 'kelly':
# fit_dict = self.kellyBces_regression()
#
# elif 'Inference' in Methodology:
# fit_dict = self.inference_model(Methodology)
#
# return fit_dict
#
# def inference_model(self, Methodology):
#
# if Methodology == 'Inference - ChiSq':
#
# Inf_dict = self.inference_ChiSq()
#
# if Methodology == 'Inference - Outliers':
#
# Inf_dict = self.Outliers_Krough()
#
# return Inf_dict
#
# def linfit_regression(self):
#
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = 'Linfit'
#
# Regression_Fit, Uncertainty_Matrix, fit_dict['red_ChiSq'], fit_dict['residuals'] = linfit(x_true = self.x_array, y = self.y_array, sigmay = self.y_error, relsigma = False, cov = True, chisq = True, residuals = True)
#
# m_n_Matrix = [sqrt(Uncertainty_Matrix[t,t]) for t in range(2)]
# fit_dict['R_factor'] = Uncertainty_Matrix[0,1]/(m_n_Matrix[0]*m_n_Matrix[1])
# fit_dict['m'], fit_dict['m_error'] = Regression_Fit[0], m_n_Matrix[0]
# fit_dict['n'], fit_dict['n_error'] = Regression_Fit[1], m_n_Matrix[1]
#
# return fit_dict
#
# def bces_regression(self, cov = None):
# #Rodrigo Nemmen, http://goo.gl/8S1Oo
#
# fit_dict = OrderedDict()
#
# if cov == None:
# #This is the covariance between the measurements. If none provided it is assume error is independent between measurments for the
# cov = zeros(len(self.x_array))
#
# fit_dict['methodology'] = (r'OLS(Y|X)$_{bces}$', r'OLS(X|Y)$_{bces}$', r'bisector$_{bces}$', r'Orthogonal$_{bces}$')
#
# fit_dict['m'],fit_dict['n'],fit_dict['m_error'],fit_dict['n_error'],fit_dict['cov'] = bces(self.x_array, self.x_error, self.y_array, self.y_error, cov)
#
# return fit_dict
#
# def scipy_regression(self):
# #ODR Method
#
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = r'ODR$_{ScIPy}$'
#
# beta_0 = (0, 1)
#
# fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'], fit_dict['cov'], fit_dict['chiSq'], fit_dict['red_ChiSq'] = scipy_ODR(self.x_array, self.y_array, self.y_array, self.y_error, beta_0)
#
# return fit_dict
#
# def kmpfit_regression(self):
#
# #Kmpfit methodology using an effective variance method
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = r'Effective Variance$_{kmpfit}$'
#
# scipy_guess_dict = self.scipy_regression()
#
# beta_0 = (scipy_guess_dict['n'], scipy_guess_dict['m'])
#
# print 'input values', beta_0
#
# fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'], fit_dict['cov'], fit_dict['chiSq'], fit_dict['red_ChiSq'] = kmpfit_effectivevariance(self.x_array, self.y_array, self.x_error, self.y_error, beta_0)
#
# return fit_dict
#
# def bayesian_regression(self, Methodology):
#
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = r'Inference $\chi^{2}$ model'
#
# #Initial guess for the fitting:
# Np_lsf = polyfit(self.x_array, self.y_array, 1)
# m_0, n_0 = Np_lsf[0], Np_lsf[1]
#
# print 'Bayesian guesses:', m_0, n_0
#
# MCMC_dict = self.lr_ChiSq(self.x_array, self.y_array, m_0, n_0)
#
# myMCMC = MCMC(MCMC_dict)
#
# myMCMC.sample(iter=10000, burn=1000)
#
# fit_dict['m'], fit_dict['n'], fit_dict['m_error'], fit_dict['n_error'] = myMCMC.stats()['m']['mean'], myMCMC.stats()['n']['mean'], myMCMC.stats()['m']['standard deviation'], myMCMC.stats()['n']['standard deviation']
#
# return fit_dict
#
# def kellyBces_regression(self):
#
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = (r'Inferences$_{bces}$')
#
# n_tuple, m_tuple, cov = kelly(x1=self.x_array, x2=self.y_array, x1err=self.x_error, x2err=self.y_error)
#
# fit_dict['m'],fit_dict['n'],fit_dict['m_error'],fit_dict['n_error'],fit_dict['cov'] = m_tuple[0], n_tuple[0], m_tuple[1], n_tuple[1], cov
#
# return fit_dict
#
# def Outliers_Krough(self):
#
# fit_dict = OrderedDict()
#
# fit_dict['methodology'] = r'Outliers Krough'
#
# #Initial Guess for fitting
# Bces_guess = self.bces_regression()
# m_0, n_0 = Bces_guess['m'][0], Bces_guess['n'][0]
#
# print 'estos son...', m_0, n_0
#
# Spread_vector = ones(len(self.x_array))
#
# #Model for outliers detection
# Outliers_dect_dict = self.inference_outliers(self.x_array, self.y_array, m_0, n_0, Spread_vector)
#
# mcmc = MCMC(Outliers_dect_dict)
# mcmc.sample(100000, 20000)
#
# #Extract the data with the outliers coordinates
# probability_of_points = mcmc.trace('inlier')[:].astype(float).mean(0)
# fit_dict['x_coords_outliers'] = self.x_array[probability_of_points < self.prob_threshold]
# fit_dict['y_coords_outliers'] = self.y_array[probability_of_points < self.prob_threshold]
#
# return fit_dict
#
# def Python_linfit(x_true, y, y_err, errors_output = True):
#
# Regression_Fit, Uncertainty_Matrix, Red_Chi_Sq, Residuals = linfit(x_true, y, y_err, cov=True, relsigma=False, chisq=True, residuals=True)
# m_n_Matrix = [sqrt(Uncertainty_Matrix[t,t]) for t in range(2)]
# R_Factor = Uncertainty_Matrix[0,1]/(m_n_Matrix[0]*m_n_Matrix[1])
# m, m_error = Regression_Fit[0], m_n_Matrix[0]
# n, n_error = Regression_Fit[1], m_n_Matrix[1]
#
# if errors_output:
# return m, m_error, n, n_error
#
# else:
# return m, n
#
# class LineMesurer_Log():
#
# def __init__(self, Conf_Folder, LinesLogHeader_Name):
#
# self.ColumnHeaderVector, LineLog_Width, LineLog_Format = loadtxt(Conf_Folder + LinesLogHeader_Name, dtype=str, usecols = [1, 2, 3], skiprows=1, unpack=True)
# self.LineLog_FormatDict = dict(zip(self.ColumnHeaderVector, add(LineLog_Width, LineLog_Format)))
#
# self.TableHeaderSize = 2
# self.ColumnWidth = str(50 + 2) #len(max(self.ColumnHeaderVector,key=len)) + 2
#
# def CleanTableMaker(self, TableAddress, RemakeLog, ColumnsHeaders, ColumnsWidth):
#
# if (exists(TableAddress) == False) or (RemakeLog == True):
# myTextFile = open(TableAddress, "w")
# IntroductionLines = ["-File address: " + TableAddress]
#
# myTextFile.write(IntroductionLines[0]+'\n')
#
# Width = "%" + str(ColumnsWidth) + "s"
#
# BlankLine = "".join(Width % i for i in ColumnsHeaders)
# myTextFile.write(BlankLine + "\n")
#
# myTextFile.close()
#
# def GetDataInTable(self, ColumnInput,ColumnOutput,RowOutput,TableAddress,LinesBeforeNumbers):
#
# TableFile = open(TableAddress,"r")
# TableLines = TableFile.readlines()
# TableFile.close()
#
# InPutIndex = "None"
# OutPutIndex = "None"
# HeaderRow = TableLines[LinesBeforeNumbers-1].split()
#
# for i in range(len(HeaderRow)):
# Header = HeaderRow[i]
# if ColumnInput == Header:
# InPutIndex = i
#
# if InPutIndex == "None":
# print "WARNING: Header not found in table (GetData1)"
# print "Column Input: " + ColumnInput
# print "Headers:"
# print HeaderRow
#
# for i in range(len(HeaderRow)):
# Header = HeaderRow[i]
# if ColumnOutput == Header:
# OutPutIndex = i
#
# if OutPutIndex == "None":
# print "WARNING: Header not found in table (GetData2)"
# print "ColumnOutput: " + ColumnOutput
# print "Headers:"
# print HeaderRow
#
# Parameter = "None"
# FoundCheck = False
#
# for j in range(LinesBeforeNumbers,len(TableLines)):
# EmissionRow = TableLines[j].split()
# if str(RowOutput) in str(EmissionRow[InPutIndex]): #This is ineficient: The code should locate the row to interpolate with
# Parameter = EmissionRow[OutPutIndex]
# FoundCheck = True
#
# if FoundCheck == False:
# print "WARNING: Damned Parameter not found in table"
# print "Column Name " + str(ColumnOutput)
# print "Row output " + str(RowOutput)
# print "Ouput Index " + str(OutPutIndex)
# print TableAddress
#
# return Parameter
#
# def InsertNewLine(self, Elemental_Line_String, Current_Em_El, TableAddress, TableHeaderSize):
#
# TableFile = open(TableAddress,"r")
# TableLines = TableFile.readlines()
# TableFile.close()
#
# #Check if emission line has been previously measured
# EL_Previously_Measured = False
#
# for j in range(TableHeaderSize,len(TableLines)):
# EmissionRow = TableLines[j].split()
# if float(Current_Em_El) == float(EmissionRow[2]): #We compare the wavelength
# EL_Previously_Measured = True
#
# if EL_Previously_Measured == False:
#
# Wavelengths = []
# LineLocation = "None"
# if len(range(TableHeaderSize,len(TableLines))) > 0:
# for j in range(TableHeaderSize,len(TableLines)):
# Wave = float(TableLines[j].split()[2])
# Wavelengths.append(Wave)
#
# LineLocation = bisect(Wavelengths, Current_Em_El)
#
# if LineLocation == "None":
# print "WARNING: Emission Line cannot be inserted in data list"
#
# TableLines.insert(LineLocation + TableHeaderSize ,Elemental_Line_String+"\n") #Insert the line in the right location. Remember to include the size of the header!
#
# out = open(TableAddress, 'w')
# out.writelines(TableLines)
# out.close()
#
# else:
#
# TableLines.append(Elemental_Line_String + "\n")
# out = open(TableAddress, 'w')
# out.writelines(TableLines)
# out.close()
# return
#
# def Replace_Row(self, Index, RowName, DataDict, TableAddress, TableHeaderSize):
#
# #Load the table
# TableFile = open(TableAddress,"r")
# TableLines = TableFile.readlines()
# TableFile.close()
#
# #Find row Index
# WavelengthColumn = loadtxt(TableAddress, usecols = [2], skiprows=TableHeaderSize)
# RowIndex = where(WavelengthColumn == RowName)[0] + TableHeaderSize
#
#
# NewRow = ""
# for i in range(len(self.ColumnHeaderVector)):
# formatel = "%" + str(self.LineLog_FormatDict[self.ColumnHeaderVector[i]])
# print 'formatel', formatel, type(formatel)
# NewRow.join(formatel % DataDict[str(self.ColumnHeaderVector[i])][Index])
#
# print 'The final row', NewRow
# #Replace the row
# TableLines[RowIndex] = NewRow + "\n"
#
# #Load the column
# out = open(TableAddress, 'w')
# out.writelines(TableLines)
# out.close()
#
# return
#
# def ReplaceDataInTable(self, ColumnName, RowName, Data, DataFormat, TableAddress, TableHeaderSize, ColumnWidth):
#
# #Load the table
# TableFile = open(TableAddress,"r")
# TableLines = TableFile.readlines()
# TableFile.close()
#
# #Define global columnd width
# # Width = "%" + str(ColumnWidth) + "s"
# DataFormat = '%' + str(DataFormat)
#
# #Find column index
# HeaderIndex = where(self.ColumnHeaderVector == ColumnName)[0]
#
# #Find row Index
# LabelsColumn = loadtxt(TableAddress, usecols = [2], skiprows=TableHeaderSize)
# RowIndex = where(LabelsColumn == RowName)[0] + TableHeaderSize
#
# #Change the new value
# EmissionRow = TableLines[RowIndex].split()
# if "s" in DataFormat:
# EmissionRow[HeaderIndex] = DataFormat % (str(Data))
# else:
# EmissionRow[HeaderIndex] = DataFormat % (float(Data))
#
# #Set row design
# # NewRow = "".join(Width % z for z in EmissionRow)
#
# NewRow = ""
# print 'ColumnWidth', type(ColumnWidth)
# for i in range(len(self.ColumnHeaderVector)):
# formatel = "%" + str(ColumnWidth[str(self.ColumnHeaderVector[i])])
# print 'formatel', formatel, type(EmissionRow[i])
# NewRow.join(formatel % EmissionRow[i])
#
# #Replace the row
# TableLines[RowIndex] = NewRow + "\n"
#
# #Load the column
# out = open(TableAddress, 'w')
# out.writelines(TableLines)
# out.close()
#
# def CreateEmissionLineReccord(self, Current_Em_El, Current_Ion, Current_Label, ColumnsHeaders, ColumnWidth):
#
# Width = "%" + ColumnWidth + "s"
# Wave_New, Label, Ion = "None", "None", "None"
#
# Elemental_Line_List = [Current_Label, Current_Ion, Current_Em_El] #This can be easily changed to a numpy array
#
# for j in range(len(ColumnsHeaders)-3):
# Elemental_Line_List.append("None")
#
# Elemental_Line_String = "".join(Width % z for z in Elemental_Line_List)
#
# return Elemental_Line_String
#
# def DeleteLine(self, Current_Em_Wave, TableAddress, TableHeaderSize, Deblend_Check, List_BlendedLines, Current_BlendedGroup):
# #WARNING CORRECT THIS BLENDED LINES
#
# #Open text file and import the lines
# TableFile = open(TableAddress,"r")
# TableLines = TableFile.readlines()
# TableFile.close()
#
# #Index for the lines to delete (CHANGE THIS TO THE ENUMERATE SCHEME
# LocationLineToDelete = None
#
# #FOR THE TIME BEING THIS ONLY WORKS IF WE ARE CURRENTLY MEASURING A LINE
# if Deblend_Check == False:
# Measured_Wavelengths = loadtxt(TableAddress, dtype=float, skiprows= TableHeaderSize, usecols = [2])
# LocationLineToDelete = int(where(Measured_Wavelengths==Current_Em_Wave)[0]) + TableHeaderSize
# del TableLines[LocationLineToDelete]
#
# else:
# print 'Current_BlendedGroup', Current_BlendedGroup
# grouped_lines_wavelengths = List_BlendedLines[1][Current_BlendedGroup]
# remove_list = []
#
# for Current_Em_Wave in grouped_lines_wavelengths:
# for j in range(TableHeaderSize,len(TableLines)):
# Wave = float(TableLines[j].split()[2])
# if Wave == Current_Em_Wave:
# remove_list.append(j)
#
# TableLines = [v for i, v in enumerate(TableLines) if i not in remove_list]
#
# out = open(TableAddress, 'w')
# out.writelines(TableLines)
# out.close()
#
# return
#
# def Get_DataLog_Parameter(self, Row_Label, Column_Label, TableAddress, TableHeaderSize):
#
# TableFile = open(TableAddress,"r")
# LogLines = TableFile.readlines()
# TableFile.close()
#
# Parameter = None
# HeaderIndex = None
#
# HeaderLine = LogLines[TableHeaderSize-1].split()
#
# for i in range(len(HeaderLine)):
# item = HeaderLine[i]
# if item == Column_Label:
# HeaderIndex = i
#
# if HeaderIndex == None:
# print "WARNING: Header not found"
# print Column_Label
# print HeaderLine
#
# for i in range(TableHeaderSize, len(LogLines)):
# LineElements = LogLines[i].split()
# if LineElements[0] == Row_Label:
# Parameter = LineElements[HeaderIndex]
#
# return Parameter
#
# def RangesR(self, Selections, RowName, TableAddress, TableHeaderSize):
#
# #Use this trick to check we are not measuring a line (and not load new data):
# if len(Selections) == 0:
#
# #Check if emission line has been previously measured (LOADTXT IS NOT USED PROPERLY GIVES ERROR WHEN NO LINES IN TXT FILE (ANY GIVES ERROR WHEN LOADTXT RETURNS A STRING INSTEAD OF A LIST))
# TextFile = open(TableAddress, "r")
# Filelines = TextFile.readlines()
# TextFile.close()
#
# if len(Filelines) <= TableHeaderSize:
# EL_Previously_Measured = False
#
# elif len(Filelines) == 3:
# Measured_Wavelengths = loadtxt(TableAddress, dtype=float, skiprows= TableHeaderSize, usecols = [2])
# print Measured_Wavelengths, type(Measured_Wavelengths)
# if Measured_Wavelengths == RowName:
# EL_Previously_Measured = True
# else:
# EL_Previously_Measured = False
#
# else:
# Measured_Wavelengths = loadtxt(TableAddress, dtype=float, skiprows= TableHeaderSize, usecols = [2])
# if any(Measured_Wavelengths == RowName):
# EL_Previously_Measured = True
# else:
# EL_Previously_Measured = False
#
# #If it was load the data
# if EL_Previously_Measured == True:
# Wave1 = self.GetDataInTable("TheoWavelength","Wave1",RowName,TableAddress,TableHeaderSize)
# Wave2 = self.GetDataInTable("TheoWavelength","Wave2",RowName,TableAddress,TableHeaderSize)
# Wave3 = self.GetDataInTable("TheoWavelength","Wave3",RowName,TableAddress,TableHeaderSize)
# Wave4 = self.GetDataInTable("TheoWavelength","Wave4",RowName,TableAddress,TableHeaderSize)
# Wave5 = self.GetDataInTable("TheoWavelength","Wave5",RowName,TableAddress,TableHeaderSize)
# Wave6 = self.GetDataInTable("TheoWavelength","Wave6",RowName,TableAddress,TableHeaderSize)
#
# if Wave1 != "None":
# Selections.append(float(Wave1))
# if Wave2 != "None":
# Selections.append(float(Wave2))
# if Wave3 != "None":
# Selections.append(float(Wave3))
# if Wave4 != "None":
# Selections.append(float(Wave4))
# if Wave5 != "None":
# Selections.append(float(Wave5))
# if Wave6 != "None":
# Selections.append(float(Wave6))
#
# return Selections
|
Delosari/dazer
|
bin/lib/Math_Libraries/FittingTools_old.py
|
Python
|
mit
| 89,714
|
[
"Gaussian"
] |
ec85710b767f1e8f8273faeb48b73e76e9be896d2c37f1b7c62ef0c21e380dc0
|
r"""smtrace module is used along with vtkSMTrace to generate Python trace for
ParaView. While this module is primarily designed to be used from the ParaView
GUI, Python scripts can use this module too to generate trace from the script
executed.
Typical usage is as follows::
from paraview import smtrace
config = smtrace.start_trace()
# config is an instance of vtkSMTrace. One can setup properties on this
# object to control the generated trace. e.g.
config.SetFullyTraceSupplementalProxies(True)
# do the actions to trace.
...
# stop trace. The generated trace is returned.
txt = smtrace.stop_trace()
=========================
Developer Documentation
=========================
This section describes the module design for developers wanting to extend this
module or use this module for advanced tracing/state generation.
The design can be described as follows:
C++ code (either in ServerManager or the GUI layer) should trace actions.
This is done using SM_SCOPED_TRACE() macro provided by vtkSMTrace. When tracing
is enabled, each SM_SCOPED_TRACE() call creates a :class:`.TraceItem`. The TraceItem
instance is scoped, i.e. the object is finalized and destroyed when the scope
exits.
There are various types of TraceItem, ranging from those that trace specific
action such as :class:`.Show`, or those that trace any modified properties
(:class:`.PropertiesModified`). Generic TraceItem types, such as
:class:`.CallMethod` and :class:`.CallFunction` can be used to trace methods
called on vtkObject instances or functions in the global namespace.
TraceItems create or use :class:`.Accessor` instances. Accessors are objects
created for Proxies and Properties in ParaView. Accessor knows how to access
that proxy or property in the Python trace. TraceItems that create new proxies
such as :class:`.RegisterPipelineProxy` and :class:`.RegisterViewProxy`, create
new :class:`.ProxyAccessor` instances. Other such as
:class:`.PropertiesModified` trace item rely on accessors already created.
:class:`.Trace` can provide access to already created accessor as well as create
new accessor for proxies created before the tracing began
(:meth:`.Trace.get_accessor`).
Additionally, there are filters such as :class:`.ProxyFilter`,
:class:`.PipelineProxyFilter`, etc. which are used to filter properties that get
traced and where they get traced, i.e. in the constructor call or right after it.
===============================
Notes about references
===============================
RealProxyAccessor keeps a hard reference to the servermanager.Proxy instance.
This is required. If we don't, then the Python object for vtkSMProxy also gets
garbage collected since there's no reference to it.
"""
import weakref
import paraview.servermanager as sm
import paraview.simple as simple
from paraview.vtk import vtkTimeStamp
class TraceOutput:
    """Collects the lines of generated trace text.

    Everything pushed in through the append() API is accumulated in an
    internal list; Trace does not emit commands to the trace output the
    moment modifications are noticed, so that state changes can be
    consolidated."""

    def __init__(self, data=None):
        self.__data = []
        if data:
            self.append(data)

    def append(self, data):
        """Append one string, or a list of strings, to the collected output.
        Anything else is silently ignored."""
        if isinstance(data, list):
            self.__data.extend(data)
        elif isinstance(data, str):
            self.__data.append(data)

    def append_separator(self):
        """Append an empty line, unless the output is empty or already ends
        with one (keeps separators from piling up)."""
        if self.__data and self.__data[-1] != "":
            self.__data.append("")

    def append_separated(self, data):
        """Append `data` preceded by a separator line."""
        self.append_separator()
        self.append(data)

    def __str__(self):
        return '\n'.join(self.__data)

    def raw_data(self):
        """Return the underlying list of collected lines (not a copy)."""
        return self.__data

    def reset(self):
        """Discard everything collected so far."""
        self.__data = []
class Trace(object):
    """Holds the global trace state: the trace output collected so far
    (:attr:`Output`) and a registry mapping traced servermanager objects to
    the :class:`Accessor` instances used to refer to them in the trace."""

    # Maps a traced object (servermanager.Proxy instance) to its Accessor.
    __REGISTERED_ACCESSORS = {}

    # TraceOutput instance collecting the generated trace; set in reset().
    Output = None

    @classmethod
    def reset(cls):
        """Resets the Output and clears all register accessors."""
        cls.__REGISTERED_ACCESSORS.clear()
        cls.Output = TraceOutput()

    @classmethod
    def get_registered_name(cls, proxy, reggroup):
        """Returns the registered name for `proxy` in the given `reggroup`."""
        return proxy.SMProxy.GetSessionProxyManager().GetProxyName(reggroup, proxy.SMProxy)

    @classmethod
    def get_varname(cls, name):
        """returns an unique variable name given a suggested variable name. If
        the suggested variable name is already taken, this method will try to
        find a good suffix that's available."""
        # Sanitize, then lower-case the first character (variable style).
        name = sm._make_name_valid(name)
        name = name[0].lower() + name[1:]
        original_name = name
        suffix = 1
        # build a set of existing variable names
        varnameset = set([accessor.Varname for accessor in cls.__REGISTERED_ACCESSORS.values()])
        while name in varnameset:
            name = "%s_%d" % (original_name, suffix)
            suffix += 1
        return name

    @classmethod
    def register_accessor(cls, accessor):
        """Register an instance of an Accessor or subclass"""
        cls.__REGISTERED_ACCESSORS[accessor.get_object()] = accessor

    @classmethod
    def unregister_accessor(cls, accessor):
        """Remove a previously registered accessor (KeyError if absent)."""
        del cls.__REGISTERED_ACCESSORS[accessor.get_object()]

    @classmethod
    def get_accessor(cls, obj):
        """Returns an accessor for obj. If none exists, a new one may be
        created, if possible. Currently obj is expected to be a
        :class:`servermanager.Proxy` instance. In future, we may change this to
        be a vtkSMProxy instance instead."""
        if obj is None:
            return None
        assert isinstance(obj, sm.Proxy)
        try:
            return cls.__REGISTERED_ACCESSORS[obj]
        except KeyError:
            # Create accessor if possible else raise
            # "untraceable" exception.
            if cls.create_accessor(obj):
                return cls.__REGISTERED_ACCESSORS[obj]
            #return "<unknown>"
            raise Untraceable(
                "%s is not 'known' at this point. Hence, we cannot trace "\
                "it. Skipping this action." % repr(obj))

    @classmethod
    def has_accessor(cls, obj):
        """Return True if an accessor is already registered for obj.
        NOTE(review): uses dict.has_key(), so this is Python-2 only."""
        return cls.__REGISTERED_ACCESSORS.has_key(obj)

    @classmethod
    def create_accessor(cls, obj):
        """Create a new accessor for a proxy. This returns True when a
        ProxyAccessor has been created, other returns False. This is needed to
        bring into trace proxies that were either already created when the trace
        was started or were created indirectly and hence not explictly traced."""
        if isinstance(obj, sm.SourceProxy):
            # handle pipeline source/filter proxy.
            pname = obj.SMProxy.GetSessionProxyManager().GetProxyName("sources", obj.SMProxy)
            if pname:
                if obj == simple.GetActiveSource():
                    accessor = ProxyAccessor(cls.get_varname(pname), obj)
                    cls.Output.append_separated([\
                        "# get active source.",
                        "%s = GetActiveSource()" % accessor])
                else:
                    accessor = ProxyAccessor(cls.get_varname(pname), obj)
                    cls.Output.append_separated([\
                        "# find source",
                        "%s = FindSource('%s')" % (accessor, pname)])
                return True
        if obj.SMProxy.IsA("vtkSMViewProxy"):
            # handle view proxy.
            pname = obj.SMProxy.GetSessionProxyManager().GetProxyName("views", obj.SMProxy)
            if pname:
                trace = TraceOutput()
                accessor = ProxyAccessor(cls.get_varname(pname), obj)
                if obj == simple.GetActiveView():
                    trace.append("# get active view")
                    trace.append("%s = GetActiveViewOrCreate('%s')" % (accessor, obj.GetXMLName()))
                else:
                    ctor_args = "'%s', viewtype='%s'" % (pname, obj.GetXMLName())
                    trace.append("# find view")
                    trace.append("%s = FindViewOrCreate(%s)" % (accessor, ctor_args))
                # trace view size, if present. We trace this commented out so
                # that the playback in the GUI doesn't cause issues.
                viewSizeAccessor = accessor.get_property("ViewSize")
                if viewSizeAccessor:
                    trace.append([\
                        "# uncomment following to set a specific view size",
                        "# %s" % viewSizeAccessor.get_property_trace(in_ctor=False)])
                cls.Output.append_separated(trace.raw_data())
                return True
        if obj.SMProxy.IsA("vtkSMRepresentationProxy"):
            # handle representations.
            if hasattr(obj, "Input"):
                inputAccsr = cls.get_accessor(obj.Input)
                view = simple.LocateView(obj)
                viewAccessor = cls.get_accessor(view)
                pname = obj.SMProxy.GetSessionProxyManager().GetProxyName("representations", obj.SMProxy)
                if pname:
                    varname = "%sDisplay" % inputAccsr
                    accessor = ProxyAccessor(cls.get_varname(varname), obj)
                    cls.Output.append_separated([\
                        "# get display properties",
                        "%s = GetDisplayProperties(%s, view=%s)" %\
                            (accessor, inputAccsr, viewAccessor)])
                    return True
        if cls.get_registered_name(obj, "lookup_tables"):
            # handle a color transfer function (registered as a lookup table).
            pname = cls.get_registered_name(obj, "lookup_tables")
            if cls._create_accessor_for_tf(obj, pname):
                return True
        if cls.get_registered_name(obj, "piecewise_functions"):
            # handle an opacity transfer function (piecewise function).
            pname = cls.get_registered_name(obj, "piecewise_functions")
            if cls._create_accessor_for_tf(obj, pname):
                return True
        if cls.get_registered_name(obj, "scalar_bars"):
            # trace scalar bar.
            lutAccessor = cls.get_accessor(obj.LookupTable)
            view = simple.LocateView(obj)
            viewAccessor = cls.get_accessor(view)
            varname = cls.get_varname("%sColorBar" % lutAccessor)
            accessor = ProxyAccessor(varname, obj)
            trace = TraceOutput()
            trace.append(\
                "# get color legend/bar for %s in view %s" % (lutAccessor, viewAccessor))
            trace.append(accessor.trace_ctor(\
                "GetScalarBar",
                SupplementalProxy(ScalarBarProxyFilter()),
                ctor_args="%s, %s" % (lutAccessor, viewAccessor)))
            cls.Output.append_separated(trace.raw_data())
            return True
        if cls.get_registered_name(obj, "animation"):
            return cls._create_accessor_for_animation_proxies(obj)
        if cls.get_registered_name(obj, "layouts"):
            # trace a layout only if the active view is placed in it.
            view = simple.GetActiveView()
            if view and obj.GetViewLocation(view.SMProxy) != -1:
                viewAccessor = cls.get_accessor(view)
                varname = cls.get_varname(cls.get_registered_name(obj, "layouts"))
                accessor = ProxyAccessor(varname, obj)
                cls.Output.append_separated([\
                    "# get layout",
                    "%s = GetLayout()" % accessor])
                return True
        if obj.SMProxy.IsA("vtkSMTimeKeeperProxy"):
            tkAccessor = ProxyAccessor(cls.get_varname(cls.get_registered_name(obj, "timekeeper")), obj)
            cls.Output.append_separated([\
                "# get the time-keeper",
                "%s = GetTimeKeeper()" % tkAccessor])
            return True
        return False

    @classmethod
    def _create_accessor_for_tf(cls, proxy, regname):
        """Create an accessor for a color or opacity transfer function.
        `regname` is matched against '<digits/dots><arrayname>.<xmlname>';
        returns True when an accessor was created, False otherwise."""
        import re
        m = re.match("^[0-9.]*(.+)\\.%s$" % proxy.GetXMLName(), regname)
        if m:
            arrayName = m.group(1)
            if proxy.GetXMLGroup() == "lookup_tables":
                varsuffix = "LUT"
                comment = "color transfer function/color map"
                method = "GetColorTransferFunction"
            else:
                varsuffix = "PWF"
                comment = "opacity transfer function/opacity map"
                method = "GetOpacityTransferFunction"
            varname = cls.get_varname("%s%s" % (arrayName, varsuffix))
            accessor = ProxyAccessor(varname, proxy)
            #cls.Output.append_separated([\
            #    "# get %s for '%s'" % (comment, arrayName),
            #    "%s = %s('%s')" % (accessor, method, arrayName)])
            trace = TraceOutput()
            trace.append("# get %s for '%s'" % (comment, arrayName))
            trace.append(accessor.trace_ctor(\
                method, SupplementalProxy(TransferFunctionProxyFilter()), ctor_args="'%s'" % arrayName))
            cls.Output.append_separated(trace.raw_data())
            return True
        return False

    @classmethod
    def _create_accessor_for_animation_proxies(cls, obj):
        """Create an accessor for animation proxies (scene, time track,
        camera/keyframe cues). Returns True when one was created."""
        pname = cls.get_registered_name(obj, "animation")
        if obj == simple.GetAnimationScene():
            sceneAccessor = ProxyAccessor(cls.get_varname(pname), obj)
            cls.Output.append_separated([\
                "# get animation scene",
                "%s = GetAnimationScene()" % sceneAccessor])
            return True
        if obj == simple.GetTimeTrack():
            accessor = ProxyAccessor(cls.get_varname(pname), obj)
            cls.Output.append_separated([\
                "# get time animation track",
                "%s = GetTimeTrack()" % accessor])
            return True
        if obj.GetXMLName() == "CameraAnimationCue":
            # handle camera animation cue.
            view = obj.AnimatedProxy
            viewAccessor = cls.get_accessor(view)
            accessor = ProxyAccessor(cls.get_varname(pname), obj)
            cls.Output.append_separated([\
                "# get camera animation track for the view",
                "%s = GetCameraTrack(view=%s)" % (accessor, viewAccessor)])
            return True
        if obj.GetXMLName() == "KeyFrameAnimationCue":
            # generic keyframed property track.
            animatedProxyAccessor = cls.get_accessor(obj.AnimatedProxy)
            animatedElement = int(obj.AnimatedElement)
            animatedPropertyName = obj.AnimatedPropertyName
            varname = cls.get_varname("%s%sTrack" % (animatedProxyAccessor, animatedPropertyName))
            accessor = ProxyAccessor(varname, obj)
            cls.Output.append_separated([\
                "# get animation track",
                "%s = GetAnimationTrack('%s', index=%d, proxy=%s)" %\
                    (accessor, animatedPropertyName, animatedElement, animatedProxyAccessor)])
            return True
        if obj.GetXMLName() == "PythonAnimationCue":
            raise Untraceable("PythonAnimationCue's are currently not supported in trace")
        return False
class Untraceable(Exception):
    """Raised when an action cannot be represented in the generated trace.

    Carries a log message (default ``"<unspecified>"``) describing why the
    action was skipped; str() renders the repr of that message."""

    def __init__(self, logmessage="<unspecified>"):
        # Fix: call the base Exception initializer so that `args` is
        # populated — keeps standard Exception behavior (repr, pickling)
        # intact instead of leaving an empty args tuple.
        Exception.__init__(self, logmessage)
        self.LogMessage = logmessage

    def __str__(self):
        return repr(self.LogMessage)
class Accessor(object):
    """Associates a traced object with the Python variable name used to
    refer to it in the generated trace. Instances register themselves with
    the global Trace registry on construction."""

    def __init__(self, varname, obj):
        self.Varname = varname
        self.__Object = obj
        # Make this accessor discoverable through Trace.get_accessor(obj);
        # registration keys on get_object(), so __Object is set first.
        Trace.register_accessor(self)

    def finalize(self):
        """Unregister from Trace and release the referenced object."""
        Trace.unregister_accessor(self)
        self.__Object = None

    def __str__(self):
        # An accessor renders as its variable name, which lets it be
        # embedded directly in trace text via %s formatting.
        return self.Varname

    def get_object(self):
        """Return the object being accessed (None once finalized)."""
        return self.__Object
class RealProxyAccessor(Accessor):
    """Accessor for a servermanager.Proxy. Keeps a hard reference to the
    proxy (see the module notes about references) and builds a
    PropertyTraceHelper for every property on the proxy worth tracing."""

    # Factory callbacks tried by create() before falling back to this class;
    # most recently registered callback is tried first.
    __CreateCallbacks = []

    @classmethod
    def register_create_callback(cls, function):
        """Register a factory callback (takes priority over earlier ones)."""
        cls.__CreateCallbacks.insert(0, function)

    @classmethod
    def unregister_create_callback(cls, function):
        """Remove a previously registered factory callback."""
        cls.__CreateCallbacks.remove(function)

    @classmethod
    def create(cls, *args, **kwargs):
        """Create an accessor, letting registered callbacks go first; a
        callback declines by raising NotImplementedError."""
        for x in cls.__CreateCallbacks:
            try:
                return x(*args, **kwargs)
            except NotImplementedError: pass
        return RealProxyAccessor(*args, **kwargs)

    def __init__(self, varname, proxy):
        """Register the accessor and build property helpers in the proxy's
        declared property order (skipping plain sm.Property instances,
        which carry no value to trace)."""
        Accessor.__init__(self, varname, proxy)
        self.OrderedProperties = []
        # Create accessors for properties on this proxy.
        oiter = sm.vtkSMOrderedPropertyIterator()
        oiter.SetProxy(proxy.SMProxy)
        while not oiter.IsAtEnd():
            prop_name = oiter.GetKey()
            prop_label = oiter.GetPropertyLabel()
            sanitized_label = sm._make_name_valid(prop_label)
            prop = proxy.GetProperty(prop_name)
            if not type(prop) == sm.Property:
                # Note: when PropertyTraceHelper for a property with ProxyListDomain is
                # created, it creates accessors for all proxies in the domain as well.
                prop_accessor = PropertyTraceHelper(sanitized_label, self)
                self.OrderedProperties.append(prop_accessor)
            oiter.Next()
        del oiter

    def finalize(self):
        """Finalize all property helpers, then this accessor itself."""
        for x in self.OrderedProperties:
            x.finalize()
        Accessor.finalize(self)

    def get_property(self, name):
        """Return the PropertyTraceHelper with the given (sanitized) name,
        or None if no such property exists."""
        for x in self.OrderedProperties:
            if x.get_property_name() == name:
                return x
        return None

    def get_properties(self):
        """Return a shallow copy of the list of property helpers."""
        return self.OrderedProperties[:]

    def get_ctor_properties(self):
        """Returns a list of property accessors that should be specified
        in the constructor."""
        return [x for x in self.OrderedProperties if self.is_ctor_property(x)]

    def is_ctor_property(self, prop):
        """True for properties traced in the constructor call: input
        properties and properties with a file-list domain."""
        return prop.get_object().IsA("vtkSMInputProperty") or \
            prop.get_object().FindDomain("vtkSMFileListDomain") != None

    def trace_properties(self, props, in_ctor):
        """Join property traces: comma-separated (with continuation indent)
        inside a constructor call, newline-separated otherwise."""
        joiner = ",\n    " if in_ctor else "\n"
        return joiner.join([x.get_property_trace(in_ctor) for x in props])

    def trace_ctor(self, ctor, filter, ctor_args=None, skip_assignment=False):
        """Generate the trace lines that construct this proxy via `ctor`
        and then set its remaining properties, as decided by `filter`.
        With skip_assignment=True no variable assignment is emitted (and no
        post-constructor properties may exist). Returns a list of lines."""
        args_in_ctor = str(ctor_args) if not ctor_args is None else ""
        # trace any properties that the 'filter' tells us should be traced
        # in ctor.
        ctor_props = [x for x in self.OrderedProperties if filter.should_trace_in_ctor(x)]
        ctor_props_trace = self.trace_properties(ctor_props, in_ctor=True)
        if args_in_ctor and ctor_props_trace:
            args_in_ctor = "%s, %s" % (args_in_ctor, ctor_props_trace)
        else:
            args_in_ctor += ctor_props_trace
        # locate all the other properties that should be traced in create.
        other_props = [x for x in self.OrderedProperties \
            if filter.should_trace_in_create(x) and not filter.should_trace_in_ctor(x)]
        trace = TraceOutput()
        if not ctor is None:
            if not skip_assignment:
                trace.append("%s = %s(%s)" % (self, ctor, args_in_ctor))
            else:
                assert len(other_props) == 0
                trace.append("%s(%s)" % (ctor, args_in_ctor))
                return trace.raw_data()
        # FIXME: would like trace_properties() to return a list instead of
        # a string.
        txt = self.trace_properties(other_props, in_ctor=False)
        if txt: trace.append(txt)
        # Now, if any of the props has ProxyListDomain, we should trace their
        # "ctors" as well. Tracing ctors for ProxyListDomain proxies simply
        # means tracing their property values.
        pld_props = [x for x in self.OrderedProperties if x.has_proxy_list_domain()]
        for prop in pld_props:
            paccessor = Trace.get_accessor(prop.get_property_value())
            sub_trace = paccessor.trace_ctor(None, filter)
            if sub_trace:
                trace.append_separated(\
                    "# init the %s selected for '%s'" % (prop.get_value(), prop.get_property_name()))
                trace.append(sub_trace)
        return trace.raw_data()
def ProxyAccessor(*args, **kwargs):
    """Factory for proxy accessors; simply delegates to
    RealProxyAccessor.create() with the given arguments."""
    return RealProxyAccessor.create(*args, **kwargs)
class PropertyTraceHelper(object):
    """PropertyTraceHelper is used by RealProxyAccessor to help with tracing
    properties. In its constructor, RealProxyAccessor creates a
    PropertyTraceHelper for each of its properties that could potentially need
    to be traced."""
    def __init__(self, propertyname, proxyAccessor):
        """Constructor.
        :param propertyname: name used to access the property. This is the
            sanitized property label.
        :param proxyAccessor: RealProxyAccessor instance for the proxy.
        """
        assert isinstance(proxyAccessor, RealProxyAccessor)
        assert type(propertyname) == str
        # weak reference to the servermanager.Property; filled lazily by
        # get_object().
        self.__PyProperty = None
        self.PropertyName = propertyname
        self.ProxyAccessor = proxyAccessor
        # e.g. "sphere0.Radius" -- used when tracing outside a ctor.
        self.FullScopedName = "%s.%s" % (proxyAccessor, propertyname)
        pyprop = self.get_object()
        assert not pyprop is None
        pld_domain = pyprop.FindDomain("vtkSMProxyListDomain")
        self.HasProxyListDomain = isinstance(pyprop, sm.ProxyProperty) and pld_domain != None
        self.ProxyListDomainProxyAccessors = []
        if self.HasProxyListDomain:
            # register accessors for proxies in the proxy list domain.
            # This is cheating. Since there's no accessor for a proxy in the domain
            # unless the proxy is "active" in the property. However, since ParaView
            # UI never modifies the other properties, we cheat
            for i in xrange(pld_domain.GetNumberOfProxies()):
                domain_proxy = pld_domain.GetProxy(i)
                plda = ProxyAccessor(self.get_varname(), sm._getPyProxy(domain_proxy))
                self.ProxyListDomainProxyAccessors.append(plda)
    def __del__(self):
        self.finalize()
    def finalize(self):
        # release the accessors created for proxy-list-domain proxies.
        for x in self.ProxyListDomainProxyAccessors:
            x.finalize()
        self.ProxyListDomainProxyAccessors = []
    def get_object(self):
        """Returns the servermanager.Property (or subclass) for the
        vtkSMProperty this trace helper is helping with."""
        if self.__PyProperty is None or self.__PyProperty() is None:
            # This will raise Untraceable exception if the ProxyAccessor cannot
            # locate the servermanager.Proxy for the SMProxy it refers to.
            pyproperty = self.ProxyAccessor.get_object().GetProperty(self.get_property_name())
            # hold only a weak reference so this helper does not keep the
            # property object alive.
            self.__PyProperty = weakref.ref(pyproperty)
            return pyproperty
        return self.__PyProperty()
    def get_property_trace(self, in_ctor):
        """return trace-text for the property.
        :param in_ctor: if False, the generated trace will use the
        fully-scoped name when referring to the property e.g. sphere0.Radius=2,
        else it will use just the property name e.g. Radius=2."""
        varname = self.get_varname(in_ctor)
        # ctor arguments use keyword-argument style (no spaces around '=');
        # statements use assignment style.
        if in_ctor: return "%s=%s" % (varname, self.get_value())
        else: return "%s = %s" % (varname, self.get_value())
    def get_varname(self, not_fully_scoped=False):
        """Returns the variable name to use when referring to this property.
        :param not_fully_scoped: if False, this will return the
        fully-scoped name when referring to the property e.g. sphere0.Radius,
        else it will use just the property name e.g. Radius"""
        return self.PropertyName if not_fully_scoped else self.FullScopedName
    def get_value(self):
        """Returns the property value as a string. For proxy properties, this
        will either be a string used to refer to another proxy or a string used
        to refer to the proxy in a proxy list domain."""
        if isinstance(self.get_object(), sm.ProxyProperty):
            # NOTE(review): this first assignment is immediately overwritten
            # by both branches below and appears to be dead code.
            data = self.get_object()[:]
            if self.has_proxy_list_domain():
                # proxy-list-domain proxies are referred to by XML label.
                data = ["'%s'" % x.GetXMLLabel() for x in self.get_object()[:]]
            else:
                # other proxies are referred to by their accessor varnames.
                data = [str(Trace.get_accessor(x)) for x in self.get_object()[:]]
            try:
                if len(data) > 1:
                    return "[%s]" % (", ".join(data))
                else:
                    return data[0]
            except IndexError:
                # an empty proxy property traces as None.
                return "None"
        else:
            return str(self.get_object())
    def has_proxy_list_domain(self):
        """Returns True if this property has a ProxyListDomain, else False."""
        return self.HasProxyListDomain
    def get_property_name(self):
        # sanitized property label used in the generated script.
        return self.PropertyName
    def get_property_value(self):
        """Return the Property value as would be returned by
        servermanager.Proxy.GetPropertyValue()."""
        return self.ProxyAccessor.get_object().GetPropertyValue(self.get_property_name())
# ===================================================================================================
# === Filters used to filter properties traced ===
# ===================================================================================================
class ProxyFilter(object):
    """Base filter deciding which properties of a proxy are traced, and
    whether they are traced as constructor arguments or as assignments
    after creation. Subclasses specialize the policy per proxy kind."""
    def should_never_trace(self, prop, hide_gui_hidden=True):
        """Return True if *prop* must never appear in the trace.

        :param prop: a PropertyTraceHelper.
        :param hide_gui_hidden: when True, properties whose panel visibility
            is "never" are skipped (views pass False so e.g. ViewSize is
            still traced)."""
        if prop.get_object().GetIsInternal() or prop.get_object().GetInformationOnly():
            return True
        # should we hide properties hidden from panels? yes, generally, except
        # Views.
        # (a leftover debug print for "ViewSize" was removed here)
        if hide_gui_hidden and prop.get_object().GetPanelVisibility() == "never":
            return True
        # if a property is "linked" to settings, then skip it here too. We
        # should eventually add an option for user to save, yes, save these too.
        if prop.get_object().GetHints():
            plink = prop.get_object().GetHints().FindNestedElementByName("PropertyLink")
            return True if plink and plink.GetAttribute("group") == "settings" else False
        return False
    def should_trace_in_create(self, prop, user_can_modify_in_create=True):
        """Return True if *prop* should be traced right after creation.
        Honors the tracer's properties-to-trace-on-create setting."""
        if self.should_never_trace(prop): return False
        setting = sm.vtkSMTrace.GetActiveTracer().GetPropertiesToTraceOnCreate()
        if setting == sm.vtkSMTrace.RECORD_USER_MODIFIED_PROPERTIES and not user_can_modify_in_create:
            # In ParaView, user never changes properties in Create. It's only
            # afterwords, so skip all properties.
            return False
        trace_props_with_default_values = True \
            if setting == sm.vtkSMTrace.RECORD_ALL_PROPERTIES else False
        return (trace_props_with_default_values or not prop.get_object().IsValueDefault())
    def should_trace_in_ctor(self, prop):
        """Base filters trace nothing in the constructor."""
        return False
class PipelineProxyFilter(ProxyFilter):
    """Property filter for pipeline proxies (sources, readers, filters)."""

    def should_trace_in_create(self, prop):
        """The ParaView UI never lets the user edit properties while a
        pipeline proxy is being created, so pass
        user_can_modify_in_create=False to the base policy."""
        return ProxyFilter.should_trace_in_create(
            self, prop, user_can_modify_in_create=False)

    def should_never_trace(self, prop):
        """overridden to avoid hiding "non-gui" properties such as FileName."""
        # properties with a file-list domain are always eligible for tracing
        has_file_domain = \
            prop.get_object().FindDomain("vtkSMFileListDomain") is not None
        if has_file_domain:
            return False
        return ProxyFilter.should_never_trace(self, prop)

    def should_trace_in_ctor(self, prop):
        """Input properties and file-name properties become ctor arguments."""
        if self.should_never_trace(prop):
            return False
        smproperty = prop.get_object()
        return (smproperty.IsA("vtkSMInputProperty") or
                smproperty.FindDomain("vtkSMFileListDomain") is not None)
class ExodusIIReaderFilter(PipelineProxyFilter):
    """Pipeline filter that suppresses crash-prone Exodus reader properties."""

    # Exodus reader has way too many wacky properties; tracing them causes
    # the reader to segfault. We need to either remove those properties
    # entirely or fix them. Until I get a chance to get to the bottom of it,
    # I am opting to ignore those properties when tracing.
    _SKIPPED_NAMES = frozenset(
        ["FilePrefix", "XMLFileName", "FilePattern", "FileRange"])

    def should_never_trace(self, prop):
        if PipelineProxyFilter.should_never_trace(self, prop):
            return True
        return prop.get_property_name() in self._SKIPPED_NAMES
class RepresentationProxyFilter(PipelineProxyFilter):
    """Filter for display/representation proxies: nothing goes in the ctor."""

    def should_trace_in_ctor(self, prop):
        return False

    def should_never_trace(self, prop):
        """Skip 'Input' and the selection array-name properties, in addition
        to everything the pipeline filter already skips."""
        if PipelineProxyFilter.should_never_trace(self, prop):
            return True
        skipped = ("Input",
                   "SelectionCellFieldDataArrayName",
                   "SelectionPointFieldDataArrayName")
        return prop.get_property_name() in skipped
class ViewProxyFilter(ProxyFilter):
    """Property filter for view proxies."""

    def should_never_trace(self, prop):
        # Skip "Representations" and a few other properties.
        # The fact that we need to skip so many properties means that we are
        # missing something in the design of vtkSMProperties here. We need to
        # reclassify properties to cleanly address all its "roles".
        skipped = ("ViewTime", "CacheKey", "Representations",
                   "CameraClippingRange")
        if prop.get_property_name() in skipped:
            return True
        # views do want gui-hidden properties (e.g. ViewSize) traced, hence
        # hide_gui_hidden=False.
        return ProxyFilter.should_never_trace(self, prop, hide_gui_hidden=False)
class AnimationProxyFilter(ProxyFilter):
    """Property filter for animation proxies (cues and key frames)."""

    def should_never_trace(self, prop):
        if ProxyFilter.should_never_trace(self, prop):
            return True
        excluded = ("AnimatedProxy", "AnimatedPropertyName",
                    "AnimatedElement", "AnimatedDomainName")
        return prop.get_property_name() in excluded
class ExporterProxyFilter(ProxyFilter):
    """Filter for exporter proxies: every traceable property is a ctor arg."""

    def should_trace_in_ctor(self, prop):
        return not self.should_never_trace(prop) and \
            self.should_trace_in_create(prop)

    def should_never_trace(self, prop):
        # FileName is passed explicitly by the ExportView trace item, so it
        # must never be traced as a regular property.
        if ProxyFilter.should_never_trace(self, prop):
            return True
        return prop.get_property_name() == "FileName"
class WriterProxyFilter(ProxyFilter):
    """Filter for writer proxies: every traceable property is a ctor arg."""

    def should_trace_in_ctor(self, prop):
        return not self.should_never_trace(prop) and \
            self.should_trace_in_create(prop)

    def should_never_trace(self, prop):
        # FileName and Input are passed explicitly by the SaveData trace
        # item, so they must never be traced as regular properties.
        if ProxyFilter.should_never_trace(self, prop):
            return True
        return prop.get_property_name() in ("FileName", "Input")
class TransferFunctionProxyFilter(ProxyFilter):
    """Filter for transfer-function proxies; keeps gui-hidden properties."""

    def should_trace_in_ctor(self, prop):
        return False

    def should_never_trace(self, prop):
        if ProxyFilter.should_never_trace(self, prop, hide_gui_hidden=False):
            return True
        return prop.get_property_name() == "ScalarOpacityFunction"
class ScalarBarProxyFilter(ProxyFilter):
    """Filter for scalar-bar (color legend) proxies."""

    def should_trace_in_ctor(self, prop):
        return False

    def should_never_trace(self, prop):
        # despite being hidden from the panel, these placement properties
        # must still be traced.
        always_traced = ("Position", "Position2", "Orientation")
        if prop.get_property_name() in always_traced:
            return False
        return ProxyFilter.should_never_trace(self, prop)
def SupplementalProxy(cls):
    """This function decorates a ProxyFilter. Designed to be
    used for supplemental proxies, so that we can centralize the logic
    to decide whether to trace any of the properties on the supplemental
    proxies the first time that proxy is accessed."""
    setting = sm.vtkSMTrace.GetActiveTracer().GetFullyTraceSupplementalProxies()
    if setting: return cls
    # Otherwise patch the filter class in place so that no property is ever
    # traced, neither in the ctor nor after creation.
    def should_trace_in_ctor(self, *args, **kwargs):
        return False
    def should_trace_in_create(self, *args, **kwargs):
        return False
    cls.should_trace_in_create = should_trace_in_create
    cls.should_trace_in_ctor = should_trace_in_ctor
    return cls
# ===================================================================================================
# === TraceItem types ==
# TraceItems are units of traceable actions triggerred by the application using vtkSMTrace
# ===================================================================================================
class TraceItem(object):
    """Base class for all traceable actions. A subclass records state in its
    constructor when the action begins, and emits its trace text from
    finalize() when the action completes."""

    def __init__(self):
        pass

    def finalize(self):
        """Called when the traced action ends; the default is a no-op."""
        pass
class NestableTraceItem(TraceItem):
    """Base class for trace item that can be nested i.e.
    can trace when some other trace item is active.
    (Non-nestable items are rejected by _create_trace_item_internal while
    other items are still active.)"""
    pass
class BookkeepingItem(NestableTraceItem):
    """Base class for trace items that are only used for
    book keeping and don't affect the trace itself.
    (These are not tracked in the active-items list.)"""
    pass
class RegisterPipelineProxy(TraceItem):
    """This traces the creation of a Pipeline Proxy such as
    sources/filters/readers etc."""
    def __init__(self, proxy):
        TraceItem.__init__(self)
        self.Proxy = sm._getPyProxy(proxy)
    def finalize(self):
        # name the variable after the proxy's registered "sources" name.
        pname = Trace.get_registered_name(self.Proxy, "sources")
        varname = Trace.get_varname(pname)
        accessor = ProxyAccessor(varname, self.Proxy)
        ctor = sm._make_name_valid(self.Proxy.GetXMLLabel())
        trace = TraceOutput()
        trace.append("# create a new '%s'" % self.Proxy.GetXMLLabel())
        # Exodus readers need the special filter that skips crash-prone
        # properties (see ExodusIIReaderFilter).
        filter_type = ExodusIIReaderFilter() \
            if isinstance(self.Proxy, sm.ExodusIIReaderProxy) else PipelineProxyFilter()
        trace.append(accessor.trace_ctor(ctor, filter_type))
        Trace.Output.append_separated(trace.raw_data())
        TraceItem.finalize(self)
class Delete(TraceItem):
    """This traces the deletion of a Pipeline proxy.
    All work happens in the constructor; there is nothing to finalize."""
    def __init__(self, proxy):
        TraceItem.__init__(self)
        proxy = sm._getPyProxy(proxy)
        accessor = Trace.get_accessor(proxy)
        Trace.Output.append_separated([\
            "# destroy %s" % (accessor),
            "Delete(%s)" % (accessor),
            "del %s" % accessor])
        # drop the accessor and collect immediately so the traced variable
        # name can be reused for a future proxy.
        accessor.finalize()
        del accessor
        import gc
        gc.collect()
class CleanupAccessor(BookkeepingItem):
    """Book-keeping item that discards the accessor for a proxy, if one
    exists, without emitting any trace text."""
    def __init__(self, proxy):
        self.Proxy = sm._getPyProxy(proxy)
    def finalize(self):
        if Trace.has_accessor(self.Proxy):
            accessor = Trace.get_accessor(self.Proxy)
            accessor.finalize()
            del accessor
        # force a collection so weakly-referenced accessors go away promptly.
        import gc
        gc.collect()
class PropertiesModified(NestableTraceItem):
    """Traces properties modified on a specific proxy."""
    def __init__(self, proxy, comment=None):
        TraceItem.__init__(self)
        proxy = sm._getPyProxy(proxy)
        self.ProxyAccessor = Trace.get_accessor(proxy)
        # Record the current modification time; finalize() traces only
        # properties whose MTime advances past this stamp.
        self.MTime = vtkTimeStamp()
        self.MTime.Modified()
        self.Comment = "#%s" % comment if not comment is None else \
            "# Properties modified on %s" % str(self.ProxyAccessor)
        try:
            # Hack to track ScalarOpacityFunction property changes since that proxy
            # is shown on the same pqProxyWidget as the ColorTransferFunction proxy --
            # which is non-standard.
            if proxy.ScalarOpacityFunction:
                self.ScalarOpacityFunctionHack = PropertiesModified(proxy.ScalarOpacityFunction)
        except: pass  # deliberately best-effort: most proxies have no ScalarOpacityFunction
    def finalize(self):
        props = self.ProxyAccessor.get_properties()
        # only trace properties modified after this item was created.
        props_to_trace = [k for k in props if self.MTime.GetMTime() < k.get_object().GetMTime()]
        if props_to_trace:
            Trace.Output.append_separated([
                self.Comment,
                self.ProxyAccessor.trace_properties(props_to_trace, in_ctor=False)])
        # also handle properties on values for properties with ProxyListDomain.
        for prop in [k for k in props if k.has_proxy_list_domain()]:
            val = prop.get_property_value()
            if val:
                valaccessor = Trace.get_accessor(val)
                props = valaccessor.get_properties()
                props_to_trace = [k for k in props if self.MTime.GetMTime() < k.get_object().GetMTime()]
                if props_to_trace:
                    Trace.Output.append_separated([
                        "# Properties modified on %s" % valaccessor,
                        valaccessor.trace_properties(props_to_trace, in_ctor=False)])
        TraceItem.finalize(self)
        # finalize the nested hack-item, if it was created in __init__.
        try:
            self.ScalarOpacityFunctionHack.finalize()
            del self.ScalarOpacityFunctionHack
        except AttributeError: pass
class Show(TraceItem):
    """Traces Show: showing a producer's output port in a view through a
    display (representation) proxy."""
    def __init__(self, producer, port, view, display, comment=None):
        TraceItem.__init__(self)
        producer = sm._getPyProxy(producer)
        view = sm._getPyProxy(view)
        display = sm._getPyProxy(display)
        self.ProducerAccessor = Trace.get_accessor(producer)
        self.ViewAccessor = Trace.get_accessor(view)
        self.OutputPort = port
        self.Display = display
        self.Comment = comment
    def finalize(self):
        display = self.Display
        if not Trace.has_accessor(display):
            # first time this display is seen: name it after its producer and
            # remember to trace its non-default properties below.
            pname = "%sDisplay" % self.ProducerAccessor
            accessor = ProxyAccessor(Trace.get_varname(pname), display)
            trace_ctor = True
        else:
            accessor = Trace.get_accessor(display)
            trace_ctor = False
        port = self.OutputPort
        output = TraceOutput()
        if not self.Comment is None:
            output.append("# %s" % self.Comment)
        else:
            output.append("# show data in view")
        # non-default output ports need the OutputPort(...) wrapper.
        if port > 0:
            output.append("%s = Show(OutputPort(%s, %d), %s)" % \
                (str(accessor), str(self.ProducerAccessor), port, str(self.ViewAccessor)))
        else:
            output.append("%s = Show(%s, %s)" % \
                (str(accessor), str(self.ProducerAccessor), str(self.ViewAccessor)))
        if trace_ctor:
            # Now trace default values.
            ctor_trace = accessor.trace_ctor(None, RepresentationProxyFilter())
            if ctor_trace:
                output.append("# trace defaults for the display properties.")
                output.append(ctor_trace)
        Trace.Output.append_separated(output.raw_data())
        TraceItem.finalize(self)
class Hide(TraceItem):
    """Traces Hide. All work happens in the constructor."""
    def __init__(self, producer, port, view):
        TraceItem.__init__(self)
        producer = sm._getPyProxy(producer)
        view = sm._getPyProxy(view)
        producerAccessor = Trace.get_accessor(producer)
        viewAccessor = Trace.get_accessor(view)
        # port 0 is the default and is omitted; others use OutputPort(...).
        Trace.Output.append_separated([\
            "# hide data in view",
            "Hide(%s, %s)" % (str(producerAccessor), str(viewAccessor)) if port == 0 else \
            "Hide(OutputPort(%s, %d), %s)" % (str(producerAccessor), port, str(viewAccessor))])
class SetScalarColoring(TraceItem):
    """Trace vtkSMPVRepresentationProxy.SetScalarColoring"""
    def __init__(self, display, arrayname, attribute_type):
        TraceItem.__init__(self)
        self.Display = sm._getPyProxy(display)
        self.ArrayName = arrayname
        self.AttributeType = attribute_type
    def finalize(self):
        TraceItem.finalize(self)
        # an empty array name means scalar coloring is being turned off.
        if self.ArrayName:
            Trace.Output.append_separated([\
                "# set scalar coloring",
                "ColorBy(%s, ('%s', '%s'))" % (\
                    str(Trace.get_accessor(self.Display)),
                    sm.GetAssociationAsString(self.AttributeType),
                    self.ArrayName)])
        else:
            Trace.Output.append_separated([\
                "# turn off scalar coloring",
                "ColorBy(%s, None)" % str(Trace.get_accessor(self.Display))])
class RegisterViewProxy(TraceItem):
    """Traces creation of a new view (vtkSMParaViewPipelineController::RegisterViewProxy)."""
    def __init__(self, proxy):
        TraceItem.__init__(self)
        self.Proxy = sm._getPyProxy(proxy)
        assert not self.Proxy is None
    def finalize(self):
        pname = Trace.get_registered_name(self.Proxy, "views")
        varname = Trace.get_varname(pname)
        accessor = ProxyAccessor(varname, self.Proxy)
        # unlike for filters/sources, for views the CreateView function still takes the
        # xml name for the view, not its label.
        ctor_args = "'%s'" % self.Proxy.GetXMLName()
        trace = TraceOutput()
        trace.append("# Create a new '%s'" % self.Proxy.GetXMLLabel())
        filter = ViewProxyFilter()
        trace.append(accessor.trace_ctor("CreateView", filter, ctor_args))
        Trace.Output.append_separated(trace.raw_data())
        viewSizeAccessor = accessor.get_property("ViewSize")
        if viewSizeAccessor and not filter.should_trace_in_create(viewSizeAccessor):
            # trace view size, if present. We trace this commented out so
            # that the playback in the GUI doesn't cause issues.
            Trace.Output.append([\
                "# uncomment following to set a specific view size",
                "# %s" % viewSizeAccessor.get_property_trace(in_ctor=False)])
        # we assume views don't have proxy list domains for now, and ignore tracing them.
        TraceItem.finalize(self)
class ExportView(TraceItem):
    """Traces exporting a view to a file via an exporter proxy.
    All work happens in the constructor."""
    def __init__(self, view, exporter, filename):
        TraceItem.__init__(self)
        view = sm._getPyProxy(view)
        exporter = sm._getPyProxy(exporter)
        viewAccessor = Trace.get_accessor(view)
        # the exporter accessor is temporary: it exists only to format the
        # ExportView(...) call and is discarded immediately afterwards.
        exporterAccessor = ProxyAccessor("temporaryExporter", exporter)
        trace = TraceOutput()
        trace.append("# export view")
        trace.append(\
            exporterAccessor.trace_ctor("ExportView", ExporterProxyFilter(),
                ctor_args="'%s', view=%s" % (filename, viewAccessor),
                skip_assignment=True))
        exporterAccessor.finalize() # so that it will get deleted
        del exporterAccessor
        Trace.Output.append_separated(trace.raw_data())
class SaveData(TraceItem):
    """Traces writing a source's output to a file via a writer proxy.
    All work happens in the constructor."""
    def __init__(self, writer, filename, source, port):
        TraceItem.__init__(self)
        source = sm._getPyProxy(source, port)
        sourceAccessor = Trace.get_accessor(source)
        writer = sm._getPyProxy(writer)
        # the writer accessor is temporary: it only formats the SaveData(...)
        # call and is discarded immediately afterwards.
        writerAccessor = ProxyAccessor("temporaryWriter", writer)
        # non-default output ports need the OutputPort(...) wrapper.
        if port > 0:
            ctor_args_1 = "OutputPort(%s, %d)" % (sourceAccessor, port)
        else:
            ctor_args_1 = "%s" % sourceAccessor
        trace = TraceOutput()
        trace.append("# save data")
        trace.append(\
            writerAccessor.trace_ctor("SaveData", WriterProxyFilter(),
                ctor_args="'%s', proxy=%s" % (filename, ctor_args_1),
                skip_assignment=True))
        writerAccessor.finalize() # so that it will get deleted.
        del writerAccessor
        del writer
        Trace.Output.append_separated(trace.raw_data())
class EnsureLayout(TraceItem):
    """Ensures an accessor exists for the given layout proxy so later items
    can refer to it by name; emits no trace text itself."""
    def __init__(self, layout):
        TraceItem.__init__(self)
        layout = sm._getPyProxy(layout)
        # NOTE: Trace.get_accessor() is called for its side effect of
        # registering the layout with the trace; the local is otherwise unused.
        accessor = Trace.get_accessor(layout)
class RegisterLayoutProxy(TraceItem):
    """Traces the creation (registration) of a new layout proxy."""
    def __init__(self, layout):
        TraceItem.__init__(self)
        self.Layout = sm._getPyProxy(layout)
    def finalize(self):
        # name the variable after the proxy's registered "layouts" name.
        pname = Trace.get_registered_name(self.Layout, "layouts")
        accessor = ProxyAccessor(Trace.get_varname(pname), self.Layout)
        Trace.Output.append_separated([\
            "# create new layout object",
            "%s = CreateLayout()" % accessor])
        TraceItem.finalize(self)
class CreateAnimationTrack(TraceItem):
    """Traces creation of an animation track (cue) and its key frames."""
    # FIXME: animation tracing support in general needs to be revamped after moving
    # animation control logic to the server manager from Qt layer.
    def __init__(self, cue):
        TraceItem.__init__(self)
        self.Cue = sm._getPyProxy(cue)
    def finalize(self):
        TraceItem.finalize(self)
        # We let Trace create an accessor for the cue. We will then simply log the
        # default property values.
        accessor = Trace.get_accessor(self.Cue)
        trace = TraceOutput()
        trace.append("# create keyframes for this animation track")
        # Create accessors for each of the animation key frames.
        for keyframeProxy in self.Cue.KeyFrames:
            pname = Trace.get_registered_name(keyframeProxy, "animation")
            kfaccessor = ProxyAccessor(Trace.get_varname(pname), keyframeProxy)
            ctor = sm._make_name_valid(keyframeProxy.GetXMLLabel())
            trace.append_separated("# create a key frame")
            trace.append(kfaccessor.trace_ctor(ctor, AnimationProxyFilter()))
        # Now trace properties on the cue.
        trace.append_separated("# initialize the animation track")
        trace.append(accessor.trace_ctor(None, AnimationProxyFilter()))
        Trace.Output.append_separated(trace.raw_data())
class RenameProxy(TraceItem):
    """Trace renaming of a source proxy.

    :raises Untraceable: if the proxy is not registered under "sources"."""
    def __init__(self, proxy):
        TraceItem.__init__(self)
        proxy = sm._getPyProxy(proxy)
        if Trace.get_registered_name(proxy, "sources"):
            self.Accessor = Trace.get_accessor(proxy)
            self.Proxy = proxy
        else:
            raise Untraceable("Only source proxy renames are traced.")
    def finalize(self):
        # by finalize time the registered name reflects the *new* name.
        if self.Accessor:
            newname = Trace.get_registered_name(self.Proxy, "sources")
            Trace.Output.append_separated([\
                "# rename source object",
                "RenameSource('%s', %s)" % (newname, self.Accessor)])
        TraceItem.finalize(self)
class SetCurrentProxy(TraceItem):
    """Traces change in active view/source etc.
    All work happens in the constructor.
    NOTE(review): the 'command' parameter is currently unused -- confirm
    whether it is reserved for future use."""
    def __init__(self, selmodel, proxy, command):
        TraceItem.__init__(self)
        if proxy and proxy.IsA("vtkSMOutputPort"):
            # FIXME: need to handle port number.
            proxy = sm._getPyProxy(proxy.GetSourceProxy())
        else:
            proxy = sm._getPyProxy(proxy)
        accessor = Trace.get_accessor(proxy)
        # identify which selection model changed to emit the right call.
        pxm = selmodel.GetSessionProxyManager()
        if selmodel is pxm.GetSelectionModel("ActiveView"):
            Trace.Output.append_separated([\
                "# set active view",
                "SetActiveView(%s)" % accessor])
        elif selmodel is pxm.GetSelectionModel("ActiveSources"):
            Trace.Output.append_separated([\
                "# set active source",
                "SetActiveSource(%s)" % accessor])
        else:
            raise Untraceable("Unknown selection model")
class CallMethod(TraceItem):
    """Traces a method invocation on a proxy, e.g. view.ResetCamera()."""
    def __init__(self, proxy, methodname, *args, **kwargs):
        TraceItem.__init__(self)
        trace = self.get_trace(proxy, methodname, args, kwargs)
        if trace:
            Trace.Output.append_separated(trace)
    def get_trace(self, proxy, methodname, args, kwargs):
        """Build the list of trace lines for calling *methodname* on *proxy*."""
        to_trace = []
        try:
            # an optional 'comment' kwarg becomes a leading comment line and
            # is not forwarded as a call argument.
            to_trace.append("# " + kwargs["comment"])
            del kwargs["comment"]
        except KeyError:
            pass
        accessor = Trace.get_accessor(sm._getPyProxy(proxy))
        args = [str(CallMethod.marshall(x)) for x in args]
        args += ["%s=%s" % (key, CallMethod.marshall(val)) for key, val in kwargs.iteritems()]
        to_trace.append("%s.%s(%s)" % (accessor, methodname, ", ".join(args)))
        return to_trace
    @classmethod
    def marshall(cls, x):
        """Convert an argument to its trace representation: proxies become
        their accessor variable names, strings are quoted, anything else is
        used as-is.
        NOTE(review): an object that implements IsA() but is not a
        "vtkSMProxy" falls through both branches and yields None -- confirm
        whether that is intended."""
        try:
            if x.IsA("vtkSMProxy"):
                return Trace.get_accessor(sm._getPyProxy(x))
        except AttributeError:
            return "'%s'" % x if type(x) == str else x
def _bind_on_event(ref):
def _callback(obj, string):
ref().on_event(obj, string)
return _callback
class CallMethodIfPropertiesModified(CallMethod):
    """Similar to CallMethod, except that the trace will get logged only
    if the proxy fires PropertiesModified event before the trace-item is
    finalized."""
    def __init__(self, proxy, methodname, *args, **kwargs):
        # NOTE: CallMethod.__init__ is deliberately not invoked here --
        # tracing is deferred to finalize() and only happens if a
        # modification event actually fired.
        self.proxy = proxy
        self.methodname = methodname
        self.args = args
        self.kwargs = kwargs
        # observe via a weakref-bound callback so the observer does not
        # create a reference cycle keeping this item alive.
        self.tag = proxy.AddObserver("PropertyModifiedEvent", _bind_on_event(weakref.ref(self)))
        self.modified = False
    def on_event(self, obj, string):
        # invoked by the callback created in _bind_on_event().
        self.modified = True
    def finalize(self):
        self.proxy.RemoveObserver(self.tag)
        self.tag = None
        if self.modified:
            trace = self.get_trace(self.proxy, self.methodname, self.args, self.kwargs)
            Trace.Output.append_separated(trace)
        CallMethod.finalize(self)
    def __del__(self):
        # defensive cleanup in case finalize() was never called.
        if self.proxy and self.tag:
            self.proxy.RemoveObserver(self.tag)
class CallFunction(TraceItem):
    """Traces a module-level function call, e.g. ResetCamera(view).
    All work happens in the constructor."""
    def __init__(self, functionname, *args, **kwargs):
        TraceItem.__init__(self)
        to_trace = []
        try:
            # an optional 'comment' kwarg becomes a leading comment line and
            # is not forwarded as a call argument.
            to_trace.append("# " + kwargs["comment"])
            del kwargs["comment"]
        except KeyError:
            pass
        args = [str(CallMethod.marshall(x)) for x in args]
        args += ["%s=%s" % (key, CallMethod.marshall(val)) for key, val in kwargs.iteritems()]
        to_trace.append("%s(%s)" % (functionname, ", ".join(args)))
        Trace.Output.append_separated(to_trace)
class SaveCameras(BookkeepingItem):
    """This is used to request recording of cameras in trace"""
    # This is a little hackish at this point. We'll figure something cleaner out
    # in time.
    def __init__(self, proxy=None):
        trace = self.get_trace(proxy)
        if trace:
            Trace.Output.append_separated(trace)
    @classmethod
    def get_trace(cls, proxy=None):
        """Return camera-placement trace text. Dispatches on the proxy type:
        None -> every already-traced view; layout -> the views it contains;
        view -> its non-default camera properties; animation scene -> the
        views it animates.
        :raises Untraceable: for any other proxy type."""
        trace = TraceOutput()
        proxy = sm._getPyProxy(proxy)
        if proxy is None:
            # only record views that already appear in the trace.
            views = [x for x in simple.GetViews() if Trace.has_accessor(x)]
            for v in views:
                trace.append_separated(cls.get_trace(proxy=v))
        elif proxy.IsA("vtkSMViewLayoutProxy"):
            views = simple.GetViewsInLayout(proxy)
            for v in views:
                trace.append_separated(cls.get_trace(proxy=v))
        elif proxy.IsA("vtkSMViewProxy"):
            if proxy.GetProperty("CameraPosition"):
                accessor = Trace.get_accessor(proxy)
                trace.append("# current camera placement for %s" % accessor)
                prop_names = ["CameraPosition", "CameraFocalPoint",
                    "CameraViewUp", "CameraViewAngle",
                    "CameraParallelScale", "CameraParallelProjection",
                    "EyeAngle", "InteractionMode"]
                # only record camera properties changed from their defaults.
                props = [x for x in accessor.get_properties() \
                    if x.get_property_name() in prop_names and \
                        not x.get_object().IsValueDefault()]
                if props:
                    trace.append(accessor.trace_properties(props, in_ctor=False))
            else: pass # non-camera views
        elif proxy.IsA("vtkSMAnimationSceneProxy"):
            for view in proxy.GetProperty("ViewModules"):
                trace.append_separated(cls.get_trace(proxy=view))
        else:
            raise Untraceable("Invalid argument type %r"% proxy)
        return trace.raw_data()
# __ActiveTraceItems is simply used to keep track of items that are currently
# active to avoid non-nestable trace items from being created when previous
# items are active.
__ActiveTraceItems = []
def _create_trace_item_internal(key, args=None, kwargs=None):
    """**internal** Instantiate the TraceItem subclass named *key*.

    Called by vtkSMTrace when the application triggers a traceable action.

    :param key: name of a TraceItem subclass in this module's globals.
    :param args: optional positional arguments for the item's constructor.
    :param kwargs: optional keyword arguments for the item's constructor.
    :returns: the newly created trace-item instance.
    :raises Untraceable: if *key* is not a known trace item type, or a
        non-nestable item is requested while other items are still active.
    """
    global __ActiveTraceItems
    # trim __ActiveTraceItems to drop dead weak references.
    __ActiveTraceItems = [x for x in __ActiveTraceItems if not x() is None]
    g = globals()
    # 'key in g' replaces the Python-2-only dict.has_key() and behaves the
    # same on both Python 2 and Python 3.
    if key in g and callable(g[key]):
        args = args if args else []
        kwargs = kwargs if kwargs else {}
        traceitemtype = g[key]
        if len(__ActiveTraceItems) == 0 or \
            issubclass(traceitemtype, NestableTraceItem):
            instance = traceitemtype(*args, **kwargs)
            # book-keeping items are not tracked as active items.
            if not issubclass(traceitemtype, BookkeepingItem):
                __ActiveTraceItems.append(weakref.ref(instance))
            return instance
        raise Untraceable("Non-nestable trace item. Ignoring in current context.")
    raise Untraceable("Unknown trace item type %s" % key)
def _start_trace_internal():
    """**internal** starts tracing. Called by vtkSMTrace::StartTrace().
    Resets all tracing state and emits the standard script preamble.
    :returns: True to indicate the trace started successfully."""
    Trace.reset()
    Trace.Output.append([\
        "#### import the simple module from the paraview",
        "from paraview.simple import *",
        "#### disable automatic camera reset on 'Show'",
        "paraview.simple._DisableFirstRenderCameraReset()"])
    return True
def _stop_trace_internal():
    """**internal** stops trace. Called by vtkSMTrace::StopTrace().
    Appends camera placements for all active views, a commented-out render
    hint, and returns the accumulated trace text as a single string."""
    camera_trace = SaveCameras.get_trace(None)
    if camera_trace:
        Trace.Output.append_separated(\
            "#### saving camera placements for all active views")
        Trace.Output.append_separated(camera_trace)
    Trace.Output.append_separated([\
        "#### uncomment the following to render all views",
        "# RenderAllViews()",
        "# alternatively, if you want to write images, you can use SaveScreenshot(...)."
        ])
    # snapshot the text before resetting the tracing datastructures.
    trace = str(Trace.Output)
    Trace.reset()
    return trace
#------------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------------
def start_trace():
    """Start tracing. On successful start, will return a vtkSMTrace object.
    One can set tracing options on it to control how the tracing works.
    If tracing was already started, calling this continues with the same
    trace."""
    return sm.vtkSMTrace.StartTrace()
def stop_trace():
    """Stops the trace and returns the generated trace output string
    (see _stop_trace_internal)."""
    return sm.vtkSMTrace.StopTrace()
def get_current_trace_output(raw=False):
    """Returns the trace generated so far in the tracing process.

    :param raw: when True, return the raw trace data instead of a single
        formatted string."""
    if raw:
        return Trace.Output.raw_data()
    return str(Trace.Output)
def get_current_trace_output_and_reset(raw=False):
    """Equivalent to calling::

        get_current_trace_output(raw)
        reset_trace_output()

    :param raw: forwarded to get_current_trace_output().
    :returns: the trace output captured before the reset."""
    output = get_current_trace_output(raw)
    reset_trace_output()
    return output
def reset_trace_output():
    """Resets the trace output without resetting the tracing datastructures
    themselves (accessors and active items are preserved)."""
    Trace.Output.reset()
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Smoke test: trace the creation of a sphere + plot-over-line pipeline
    # and print the resulting script. (Python-2 print statements, consistent
    # with the rest of this file.)
    print "Running test"
    start_trace()
    s = simple.Sphere()
    c = simple.PlotOverLine()
    simple.Show()
    print "***** TRACE RESULT *****"
    print stop_trace()
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Wrapping/Python/paraview/smtrace.py
|
Python
|
gpl-3.0
| 54,107
|
[
"ParaView",
"VTK"
] |
43d949971bc250fb4bdc974a174096c4115954ab1401e6a4173a60eab30fb78b
|
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the LICENSE.txt file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
"""Pyvot - Pythonic interface for data exploration in Excel
The user-level API for the `xl` package follows. For interactive use, consider running the :ref:`interactive shell <interactive>`::
python -m xl.shell
**Managing Excel workbooks**:
- :class:`xl.Workbook() <xl.sheet.Workbook>` opens a new workbook
- xl.Workbook("filename") attaches to an existing workbook, or opens it
- :func:`xl.workbooks() <xl.tools.workbooks>` returns a Workbook for each that is currently open
**Excel Ranges**:
- :class:`xl.Range <xl.range.Range>` is the base type for a contiguous range of Excel cells.
- :func:`xl.get() <xl.tools.get>` / :meth:`Workbook.get <xl.sheet.Workbook.get>` / etc. return Ranges; namely, subclasses such as
:class:`xl.RowVector <xl.range.RowVector>`, :class:`xl.ColumnVector <xl.range.ColumnVector>`,
:class:`xl.Matrix <xl.range.Matrix>`, or :class:`xl.Scalar <xl.range.Scalar>`
- :meth:`xl.Range.get` / :meth:`xl.Range.set` allow reading from / writing to Excel
**Tools**:
- :func:`xl.map <xl.tools.map>` / :func:`xl.apply <xl.tools.apply>` / :func:`xl.filter <xl.tools.filter>` operate
like their Python counterparts, but read and write from an Excel workbook
``from xl import *`` imports :func:`xlmap`, etc. instead, to avoid overriding builtins.
- :func:`xl.join() <xl.tools.join>` allows joining two Excel tables by a pair of key columns
- :func:`xl.get() <xl.tools.get>` fetches a Range for a table column (by column name), named Excel range, or for an
Excel address (ex. A1:B1). It attempts to guess the active Workbook, and begins looking in the active sheet.
See also :meth:`Workbook.get <xl.sheet.Workbook.get>`
- :func:`xl.view() <xl.tools.view>` splats a list of Python values to an empty column in Excel
- :func:`xl.selected_range() <xl.tools.selected_range>` / :func:`xl.selected_value() <xl.tools.selected_value>`
provide the active sheet's selection"""
# Pyvot drives Excel through COM, which requires pywin32. Fail fast with a
# message pointing at the installer matching this interpreter's version and
# bitness, rather than failing later with an obscure import error.
try:
    __import__('win32com')
except ImportError as e:
    import ctypes
    import sys
    # sizeof(void*) > 4 bytes implies a 64-bit interpreter.
    is_64bit = ctypes.sizeof(ctypes.c_voidp) > 4
    arch_str = "64-bit" if is_64bit else "32-bit"
    ver = "%d.%d" % (sys.version_info.major, sys.version_info.minor)
    raise Exception("pywin32 does not appear to be installed. Visit http://sourceforge.net/projects/pywin32/ and download "
                    "build 216 or above for Python %s (%s)" % (ver, arch_str), e)
from .version import __version__
# Conventions:
# - prefix excel COM objectss with "xl". Apply to field and method names.
# Design conventions:
# - Very low activation energy for users.
# Layer between "precise (dumb)" operations (which are often not useful) and "guess user intent (smart)" operations
# (which can be much more useful).
# Users start with "smart" general operations and work towards the precise ones.
# - Global functions user "current" workbook, which iterates all sheets.
from .range import Range, Vector, Scalar, RowVector, ColumnVector, Matrix, ExcelRangeError
from .cache import CacheManager, enable_caching, cache_result
from .tools import get, view, join, map, apply, filter, selected_range, selected_value, workbooks
from .sheet import Workbook
# We want to allow 'from xl import *' without clobbering builtin map / apply / filter.
# We define these aliases, and exclude map / apply / filter from __all__.
# This way xl.map works, but 'from xl import *' imports xlmap instead
xlmap, xlapply, xlfilter = map, apply, filter
__all__ = ['Range', 'Vector', 'Scalar', 'RowVector', 'ColumnVector', 'Matrix', 'ExcelRangeError',
'CacheManager', 'enable_caching', 'cache_result',
'get', 'view', 'join', 'selected_range', 'selected_value', 'workbooks',
'xlmap', 'xlapply', 'xlfilter', # We omit map / apply / filter from __all__ but include these. See above
'Workbook']
|
msunardi/PTVS
|
Python/Product/Pyvot/Pyvot/xl/__init__.py
|
Python
|
apache-2.0
| 4,494
|
[
"VisIt"
] |
513fe4a2c6b766ed116bdc9c54fcb8f982c50256411a58109b757724f802d10c
|
#!/usr/bin/python
#encoding:utf-8
'''
client
'''
import socket, sys, os
import time, json
host = socket.gethostname() # 这里获取的是本地,具体视情况而定
port = 7878
BUF_SIZE = 4096
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, e:
print "Error creating socket: %s" % e
sys.exit()
try:
remote_ip = socket.gethostbyname(host)
except socket.gaierror:
print "Hostname couldn't be resolved. Exciting"
sys.exit()
try:
client.connect((remote_ip, port))
client.setblocking(0) # set the socket is not blocking
print "Socket connected to %s on ip %s" % (host, remote_ip)
except socket.gaierror, e: #address related error
print "connected to server error%s" % e
sys.exit()
# beat_count = 0
#send heart_beat
while True:
# beat_count += 1 #heart_beat time
host_name = socket.gethostname()
data_to_server = {'ip': socket.gethostbyname(host_name), 'status': 'alive', 'pid': os.getpid()}
data_dumped = json.dumps(data_to_server)
try:
client.sendall(data_dumped)
except socket.error:
print "Send failed!!"
sys.exit()
print 'I - ', os.getpid(), '- am alive.'
time.sleep(3)
client.close()
|
hugoxia/Python
|
heartbeat/client.py
|
Python
|
mit
| 1,240
|
[
"exciting"
] |
56ce8360c3fc13eb527f292c8e444d8a75877e54e1c3b46a682245a81f9246c9
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
    """Load a TRIPOS mol2 file from disk.

    Parameters
    ----------
    filename : str
        Path to the prmtop file on disk.

    Returns
    -------
    traj : md.Trajectory
        The resulting topology, as an md.Topology object.

    Notes
    -----
    This function should work on GAFF and sybyl style MOL2 files, but has
    been primarily tested on GAFF mol2 files.
    This function does NOT accept multi-structure MOL2 files!!!
    The elements are guessed using GAFF atom types or via the atype string.

    Examples
    --------
    >>> traj = md.load_mol2('mysystem.mol2')
    """
    # Imported lazily to avoid a circular import between the format readers
    # and the core trajectory/topology modules.
    from mdtraj.core.trajectory import Trajectory
    from mdtraj.core.topology import Topology, Single, Double, Triple, Aromatic, Amide

    atoms, bonds = mol2_to_dataframes(filename)

    atoms_mdtraj = atoms[["name", "resName"]].copy()
    atoms_mdtraj["serial"] = atoms.index

    # Figure out 1 letter element names
    # IF this is a GAFF mol2, this line should work without issues
    atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
    # If this is a sybyl mol2, there should be NAN (null) values
    if atoms_mdtraj.element.isnull().any():
        # If this is a sybyl mol2, I think this works generally.
        # Argument x is being passed as a list with only one element.
        def to_element(x):
            if isinstance(x, (list, tuple)):
                assert len(x) == 1
                x = x[0]

            if '.' in x:  # orbital-hybridizations in SYBL
                return x.split('.')[0]
            try:
                # check if we can convert the whole str to an Element,
                # if not, we only pass the first letter.
                from mdtraj.core.element import Element
                Element.getBySymbol(x)
            except KeyError:
                return x[0]
            return x

        atoms_mdtraj["element"] = atoms.atype.apply(to_element)

    # mol2 carries no residue-sequence or chain information, so every atom is
    # placed in residue 1 of chain 1.
    atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
    atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')

    # Map mol2 bond-type codes onto mdtraj bond-type objects.
    bond_type_map = {
        '1': Single,
        '2': Double,
        '3': Triple,
        'am': Amide,
        'ar': Aromatic
    }
    if bonds is not None:
        bonds_mdtraj = bonds[["id0", "id1"]].values
        # Re-base atom indices to start at 0 for Topology.from_dataframe.
        offset = bonds_mdtraj.min()  # Should this just be 1???
        bonds_mdtraj -= offset
        # Create the bond augment information
        n_bonds = bonds_mdtraj.shape[0]
        bond_augment = np.zeros([n_bonds, 2], dtype=float)
        # Add bond type information
        bond_augment[:, 0] = [float(bond_type_map[str(bond_value)]) for bond_value in bonds["bond_type"].values]
        # Add Bond "order" information, this is not known from Mol2 files
        bond_augment[:, 1] = [0.0 for _ in range(n_bonds)]
        # Augment array, dtype is cast to minimal representation of float
        bonds_mdtraj = np.append(bonds_mdtraj, bond_augment, axis=-1)
    else:
        bonds_mdtraj = None

    top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)

    xyzlist = np.array([atoms[["x", "y", "z"]].values])
    xyzlist /= 10.0  # Convert from angstrom to nanometer

    traj = Trajectory(xyzlist, top)

    return traj
def mol2_to_dataframes(filename):
    """Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.

    Parameters
    ----------
    filename : str
        Name of mol2 filename

    Returns
    -------
    atoms_frame : pd.DataFrame
        DataFrame containing atom information
    bonds_frame : pd.DataFrame or None
        DataFrame containing bond information, or None if the file defines
        no bonds.

    Notes
    -----
    These dataframes may contain force field information as well as the
    information necessary for constructing the coordinates and molecular
    topology. This function has been tested for GAFF and sybyl-style
    mol2 files but has been primarily tested on GAFF mol2 files.
    This function does NOT accept multi-structure MOL2 files!!!

    See Also
    --------
    If you just need the coordinates and bonds, use load_mol2(filename)
    to get a Trajectory object.
    """
    pd = import_('pandas')
    # Group the file's lines by their @<TRIPOS> section header; the header
    # line itself is kept as the first element of each group.
    with open(filename) as f:
        data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))

    # Mol2 can have "status bits" at the end of the bond lines. We don't care
    # about these, but they interfere with using pd_read_table because it looks
    # like one line has too many columns. So we just regex out the offending
    # text.
    status_bit_regex = r"BACKBONE|DICT|INTERRES|\|"
    # Use .get so a file with no @<TRIPOS>BOND section yields bonds_frame=None
    # instead of raising KeyError.
    bond_lines = data.get("@<TRIPOS>BOND\n", [])
    data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
                               for s in bond_lines]

    if len(data["@<TRIPOS>BOND\n"]) > 1:
        csv = StringIO()
        csv.writelines(data["@<TRIPOS>BOND\n"][1:])
        csv.seek(0)
        # NOTE: the separator must be a raw string — "\s+" is an invalid
        # escape sequence and triggers a SyntaxWarning on Python >= 3.12.
        bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
                                    index_col=0, header=None, sep=r"\s+", engine='python')
    else:
        bonds_frame = None

    csv = StringIO()
    csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
    csv.seek(0)
    atoms_frame = pd.read_csv(csv, sep=r"\s+", engine='python', header=None)
    # Atom lines may omit trailing columns (e.g. charge/status), so only label
    # the columns that are actually present.
    ncols = atoms_frame.shape[1]
    names = ["serial", "name", "x", "y", "z", "atype", "code", "resName", "charge", "status"]
    atoms_frame.columns = names[:ncols]
    return atoms_frame, bonds_frame
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
# Map from GAFF atom-type strings (lower case) to element symbols. Used by
# load_mol2 to assign an element to each atom of a GAFF-style mol2 file; a
# type missing from this table maps to NaN, which triggers the sybyl-style
# element guessing fallback in load_mol2.
gaff_elements = {
    'br': 'Br',
    'c': 'C',
    'c1': 'C',
    'c2': 'C',
    'c3': 'C',
    'ca': 'C',
    'cc': 'C',
    'cd': 'C',
    'ce': 'C',
    'cf': 'C',
    'cg': 'C',
    'ch': 'C',
    'cl': 'Cl',
    'cp': 'C',
    'cq': 'C',
    'cu': 'C',
    'cv': 'C',
    'cx': 'C',
    'cy': 'C',
    'cz': 'C',
    'f': 'F',
    'h1': 'H',
    'h2': 'H',
    'h3': 'H',
    'h4': 'H',
    'h5': 'H',
    'ha': 'H',
    'hc': 'H',
    'hn': 'H',
    'ho': 'H',
    'hp': 'H',
    'hs': 'H',
    'hw': 'H',
    'hx': 'H',
    'i': 'I',
    'n': 'N',
    'n1': 'N',
    'n2': 'N',
    'n3': 'N',
    'n4': 'N',
    'na': 'N',
    'nb': 'N',
    'nc': 'N',
    'nd': 'N',
    'ne': 'N',
    'nf': 'N',
    'nh': 'N',
    'no': 'N',
    'o': 'O',
    'oh': 'O',
    'os': 'O',
    'ow': 'O',
    'p2': 'P',
    'p3': 'P',
    'p4': 'P',
    'p5': 'P',
    'pb': 'P',
    'px': 'P',
    'py': 'P',
    's': 'S',
    's2': 'S',
    's4': 'S',
    's6': 'S',
    'sh': 'S',
    'ss': 'S',
    'sx': 'S',
    'sy': 'S'}
|
jchodera/mdtraj
|
mdtraj/formats/mol2.py
|
Python
|
lgpl-2.1
| 9,458
|
[
"MDTraj",
"OpenMM"
] |
50c23afc52a7f127d0cd5281db9cb8071bd5eea07efee778c3f0656fc7dc806c
|
import itertools
import math
import networkx as nx
import re
from collections import defaultdict
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from catmaid import state
from catmaid.models import UserRole, Treenode, ClassInstance, \
TreenodeConnector, Location
from catmaid.control.authentication import requires_user_role, \
can_edit_class_instance_or_fail, can_edit_or_fail
from catmaid.control.common import get_relation_to_id_map, \
get_class_to_id_map, insert_into_log, _create_relation, get_request_list
from catmaid.control.neuron import _delete_if_empty
from catmaid.control.node import _fetch_location, _fetch_locations
from catmaid.control.link import create_connector_link
from catmaid.util import Point3D, is_collinear
def can_edit_treenode_or_fail(user, project_id, treenode_id):
    """Raise if the user may not edit the neuron which the skeleton of the
    given treenode models; otherwise return the permission-check result."""
    neuron_id = _treenode_info(project_id, treenode_id)['neuron_id']
    return can_edit_class_instance_or_fail(user, neuron_id, 'neuron')
def can_edit_skeleton_or_fail(user, project_id, skeleton_id, model_of_relation_id):
    """Test if a user has permission to edit a neuron modeled by a skeleton.

    Looks up the neuron linked to the skeleton via the model_of relation and
    delegates the actual permission check. Raises ValueError if no such
    neuron exists.
    """
    cursor = connection.cursor()
    cursor.execute("""
        SELECT
            ci2.id as neuron_id
        FROM
            class_instance ci,
            class_instance ci2,
            class_instance_class_instance cici
        WHERE ci.project_id = %s
          AND ci.id = %s
          AND ci.id = cici.class_instance_a
          AND ci2.id = cici.class_instance_b
          AND cici.relation_id = %s
    """, (project_id, skeleton_id, model_of_relation_id))

    # No row at all means the skeleton is not a model of any neuron.
    if not cursor.rowcount:
        raise ValueError('No neuron modeled by skeleton %s' % skeleton_id)

    neuron_id = cursor.fetchone()[0]
    return can_edit_class_instance_or_fail(user, neuron_id, 'neuron')
@requires_user_role(UserRole.Annotate)
def create_treenode(request, project_id=None):
    """
    Add a new treenode to the database
    ----------------------------------

    1. Add new treenode for a given skeleton id. Parent should not be empty.
       return: new treenode id
       If the parent's skeleton has a single node and belongs to the
       'Isolated synaptic terminals' group, then reassign ownership
       of the skeleton and the neuron to the user. The treenode remains
       property of the original user who created it.

    2. Add new treenode (root) and create a new skeleton (maybe for a given
       neuron) return: new treenode id and skeleton id.

    If a neuron id is given, use that one to create the skeleton as a model of
    it.
    """
    # Coerce the POST parameters to their expected types, falling back to the
    # defaults below when a parameter is absent.
    params = {}
    float_values = {
        'x': 0,
        'y': 0,
        'z': 0,
        'radius': 0}
    int_values = {
        'confidence': 0,
        'useneuron': -1,
        'parent_id': -1}
    string_values = {}  # currently empty; kept for symmetry with the loops below
    for p in float_values.keys():
        params[p] = float(request.POST.get(p, float_values[p]))
    for p in int_values.keys():
        params[p] = int(request.POST.get(p, int_values[p]))
    for p in string_values.keys():
        params[p] = request.POST.get(p, string_values[p])

    # Get optional initial links to connectors, expect each entry to be a list
    # of connector ID, relation ID and confidence.
    links = get_request_list(request.POST, 'links', [], map_fn=int)

    # Make sure the back-end is in the expected state if the node should have a
    # parent and will therefore become part of another skeleton.
    parent_id = int(params['parent_id'])
    has_parent = parent_id and parent_id != -1
    if has_parent:
        state.validate_state(parent_id, request.POST.get('state'),
                parent_edittime=has_parent, lock=True)

    # Creator and editor are both the requesting user for a brand-new node.
    new_treenode = _create_treenode(project_id, request.user, request.user,
            params['x'], params['y'], params['z'], params['radius'],
            params['confidence'], params['useneuron'], params['parent_id'],
            neuron_name=request.POST.get('neuron_name', None))

    # Create all initial links
    if links:
        created_links = create_connector_link(project_id, request.user.id,
                new_treenode.treenode_id, new_treenode.skeleton_id, links)
    else:
        created_links = []

    return JsonResponse({
        'treenode_id': new_treenode.treenode_id,
        'skeleton_id': new_treenode.skeleton_id,
        'edition_time': new_treenode.edition_time,
        'parent_edition_time': new_treenode.parent_edition_time,
        'created_links': created_links
    })
@requires_user_role(UserRole.Annotate)
def insert_treenode(request, project_id=None):
    """
    Create a new treenode between two existing nodes. Its creator and
    creation_date information will be set to information of child node. No node
    will be created, if the node on the edge between the given child and parent
    node.

    Raises ValueError if the given nodes are not child and parent, if the
    requested location is not on the edge between them (for users without
    edit permission on the neuron), or if the child parent update fails.
    """
    # Use creation time, if part of parameter set
    params = {}
    float_values = {
        'x': 0,
        'y': 0,
        'z': 0,
        'radius': 0
    }
    int_values = {
        'confidence': 0,
        'parent_id': -1,
        'child_id': -1
    }
    for p in float_values.keys():
        params[p] = float(request.POST.get(p, float_values[p]))
    for p in int_values.keys():
        params[p] = int(request.POST.get(p, int_values[p]))

    # If siblings should be taken over, all children of the parent node will
    # become children of the inserted node. This requires extra state
    # information: the child state for the parent.
    takeover_child_ids = get_request_list(request.POST,
            'takeover_child_ids', None, int)

    # Get optional initial links to connectors, expect each entry to be a list
    # of connector ID and relation ID.
    try:
        links = get_request_list(request.POST, 'links', [], int)
    except Exception as e:
        # "except ... as e" matches the style used elsewhere in this module
        # (see _create_treenode) and is compatible with Python 3.
        raise ValueError("Couldn't parse list parameter: {}".format(e))

    # Make sure the back-end is in the expected state if the node should have a
    # parent and will therefore become part of another skeleton.
    parent_id = params.get('parent_id')
    child_id = params.get('child_id')
    if parent_id not in (-1, None):
        s = request.POST.get('state')
        # Testing regular edge insertion is assumed if a child ID is provided
        partial_child_checks = [] if child_id in (-1, None) else [child_id]
        if takeover_child_ids:
            partial_child_checks.extend(takeover_child_ids)
        # NOTE: a stray trailing comma previously turned this statement into a
        # discarded one-element tuple; it has been removed.
        state.validate_state(parent_id, s, node=True,
                children=partial_child_checks or False, lock=True)

    # Find child and parent of new treenode
    child = Treenode.objects.get(pk=params['child_id'])
    parent = Treenode.objects.get(pk=params['parent_id'])

    # Make sure both nodes are actually child and parent
    if not child.parent == parent:
        raise ValueError('The provided nodes need to be child and parent')

    # Make sure the requested location for the new node is on the edge between
    # both existing nodes if the user has no edit permissions on the neuron.
    try:
        can_edit_treenode_or_fail(request.user, project_id, parent.id)
    except Exception:
        child_loc = Point3D(child.location_x, child.location_y, child.location_z)
        parent_loc = Point3D(parent.location_x, parent.location_y, parent.location_z)
        new_node_loc = Point3D(params['x'], params['y'], params['z'])
        if not is_collinear(child_loc, parent_loc, new_node_loc, True):
            raise ValueError('New node location has to be between child and parent')

    # Use creator and creation time for neighboring node that was created last.
    if child.creation_time < parent.creation_time:
        user, time = parent.user, parent.creation_time
    else:
        user, time = child.user, child.creation_time

    # Create new treenode
    new_treenode = _create_treenode(project_id,
            user, request.user, params['x'], params['y'], params['z'],
            params['radius'], params['confidence'], -1, params['parent_id'], time)

    # Update parent of child to new treenode, do this in raw SQL to also get
    # the updated edition time. Update also takeover children.
    cursor = connection.cursor()
    params = [new_treenode.treenode_id, child.id]
    if takeover_child_ids:
        params.extend(takeover_child_ids)
        child_template = ",".join(("%s",) * (len(takeover_child_ids) + 1))
    else:
        child_template = "%s"
    cursor.execute("""
        UPDATE treenode SET parent_id = %s
        WHERE id IN ({})
        RETURNING id, edition_time
    """.format(child_template), params)
    result = cursor.fetchall()
    if not result or (len(params) - 1) != len(result):
        # str() the ID explicitly: concatenating an int to a str raises a
        # TypeError that would mask the real problem.
        raise ValueError("Couldn't update parent of inserted node's child: " +
                str(child.id))
    child_edition_times = [[k, v] for k, v in result]

    # Create all initial links
    if links:
        created_links = create_connector_link(project_id, request.user.id,
                new_treenode.treenode_id, new_treenode.skeleton_id, links)
    else:
        created_links = []

    return JsonResponse({
        'treenode_id': new_treenode.treenode_id,
        'skeleton_id': new_treenode.skeleton_id,
        'edition_time': new_treenode.edition_time,
        'parent_edition_time': new_treenode.parent_edition_time,
        'child_edition_times': child_edition_times,
        'created_links': created_links
    })
class NewTreenode(object):
    """Value object describing a newly created treenode.

    Carries exactly the fields that are serialized back to the client: the
    new node's ID, its edition time, its skeleton's ID and the edition time
    of its parent (None for root nodes).
    """

    def __init__(self, treenode_id, edition_time, skeleton_id,
            parent_edition_time):
        self.treenode_id, self.skeleton_id = treenode_id, skeleton_id
        self.edition_time = edition_time
        self.parent_edition_time = parent_edition_time
def _create_treenode(project_id, creator, editor, x, y, z, radius, confidence,
        neuron_id, parent_id, creation_time=None, neuron_name=None):
    """Create a treenode, and if needed the skeleton/neuron that own it.

    Parameters: creator/editor are User objects recorded on the new rows;
    x, y, z, radius and confidence describe the node itself; neuron_id of -1
    means "no existing neuron"; parent_id of -1 creates a new root node (and
    a new skeleton, plus a neuron if neuron_id is -1). creation_time, when
    given, overrides the database default. neuron_name optionally names a
    newly created neuron and supports "{nX}" counting placeholders.

    Returns a NewTreenode instance; any failure is re-raised as a generic
    Exception with a phase-specific message prefix.
    """

    relation_map = get_relation_to_id_map(project_id)
    class_map = get_class_to_id_map(project_id)

    def insert_new_treenode(parent_id=None, skeleton_id=None):
        """ If the parent_id is not None and the skeleton_id of the parent does
        not match with the skeleton.id, then the database will throw an error
        given that the skeleton_id, being defined as foreign key in the
        treenode table, will not meet the being-foreign requirement.
        """
        new_treenode = Treenode()
        new_treenode.user = creator
        new_treenode.editor = editor
        new_treenode.project_id = project_id
        # NOTE(review): falsy creation_time values (e.g. None) fall through to
        # the database default.
        if creation_time:
            new_treenode.creation_time = creation_time
        new_treenode.location_x = float(x)
        new_treenode.location_y = float(y)
        new_treenode.location_z = float(z)
        new_treenode.radius = int(radius)
        new_treenode.skeleton_id = skeleton_id
        new_treenode.confidence = int(confidence)
        if parent_id:
            new_treenode.parent_id = parent_id
        new_treenode.save()
        return new_treenode

    def relate_neuron_to_skeleton(neuron, skeleton):
        # Record skeleton model_of neuron, attributed to the creator.
        return _create_relation(creator, project_id,
                relation_map['model_of'], skeleton, neuron)

    # Tracks which phase failed so the final error message is meaningful.
    response_on_error = ''
    try:
        if -1 != int(parent_id):  # A root node and parent node exist
            # Select the parent treenode for update to prevent race condition
            # updates to its skeleton ID while this node is being created.
            cursor = connection.cursor()
            cursor.execute('''
                SELECT t.skeleton_id, t.edition_time FROM treenode t
                WHERE t.id = %s FOR NO KEY UPDATE OF t
            ''', (parent_id,))

            if cursor.rowcount != 1:
                raise ValueError('Parent treenode %s does not exist' % parent_id)

            parent_node = cursor.fetchone()
            parent_skeleton_id = parent_node[0]
            parent_edition_time = parent_node[1]

            # Raise an Exception if the user doesn't have permission to edit
            # the neuron the skeleton of the treenode is modeling.
            can_edit_skeleton_or_fail(editor, project_id, parent_skeleton_id,
                    relation_map['model_of'])

            response_on_error = 'Could not insert new treenode!'
            new_treenode = insert_new_treenode(parent_id, parent_skeleton_id)

            return NewTreenode(new_treenode.id, new_treenode.edition_time,
                    parent_skeleton_id, parent_edition_time)
        else:
            # No parent node: We must create a new root node, which needs a
            # skeleton and a neuron to belong to.
            response_on_error = 'Could not insert new treenode instance!'

            new_skeleton = ClassInstance()
            new_skeleton.user = creator
            new_skeleton.project_id = project_id
            new_skeleton.class_column_id = class_map['skeleton']
            new_skeleton.name = 'skeleton'
            # Save once to obtain an ID, then rename to include it.
            new_skeleton.save()
            new_skeleton.name = 'skeleton %d' % new_skeleton.id
            new_skeleton.save()

            if -1 != neuron_id:
                # Check that the neuron to use exists
                if 0 == ClassInstance.objects.filter(pk=neuron_id).count():
                    neuron_id = -1

            if -1 != neuron_id:
                # Raise an Exception if the user doesn't have permission to
                # edit the existing neuron.
                can_edit_class_instance_or_fail(editor, neuron_id, 'neuron')

                # A neuron already exists, so we use it
                response_on_error = 'Could not relate the neuron model to ' \
                                    'the new skeleton!'
                relate_neuron_to_skeleton(neuron_id, new_skeleton.id)

                response_on_error = 'Could not insert new treenode!'
                new_treenode = insert_new_treenode(None, new_skeleton.id)

                return NewTreenode(new_treenode.id, new_treenode.edition_time,
                        new_skeleton.id, None)
            else:
                # A neuron does not exist, therefore we put the new skeleton
                # into a new neuron.
                response_on_error = 'Failed to insert new instance of a neuron.'
                new_neuron = ClassInstance()
                new_neuron.user = creator
                new_neuron.project_id = project_id
                new_neuron.class_column_id = class_map['neuron']
                if neuron_name:
                    # Create a regular expression to find allowed patterns. The
                    # first group is the whole {nX} part, while the second group
                    # is X only.
                    counting_pattern = re.compile(r"(\{n(\d+)\})")
                    # Look for patterns, replace all {n} with {n1} to normalize.
                    neuron_name = neuron_name.replace("{n}", "{n1}")

                    if counting_pattern.search(neuron_name):
                        # Find starting values for each substitution.
                        counts = [int(m.groups()[1]) for m in counting_pattern.finditer(neuron_name)]
                        # Find existing matching neurons in database.
                        name_match = counting_pattern.sub(r"(\d+)", neuron_name)
                        name_pattern = re.compile(name_match)
                        matching_neurons = ClassInstance.objects.filter(
                            project_id=project_id,
                            class_column_id=class_map['neuron'],
                            name__regex=name_match).order_by('name')

                        # Increment substitution values based on existing neurons.
                        for n in matching_neurons:
                            for i, (count, g) in enumerate(zip(counts, name_pattern.search(n.name).groups())):
                                if count == int(g):
                                    counts[i] = count + 1

                        # Substitute values.
                        count_ind = 0
                        m = counting_pattern.search(neuron_name)
                        while m:
                            neuron_name = m.string[:m.start()] + str(counts[count_ind]) + m.string[m.end():]
                            count_ind = count_ind + 1
                            m = counting_pattern.search(neuron_name)

                    new_neuron.name = neuron_name
                else:
                    new_neuron.name = 'neuron'
                    new_neuron.save()
                    new_neuron.name = 'neuron %d' % new_neuron.id
                # NOTE(review): nesting of this final save() reconstructed —
                # it must run for both branches so new_neuron.id exists below.
                new_neuron.save()

                response_on_error = 'Could not relate the neuron model to ' \
                                    'the new skeleton!'
                relate_neuron_to_skeleton(new_neuron.id, new_skeleton.id)

                response_on_error = 'Failed to insert instance of treenode.'
                new_treenode = insert_new_treenode(None, new_skeleton.id)

                response_on_error = 'Failed to write to logs.'
                new_location = (new_treenode.location_x, new_treenode.location_y,
                        new_treenode.location_z)
                insert_into_log(project_id, creator.id, 'create_neuron',
                        new_location, 'Create neuron %d and skeleton '
                        '%d' % (new_neuron.id, new_skeleton.id))

                return NewTreenode(new_treenode.id, new_treenode.edition_time,
                        new_skeleton.id, None)
    except Exception as e:
        import traceback
        raise Exception("%s: %s %s" % (response_on_error, str(e),
                str(traceback.format_exc())))
@requires_user_role(UserRole.Annotate)
def update_parent(request, project_id=None, treenode_id=None):
    """Re-parent a treenode within its own skeleton.

    Expects ``parent_id`` in the POST data; both nodes must belong to the
    same skeleton, otherwise an Exception is raised. Returns the updated
    node/parent/skeleton IDs as JSON.
    """
    treenode_id = int(treenode_id)
    parent_id = int(request.POST.get('parent_id', -1))

    can_edit_treenode_or_fail(request.user, project_id, treenode_id)

    # Make sure the back-end is in the expected state
    state.validate_state(treenode_id, request.POST.get('state'),
            neighborhood=True, lock=True)

    child = get_object_or_404(Treenode, pk=treenode_id, project_id=project_id)
    parent = get_object_or_404(Treenode, pk=parent_id, project_id=project_id)

    if child.skeleton_id != parent.skeleton_id:
        # Format the message explicitly with %: Exception() does not apply
        # printf-style formatting to extra positional arguments, so the
        # previous call produced an unformatted message tuple.
        raise Exception("Child node %s is in skeleton %s but parent node %s is in skeleton %s!" %
                (treenode_id, child.skeleton_id, parent_id, parent.skeleton_id))

    child.parent_id = parent_id
    child.save()

    return JsonResponse({
        'success': True,
        'node_id': child.id,
        'parent_id': child.parent_id,
        'skeleton_id': child.skeleton_id
    })
def update_node_radii(node_ids, radii, cursor=None):
    """Update radius of a list of nodes, returns old radii.

    Both lists/tuples and single values can be supplied.

    Parameters
    ----------
    node_ids : int or list/tuple of int
        Treenode IDs whose radius should be updated.
    radii : float or list/tuple of float
        New radius values; a single value is applied to every node ID.
    cursor : database cursor, optional
        Reused if given, otherwise a new one is created.

    Returns
    -------
    dict
        Maps node ID to {'old', 'new', 'edition_time', 'skeleton_id'}.

    Raises
    ------
    ValueError
        If list lengths don't match, a radius is NaN, or a node is missing.
    """
    # Make sure we deal with lists
    if not isinstance(node_ids, (list, tuple)):
        node_ids = (node_ids,)
    # If only a single radius value is available, use it for every input
    # node ID.
    if not isinstance(radii, (list, tuple)):
        radii = len(node_ids) * (radii,)
    if len(node_ids) != len(radii):
        raise ValueError("Number of treenodes doesn't match number of radii")
    invalid_radii = [r for r in radii if math.isnan(r)]
    if invalid_radii:
        # str() each value — joining floats directly raises TypeError, which
        # previously masked this validation error.
        raise ValueError("Some radii where not numbers: " +
                ", ".join(str(r) for r in invalid_radii))

    # Make sure we have a database cursor
    cursor = cursor or connection.cursor()

    # Build a VALUES list of the form "(id,radius),(id,radius),...". A
    # generator expression replaces the Python-2-only "lambda (k,v)" tuple
    # unpacking. The interpolated values are validated ints/floats, not user
    # strings, so embedding them in the query is safe here.
    node_radii = "(" + "),(".join("{},{}".format(k, v)
            for k, v in zip(node_ids, radii)) + ")"

    cursor.execute('''
        UPDATE treenode t SET radius = target.new_radius
        FROM (SELECT x.id, x.radius AS old_radius, y.new_radius
        FROM treenode x
        INNER JOIN (VALUES {}) y(id, new_radius)
        ON x.id=y.id FOR NO KEY UPDATE) target
        WHERE t.id = target.id
        RETURNING t.id, target.old_radius, target.new_radius,
        t.edition_time, t.skeleton_id;
    '''.format(node_radii))

    updated_rows = cursor.fetchall()
    if len(node_ids) != len(updated_rows):
        missing_ids = frozenset(node_ids) - frozenset([r[0] for r in updated_rows])
        raise ValueError('Couldn\'t find treenodes ' +
                ','.join([str(ni) for ni in missing_ids]))
    return {r[0]: {
        'old': r[1],
        'new': float(r[2]),
        'edition_time': r[3],
        'skeleton_id': r[4]
    } for r in updated_rows}
@requires_user_role(UserRole.Annotate)
def update_radii(request, project_id=None):
    """Update the radius of one or more nodes"""
    # Collect indexed POST fields of the form treenode_ids[i] and
    # treenode_radii[i] in one pass over the request data.
    post = request.POST
    treenode_ids = []
    radii = []
    for key, value in post.iteritems():
        if key.startswith('treenode_ids['):
            treenode_ids.append(int(value))
        elif key.startswith('treenode_radii['):
            radii.append(float(value))

    # Make sure the back-end is in the expected state
    cursor = connection.cursor()
    state.validate_state(treenode_ids, post.get('state'),
            multinode=True, lock=True, cursor=cursor)

    updated_nodes = update_node_radii(treenode_ids, radii, cursor)

    return JsonResponse({
        'success': True,
        'updated_nodes': updated_nodes
    })
@requires_user_role(UserRole.Annotate)
def update_radius(request, project_id=None, treenode_id=None):
    """Update the radius of a node, or of a section of its skeleton.

    POST parameters: ``radius`` (float), ``state`` and ``option`` (int):
    0 the node only, 1 up to the next branch or end node (included),
    2 up to the previous branch node or root (excluded), 3 up to the
    previous node with a radius (excluded), 4 up to the root (included),
    5 all nodes of the skeleton.
    """
    treenode_id = int(treenode_id)
    radius = float(request.POST.get('radius', -1))
    if math.isnan(radius):
        raise Exception("Radius '%s' is not a number!" % request.POST.get('radius'))
    option = int(request.POST.get('option', 0))
    cursor = connection.cursor()

    # Make sure the back-end is in the expected state
    state.validate_state(treenode_id, request.POST.get('state'),
            node=True, lock=True, cursor=cursor)

    def create_update_response(updated_nodes, radius):
        # Shared JSON shape for every option branch below.
        return JsonResponse({
            'success': True,
            'updated_nodes': updated_nodes,
            'new_radius': radius
        })

    if 0 == option:
        # Update radius only for the passed in treenode and return the old
        # radius.
        old_radii = update_node_radii(treenode_id, radius, cursor)
        return create_update_response(old_radii, radius)

    # Fetch the whole skeleton's topology once; the option branches below only
    # differ in which subset of nodes they select from it. Pass treenode_id as
    # a query parameter instead of %-interpolating it into the SQL string.
    cursor.execute('''
        SELECT id, parent_id, radius
        FROM treenode
        WHERE skeleton_id = (SELECT t.skeleton_id FROM treenode t WHERE id = %s)
    ''', (treenode_id,))

    if 1 == option:
        # Update radius from treenode_id to next branch or end node (included)
        children = defaultdict(list)
        for row in cursor.fetchall():
            children[row[1]].append(row[0])

        include = [treenode_id]
        c = children[treenode_id]
        while 1 == len(c):
            child = c[0]
            include.append(child)
            c = children[child]

        old_radii = update_node_radii(include, radius, cursor)

        return create_update_response(old_radii, radius)

    if 2 == option:
        # Update radius from treenode_id to prev branch node or root (excluded)
        parents = {}
        children = defaultdict(list)
        for row in cursor.fetchall():
            parents[row[0]] = row[1]
            children[row[1]].append(row[0])

        include = [treenode_id]
        parent = parents[treenode_id]
        while parent and parents[parent] and 1 == len(children[parent]):
            include.append(parent)
            parent = parents[parent]

        old_radii = update_node_radii(include, radius, cursor)

        return create_update_response(old_radii, radius)

    if 3 == option:
        # Update radius from treenode_id to prev node with radius (excluded)
        parents = {}
        for row in cursor.fetchall():
            if row[2] < 0 or row[0] == treenode_id:  # DB default radius is 0 but is initialized to -1 elsewhere
                parents[row[0]] = row[1]

        include = [treenode_id]
        parent = parents[treenode_id]
        while parent in parents:
            include.append(parent)
            parent = parents[parent]

        old_radii = update_node_radii(include, radius, cursor)

        return create_update_response(old_radii, radius)

    if 4 == option:
        # Update radius from treenode_id to root (included)
        parents = {row[0]: row[1] for row in cursor.fetchall()}

        include = [treenode_id]
        parent = parents[treenode_id]
        while parent:
            include.append(parent)
            parent = parents[parent]

        old_radii = update_node_radii(include, radius, cursor)

        return create_update_response(old_radii, radius)

    if 5 == option:
        # Update radius of all nodes (in a single query)
        skeleton_id = Treenode.objects.filter(pk=treenode_id).values('skeleton_id')
        include = list(Treenode.objects.filter(skeleton_id=skeleton_id) \
                .values_list('id', flat=True))

        old_radii = update_node_radii(include, radius, cursor)

        return create_update_response(old_radii, radius)
@requires_user_role(UserRole.Annotate)
def delete_treenode(request, project_id=None):
    """ Deletes a treenode. If the skeleton has a single node, deletes the
    skeleton and its neuron. Returns the parent_id, if any.

    POST parameters: 'treenode_id' (the node to delete) and 'state' (the
    client's view of the node neighborhood, validated below). On success the
    JSON response carries the deleted node's location, parent, reconnected
    children and removed connector links so the client can update or undo.
    """
    # Default of -1 lets a missing parameter fail in the permission check
    # below instead of raising a KeyError here.
    treenode_id = int(request.POST.get('treenode_id', -1))
    # Raise an exception if the user doesn't have permission to edit the
    # treenode.
    can_edit_or_fail(request.user, treenode_id, 'treenode')
    # Raise an Exception if the user doesn't have permission to edit the neuron
    # the skeleton of the treenode is modeling.
    can_edit_treenode_or_fail(request.user, project_id, treenode_id)
    # Make sure the back-end is in the expected state
    state.validate_state(treenode_id, request.POST.get('state'), lock=True,
                         neighborhood=True)

    treenode = Treenode.objects.get(pk=treenode_id)
    parent_id = treenode.parent_id

    # Get information about linked connectors, returned to the client so the
    # deletion (including its links) can be undone.
    links = list(TreenodeConnector.objects.filter(project_id=project_id,
            treenode_id=treenode_id).values_list('id', 'relation_id',
            'connector_id', 'confidence'))

    # response_on_error is re-assigned before each fallible step so the
    # except-clause below can report which step failed.
    response_on_error = ''
    deleted_neuron = False
    try:
        if not parent_id:
            children = []
            # This treenode is root.
            response_on_error = 'Could not retrieve children for ' \
                'treenode #%s' % treenode_id
            n_children = Treenode.objects.filter(parent=treenode).count()
            response_on_error = "Could not delete root node"
            if n_children > 0:
                # TODO yes you can, the new root is the first of the children,
                # and other children become independent skeletons
                raise Exception("You can't delete the root node when it "
                                "has children.")
            # Get the neuron before the skeleton is deleted. It can't be
            # accessed otherwise anymore.
            neuron = ClassInstance.objects.get(project_id=project_id,
                    cici_via_b__relation__relation_name='model_of',
                    cici_via_b__class_instance_a=treenode.skeleton)
            # Remove the original skeleton. It is OK to remove it if it only had
            # one node, even if the skeleton's user does not match or the user
            # is not superuser. Delete the skeleton, which triggers deleting
            # the ClassInstanceClassInstance relationship with neuron_id
            response_on_error = 'Could not delete skeleton.'
            # Extra check for errors, like having two root nodes
            count = Treenode.objects.filter(skeleton_id=treenode.skeleton_id) \
                .count()
            if 1 == count:
                # deletes as well treenodes that refer to the skeleton
                ClassInstance.objects.filter(pk=treenode.skeleton_id) \
                    .delete()
            else:
                return JsonResponse({"error": "Can't delete " \
                    "isolated node: erroneously, its skeleton contains more " \
                    "than one treenode! Check for multiple root nodes."})

            # If the neuron modeled by the skeleton of the treenode is empty,
            # delete it.
            response_on_error = 'Could not delete neuron #%s' % neuron.id
            deleted_neuron = _delete_if_empty(neuron.id)

            if deleted_neuron:
                # Insert log entry for neuron deletion
                insert_into_log(project_id, request.user.id, 'remove_neuron',
                        (treenode.location_x, treenode.location_y, treenode.location_z),
                        'Deleted neuron %s and skeleton(s) %s.' % (neuron.id, treenode.skeleton_id))
        else:
            # Treenode is not root, it has a parent and perhaps children.
            # Reconnect all the children to the parent.
            response_on_error = 'Could not update parent id of children nodes'
            cursor = connection.cursor()
            cursor.execute("""
                UPDATE treenode SET parent_id = %s
                WHERE project_id = %s AND parent_id = %s
                RETURNING id, edition_time
            """, (treenode.parent_id, project_id, treenode.id))
            # Children will be a list of two-element lists, just what we want to
            # return as child info.
            children = cursor.fetchall()

        # Remove treenode
        response_on_error = 'Could not delete treenode.'
        Treenode.objects.filter(project_id=project_id, pk=treenode_id).delete()
        return JsonResponse({
            'x': treenode.location_x,
            'y': treenode.location_y,
            'z': treenode.location_z,
            'parent_id': parent_id,
            'children': children,
            'links': links,
            'radius': treenode.radius,
            'confidence': treenode.confidence,
            'skeleton_id': treenode.skeleton_id,
            'deleted_neuron': deleted_neuron,
            'success': "Removed treenode successfully."
        })
    except Exception as e:
        # Re-raise with the description of the step that was in progress.
        raise Exception(response_on_error + ': ' + str(e))
def _treenode_info(project_id, treenode_id):
    """Return skeleton and neuron information for a single treenode.

    The result is a dict with keys skeleton_id, skeleton_name, neuron_id
    and neuron_name. Raises ValueError when no match, or more than one
    match, is found.
    """
    cursor = connection.cursor()
    # (use raw SQL since we are returning values from several different models)
    cursor.execute("""
        SELECT
            treenode.skeleton_id,
            ci.name as skeleton_name,
            ci2.id as neuron_id,
            ci2.name as neuron_name
        FROM
            treenode,
            relation r,
            class_instance ci,
            class_instance ci2,
            class_instance_class_instance cici
        WHERE ci.project_id = %s
          AND treenode.id = %s
          AND treenode.skeleton_id = ci.id
          AND ci.id = cici.class_instance_a
          AND ci2.id = cici.class_instance_b
          AND cici.relation_id = r.id
          AND r.relation_name = 'model_of'
    """, (project_id, treenode_id))

    column_names = [col[0] for col in cursor.description]
    rows = cursor.fetchall()

    if len(rows) > 1:
        raise ValueError('Found more than one skeleton and neuron for '
                         'treenode %s' % treenode_id)
    if not rows:
        raise ValueError('No skeleton and neuron for treenode %s' % treenode_id)

    return dict(zip(column_names, rows[0]))
@api_view(['POST'])
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def treenode_info(request, project_id=None, treenode_id=None):
    """Retrieve skeleton and neuron information about this treenode.
    ---
    type:
      skeleton_id:
        description: ID of the treenode's skeleton
        type: integer
        required: true
      skeleton_name:
        description: Name of the treenode's skeleton
        type: string
        required: true
      neuron_id:
        description: ID of the treenode's neuron
        type: integer
        required: true
      neuron_name:
        description: Name of the treenode's neuron
        type: string
        required: true
    """
    # NOTE: the YAML section in the docstring above is parsed for the API
    # documentation -- keep its structure intact when editing.
    info = _treenode_info(int(project_id), int(treenode_id))
    return JsonResponse(info)
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_children(request, project_id=None, treenode_id=None):
    """Return the immediate children of a treenode.

    Each child is serialized as a one-element list wrapping the row
    (id, location_x, location_y, location_z), matching what the client
    expects from this endpoint.
    """
    try:
        tnid = int(treenode_id)
        cursor = connection.cursor()
        cursor.execute('''
            SELECT id, location_x, location_y, location_z
            FROM treenode
            WHERE parent_id = %s
            ''', (tnid,))

        children = [[row] for row in cursor.fetchall()]
        return JsonResponse(children, safe=False)
    except Exception as e:
        # Fixed: the previous message ("Could not obtain next branch node or
        # leaf") was copy-pasted from find_next_branchnode_or_end and did not
        # describe this endpoint.
        raise Exception('Could not obtain children for treenode %s: %s'
                        % (treenode_id, str(e)))
@api_view(['POST'])
@requires_user_role(UserRole.Annotate)
def update_confidence(request, project_id=None, treenode_id=None):
    """Update confidence of edge between a node to either its parent or its
    connectors.

    The connection between a node and its parent or the connectors it is linked
    to can be rated with a confidence value in the range 1-5. If connector links
    should be updated, one can limit the affected connections to a specific
    connector. Returned is an object, mapping updated partners to their old
    confidences.
    ---
    parameters:
    - name: new_confidence
      description: New confidence, value in range 1-5
      type: integer
      required: true
    - name: to_connector
      description: Whether all linked connectors instead of parent should be updated
      type: boolean
      required: false
    - name: partner_ids
      description: Limit update to a set of connectors if to_connector is true
      type: array
      items: integer
      required: false
    - name: partner_confidences
      description: Set different confidences to connectors in <partner_ids>
      type: array
      items: integer
      required: false
    type:
      message:
        type: string
        required: true
      updated_partners:
        type: object
        required: true
    """
    tnid = int(treenode_id)
    can_edit_treenode_or_fail(request.user, project_id, tnid)
    cursor = connection.cursor()
    state.validate_state(tnid, request.POST.get('state'),
                         node=True, lock=True, cursor=cursor)

    to_connector = request.POST.get('to_connector', 'false') == 'true'
    partner_ids = get_request_list(request.POST, 'partner_ids', None, int)
    partner_confidences = get_request_list(request.POST, 'partner_confidences',
                                           None, int)
    new_confidence = int(request.POST.get('new_confidence', 0))

    # If partner confidences are specified, make sure there are exactly as many
    # as there are partners. Otherwise validate passed in confidence
    if partner_ids and partner_confidences:
        if len(partner_confidences) != len(partner_ids):
            # NOTE(review): message is missing a space between "as" and
            # "there" due to implicit string concatenation.
            raise ValueError("There have to be as many partner confidences as"
                             "there are partner IDs")
    else:
        if new_confidence < 1 or new_confidence > 5:
            raise ValueError('Confidence not in range 1-5 inclusive.')
        if partner_ids:
            # Prepare new confidences for connector query
            partner_confidences = (new_confidence,) * len(partner_ids)

    if to_connector:
        if partner_ids:
            # Build a "(%s,%s),(%s,%s),..." VALUES template; partner_data
            # interleaves (id, confidence) pairs to match it. Only the
            # placeholder list is formatted into the SQL -- the values
            # themselves are still passed as query parameters.
            partner_template = ",".join(("(%s,%s)",) * len(partner_ids))
            partner_data = [p for v in zip(partner_ids, partner_confidences) for p in v]
            cursor.execute('''
                UPDATE treenode_connector tc
                SET confidence = target.new_confidence
                FROM (SELECT x.id, x.confidence AS old_confidence,
                             new_values.confidence AS new_confidence
                      FROM treenode_connector x
                      JOIN (VALUES {}) new_values(cid, confidence)
                      ON x.connector_id = new_values.cid
                      WHERE x.treenode_id = %s) target
                WHERE tc.id = target.id
                RETURNING tc.connector_id, tc.edition_time, target.old_confidence
            '''.format(partner_template), partner_data + [tnid])
        else:
            # Update all connector links of this treenode with one confidence.
            cursor.execute('''
                UPDATE treenode_connector tc
                SET confidence = %s
                FROM (SELECT x.id, x.confidence AS old_confidence
                      FROM treenode_connector x
                      WHERE treenode_id = %s) target
                WHERE tc.id = target.id
                RETURNING tc.connector_id, tc.edition_time, target.old_confidence
            ''', (new_confidence, tnid))
    else:
        # Update the confidence of the edge to the parent node.
        cursor.execute('''
            UPDATE treenode t
            SET confidence = %s, editor_id = %s
            FROM (SELECT x.id, x.confidence AS old_confidence
                  FROM treenode x
                  WHERE id = %s) target
            WHERE t.id = target.id
            RETURNING t.parent_id, t.edition_time, target.old_confidence
        ''', (new_confidence, request.user.id, tnid))

    updated_partners = cursor.fetchall()
    if len(updated_partners) > 0:
        location = Location.objects.filter(id=tnid).values_list(
            'location_x', 'location_y', 'location_z')[0]
        insert_into_log(project_id, request.user.id, "change_confidence",
                        location, "Changed to %s" % new_confidence)
        return JsonResponse({
            'message': 'success',
            'updated_partners': {
                r[0]: {
                    'edition_time': r[1],
                    'old_confidence': r[2]
                } for r in updated_partners
            }
        })

    # Else, signal error
    if to_connector:
        raise ValueError('Failed to update confidence between treenode %s and '
                         'connector.' % tnid)
    else:
        raise ValueError('Failed to update confidence at treenode %s.' % tnid)
def _skeleton_as_graph(skeleton_id):
    """Build a directed graph of the skeleton's treenodes.

    Every treenode becomes a node; each parent gets a directed edge to
    each of its children.
    """
    # Fetch all nodes of the skeleton
    cursor = connection.cursor()
    cursor.execute('''
        SELECT id, parent_id
        FROM treenode
        WHERE skeleton_id=%s''', [skeleton_id])

    graph = nx.DiGraph()
    for node_id, node_parent_id in cursor.fetchall():
        graph.add_node(node_id)
        # The root has no parent; only non-root rows contribute an edge.
        if node_parent_id:
            graph.add_edge(node_parent_id, node_id)

    return graph
def _find_first_interesting_node(sequence):
    """ Find the first node that:
    1. Has confidence lower than 5
    2. Has a tag
    3. Has any connector (e.g. receives/makes synapse, markes as abutting, ...)
    Otherwise return the last node.
    """
    if not sequence:
        raise Exception('No nodes ahead!')

    if 1 == len(sequence):
        return sequence[0]

    cursor = connection.cursor()
    # NOTE(review): the ids are interpolated directly into the SQL string.
    # They appear to come from integer treenode ids fetched from the database
    # by the callers, which would make injection impossible here -- confirm,
    # or switch to a parameterized IN list to be safe.
    cursor.execute('''
        SELECT t.id, t.confidence, tc.relation_id, tci.relation_id
        FROM treenode t
        LEFT OUTER JOIN treenode_connector tc ON (tc.treenode_id = t.id)
        LEFT OUTER JOIN treenode_class_instance tci ON (tci.treenode_id = t.id)
        WHERE t.id IN (%s)
        ''' % ",".join(map(str, sequence)))

    # Map node id -> its row. The outer joins can yield multiple rows per
    # node; only the last row fetched per id is kept.
    nodes = {row[0]: row for row in cursor.fetchall()}

    for node_id in sequence:
        if node_id in nodes:
            props = nodes[node_id]
            # [1]: confidence
            # [2]: a treenode_connector.relation_id, e.g. presynaptic_to or postsynaptic_to
            # [3]: a treenode_class_instance.relation_id, e.g. labeled_as
            # 2 and 3 may be None
            if props[1] < 5 or props[2] or props[3]:
                return node_id
        else:
            raise Exception('Nodes of this skeleton changed while inspecting them.')

    # Nothing interesting found: fall back to the last node of the sequence.
    return sequence[-1]
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_previous_branchnode_or_root(request, project_id=None, treenode_id=None):
    """Walk upstream from a treenode and return the location of the previous
    branch node or the root; with POST 'alt' == 1, return the first
    "interesting" node passed on the way instead (see
    _find_first_interesting_node)."""
    try:
        tnid = int(treenode_id)
        alt = 1 == int(request.POST['alt'])
        skid = Treenode.objects.get(pk=tnid).skeleton_id
        graph = _skeleton_as_graph(skid)
        # Travel upstream until finding a parent node with more than one child
        # or reaching the root node
        seq = [] # Does not include the starting node tnid
        while True:
            # NOTE(review): predecessors()/successors() are treated as lists;
            # in networkx >= 2.0 they return iterators -- confirm the pinned
            # networkx version.
            parents = graph.predecessors(tnid)
            if parents: # list of parents is not empty
                tnid = parents[0] # Can ony have one parent
                seq.append(tnid)
                if 1 != len(graph.successors(tnid)):
                    break # Found a branch node
            else:
                break # Found the root node

        if seq and alt:
            tnid = _find_first_interesting_node(seq)

        return JsonResponse(_fetch_location(tnid), safe=False)
    except Exception as e:
        raise Exception('Could not obtain previous branch node or root:' + str(e))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def find_next_branchnode_or_end(request, project_id=None, treenode_id=None):
    """For each child of a treenode, walk downstream to the next branch node
    or leaf and return per-branch location triples:
    [child, first interesting node, branch end]."""
    try:
        tnid = int(treenode_id)
        skid = Treenode.objects.get(pk=tnid).skeleton_id
        graph = _skeleton_as_graph(skid)

        # NOTE(review): successors() is treated as a list; in networkx >= 2.0
        # it returns an iterator -- confirm the pinned networkx version.
        children = graph.successors(tnid)
        branches = []
        for child_node_id in children:
            # Travel downstream until finding a child node with more than one
            # child or reaching an end node
            seq = [child_node_id] # Does not include the starting node tnid
            branch_end = child_node_id
            while True:
                branch_children = graph.successors(branch_end)
                if 1 == len(branch_children):
                    branch_end = branch_children[0]
                    seq.append(branch_end)
                else:
                    break # Found an end node or a branch node

            branches.append([child_node_id,
                             _find_first_interesting_node(seq),
                             branch_end])

        # If more than one branch exists, sort based on downstream arbor size.
        if len(children) > 1:
            branches.sort(
                key=lambda b: len(nx.algorithms.traversal.depth_first_search.dfs_successors(graph, b[0])),
                reverse=True)

        # Leaf nodes will have no branches
        if len(children) > 0:
            # Create a dict of node ID -> node location
            node_ids_flat = list(itertools.chain.from_iterable(branches))
            node_locations = {row[0]: row for row in _fetch_locations(node_ids_flat)}
            # Replace every node id by its location row.
            branches = [[node_locations[node_id] for node_id in branch] for branch in branches]

        return JsonResponse(branches, safe=False)
    except Exception as e:
        raise Exception('Could not obtain next branch node or leaf: ' + str(e))
|
catsop/CATMAID
|
django/applications/catmaid/control/treenode.py
|
Python
|
gpl-3.0
| 43,232
|
[
"NEURON"
] |
ffa09440ab1ae90c0ed027aca4e25c6668b1d6ffc1b8e7757cac874a4a02968d
|
import unittest
from pyramid import testing
class TutorialFunctionalTests(unittest.TestCase):
    """Functional tests driving the tutorial WSGI app through WebTest."""

    def setUp(self):
        # Build the application and wrap it in a WebTest client.
        from tutorial import main
        from webtest import TestApp
        self.testapp = TestApp(main({}))

    def test_home(self):
        response = self.testapp.get('/', status=200)
        self.assertIn(b'Visit', response.body)

    def test_hello(self):
        response = self.testapp.get('/howdy', status=200)
        for expected in (b'Hello World!', b'Go back'):
            self.assertIn(expected, response.body)
if __name__ == "__main__":
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
|
FedericoStra/pyramid_tutorial
|
07_views/tests/test_functional.py
|
Python
|
mit
| 585
|
[
"VisIt"
] |
f25718aa7f5c8f381306d15be0a3f999266a4f7fff5a67e0a11c52ddac0e80e6
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 30 15:34:39 2016
@author: charlesgulian
"""
import os
curr_dir = os.getcwd()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pysex
import sex_stats
import sex_config
import fits_tools
# Image deconvolution project:
# Main script for data analysis, image comparison, photometric statistics, and more

# Good image comparison
goodImage1 = 'AstroImages/Good/fpC-6484-x4078-y134_stitched_alignCropped.fits'
goodImage2 = 'AstroImages/Good/fpC-7006-x5226-y115_stitched_alignCropped.fits'
goodImage3 = 'AstroImages/Good/fpC-4868-x4211-y138_stitched_alignCropped.fits'
goodImage4 = 'AstroImages/Good/fpC-6383-x5176-y121_stitched_alignCropped.fits'
goodImgs = [goodImage1,goodImage2,goodImage3,goodImage4]
# NOTE(review): the four-image list above is immediately overridden -- only
# images 1 and 3 are actually processed. Delete one of the two assignments
# once the intended selection is settled.
goodImgs = [goodImage1,goodImage3]

# Bad image comparison:
# NOTE(review): badImgs is defined but the main loop below iterates only
# over goodImgs.
badImage1 = 'AstroImages/Bad/fpC-5759-x24775-y300_stitched_alignCropped.fits' # Modest gradient from top to bottom
badImage2 = 'AstroImages/Bad/fpC-6548-x24940-y302_stitched_alignCropped.fits' # Modest gradient from top to bottom
badImage3 = 'AstroImages/Bad/fpC-5781-x25627-y293_stitched_alignCropped.fits' # Very weak gradient from bottom left to top right
badImage4 = 'AstroImages/Bad/fpC-7140-x24755-y270_stitched_alignCropped.fits' # Weak gradient from bottom left to top right
badImgs = [badImage1,badImage2,badImage3,badImage4]
# Main analysis loop: run SExtractor in dual-image mode on every ordered pair
# of distinct "good" images, then plot flux-ratio diagnostics.
# NOTE(review): this is Python 2 code (print statements). The two trailing
# `break` statements at the bottom cut both loops short after the first pair.
for testImage1 in goodImgs:
    for testImage2 in goodImgs:
        # Skip self-comparison.
        if testImage1 == testImage2:
            continue
        # Printing image file names
        print testImage1+' '+testImage2

        # Optional median-subtraction preprocessing (currently disabled).
        medSub = False
        if medSub:
            testImage1_medSub = testImage1.replace('.fits','_medSub.fits')
            testImage2_medSub = testImage2.replace('.fits','_medSub.fits')
            fits_tools.subtractMedian(testImage1,new_image_file=testImage1_medSub)
            fits_tools.subtractMedian(testImage2,new_image_file=testImage2_medSub)
            testImage1 = testImage1_medSub
            testImage2 = testImage2_medSub

        # Write configuration files for SExtractor comparison
        # Writing configuration file for first image
        fig1 = sex_config.configure(testImage1+','+testImage1,'default.sex','default.param',dual=True)
        # Do default configuration
        fig1.default_config()
        # Writing onfiguration file for second image
        fig2 = sex_config.configure(testImage1+','+testImage2,'default.sex','default.param',dual=True)
        # Do default configuration
        fig2.default_config()

        # Parameter varied between SExtractor runs.
        # NOTE(review): PhotAper and BackSize are computed but never used in
        # this loop.
        varParam = 'Filter'
        PhotAper = np.linspace(5.0,25.0,5)
        BackSize = np.linspace(150,350,5)
        Filter = ['Y','N']
        varParamRange = Filter
        for k in range(len(varParamRange)):
            # Adjust Kron factor
            # NOTE(review): the comment above appears stale -- this toggles
            # FILTER, not the Kron factor.
            fig1.reconfigure('FILTER',Filter[k])
            # Change check image name
            #temp = fig1.config_dict['CHECKIMAGE_NAME'].replace('Results/CheckImages','Figures/Jul19/'+varParam).replace('.fits','_{}{}.fits'.format(varParam,varParamRange[k]))
            #fig1.reconfigure('CHECKIMAGE_NAME',temp)
            # Write new configuration file for first image
            fig1.write_config_file(new_config_file='copy_compare1.sex',new_param_file='copy_compare1.param')

            # Adjust Kron factor
            fig2.reconfigure('FILTER',Filter[k])
            # Change check image name
            #temp = fig2.config_dict['CHECKIMAGE_NAME'].replace('Results/CheckImages','Figures/Jul19/'+varParam).replace('.fits','_{}{}.fits'.format(varParam,varParamRange[k]))
            #fig2.reconfigure('CHECKIMAGE_NAME',temp)
            # Write new configuration file for second image
            fig2.write_config_file(new_config_file='copy_compare2.sex',new_param_file='copy_compare2.param')

            # Compare images in SExtractor
            # NOTE(review): `output` is never used afterwards.
            output = pysex.compare(testImage1,testImage2,'copy_compare1.sex','copy_compare2.sex')

            # Get image tags (base file names without the '.fits' suffix)
            img_tag1 = (os.path.split(testImage1)[1])
            img_tag1 = img_tag1[0:len(img_tag1)-len('.fits')]
            img_tag2 = (os.path.split(testImage2)[1])
            img_tag2 = img_tag2[0:len(img_tag2)-len('.fits')]

            # Catalog paths expected from the comparison run above; missing
            # files are only reported, not treated as fatal.
            outputCat1 = os.path.join(os.getcwd(),'Results',img_tag1+'_'+img_tag1+'_compare.cat')
            if not os.path.exists(outputCat1):
                print 'Error: first output catalog path does not exist'
            outputCat2 = os.path.join(os.getcwd(),'Results',img_tag1+'_'+img_tag2+'_compare.cat')
            if not os.path.exists(outputCat2):
                print 'Error: second output catalog path does not exist'

            # Create sex_stats.data objects:
            img1data = sex_stats.data(outputCat1)
            img2data = sex_stats.data(outputCat2)

            # Create .reg files from output catalogs
            CREATE_regFiles = False
            if CREATE_regFiles:
                img1data.create_regFile()
                img2data.create_regFile()

            #-----------------------------------------------------------------------------#
            # Flux ratio analysis:

            flux1,flux2 = img1data.get_data('FLUX_APER'),img2data.get_data('FLUX_APER')
            #mag1,mag2 = img1data.get_data('MAG_AUTO'),img2data.get_data('MAG_AUTO')
            #flux1,flux2 = mag1,mag2
            x,y = img1data.get_data('X_IMAGE'),img1data.get_data('Y_IMAGE')
            flux1,flux2 = np.array(flux1),np.array(flux2)

            #'''
            fluxAvg = 0.5*(flux1+flux2)
            fluxRatio = np.divide(flux1,flux2)
            fluxRatio_mean = np.mean(fluxRatio)
            fluxRatio_std = np.std(fluxRatio)
            #fluxRatio_meanSubtracted = fluxRatio - fluxRatio_mean # (NOT MEAN SUBTRACTED)
            #'''

            # Diagnostic printout for the current parameter value.
            #'''
            print ''
            print '{} = {}'.format(varParam,varParamRange[k])
            print ''
            print 'Number of objects detected: ',len(fluxRatio)
            print 'Minimum flux values: ', np.min(flux1),' ',np.min(flux2)
            print 'Number of negative flux values: ', len(np.where(flux1 < 0.0)[0]),' ',len(np.where(flux2 < 0.0)[0])
            print 'Mean flux values: ',np.mean(flux1),' ',np.mean(flux2)
            print ''
            print 'Minimum flux ratio: ', np.min(fluxRatio)
            print 'Maximum flux ratio: ', np.max(fluxRatio)
            print 'Standard deviation of flux ratio: ', np.std(fluxRatio)
            print ''
            print 'Minimum values of images: ', np.min(fits_tools.getPixels(testImage1)),' ',np.min(fits_tools.getPixels(testImage2))
            print 'Median values of images: ', np.median(fits_tools.getPixels(testImage1)),' ',np.median(fits_tools.getPixels(testImage2))
            print 'Mean values of images: ', np.mean(fits_tools.getPixels(testImage1)),' ',np.mean(fits_tools.getPixels(testImage2))
            print ' '
            #'''

            # Creating histogram of flux1 and flux2
            # plt.hist has range=(tup1,tup2) to set upper and lower bounds of histogram
            # NOTE(review): `normed` was removed in matplotlib >= 3.x; confirm
            # the pinned matplotlib version.
            plt.hist(flux1,bins=70,normed=False,color='green')
            plt.hist(flux2,bins=70,normed=False,color='blue')
            plt.title('Histogram of Object Flux for Entire Image')
            plt.ylabel('Frequency (N)')
            plt.xlabel('Object flux')
            SAVE = True
            if SAVE:
                plt.savefig(os.path.join(curr_dir,'Figures','Jul19',varParam,'flux1_flux2_hist_{}{}.png'.format(varParam,varParamRange[k])))
                plt.close()
            else:
                plt.show()

            # Creating histogram of flux1/flux2 (object-wise flux ratio)
            #plt.hist(fluxRatio,bins=70,range=(-25.0,25.0),color='green') # Range = (-25.0,25.0)
            #plt.hist(fluxRatio,bins=int(abs(np.max(fluxRatio)-np.min(fluxRatio))*10.0),range=(np.min(fluxRatio),np.max(fluxRatio)),color='green')
            plt.hist(fluxRatio,bins=70,range=(np.min(fluxRatio),np.max(fluxRatio)),color='green')
            plt.title('Histogram of Object-wise Flux Ratio')
            plt.ylabel('Frequency (N)')
            plt.xlabel('Object-wise flux ratio')
            SAVE = True
            if SAVE:
                plt.savefig(os.path.join(curr_dir,'Figures','Jul19',varParam,'fluxRatio_hist_{}{}.png'.format(varParam,varParamRange[k])))
                plt.close()
            else:
                plt.show()

            # Creating color plot of object-wise flux ratio
            cmap = matplotlib.cm.jet
            plt.scatter(x,y,s=25.0*img1data.get_data('A_IMAGE'),c=fluxRatio-np.median(fluxRatio),marker='o',vmin=np.min(fluxRatio),vmax=np.max(fluxRatio),alpha=0.85)
            plt.axis([0,1600,0,1600])
            plt.colorbar()
            plt.title('Map of Object-wise Flux Ratio')
            plt.xlabel('X_IMAGE')
            plt.ylabel('Y_IMAGE')
            SAVE = True
            if SAVE:
                plt.savefig(os.path.join(curr_dir,'Figures','Jul19',varParam,'fluxRatio_map_{}{}.png'.format(varParam,varParamRange[k])))
                plt.close()
            else:
                plt.show()

            # Creating histogram of object-wise flux ratio for 4x4 bins
            m,n = 4,4
            xBins,yBins,fluxRatioBins = sex_stats.binData(x,y,fluxRatio,M=m,N=n)
            for i in range(m):
                for j in range(n):
                    plt.subplot(m,n,(n*i + (j+1)))
                    #plt.hist(fluxRatioBins[i,j],bins=int(abs(np.max(fluxRatio)-np.min(fluxRatio))*10.0))
                    plt.hist(fluxRatioBins[i,j],bins=70)
                    plt.axis([np.min(fluxRatioBins[i,j]),np.max(fluxRatioBins[i,j]),0.0,15.0])
            SAVE = True
            if SAVE:
                plt.savefig(os.path.join(curr_dir,'Figures','Jul19',varParam,'fluxRatioBins_hist_{}{}.png'.format(varParam,varParamRange[k])))
                plt.close()
            else:
                plt.show()

            # The block below is dead code disabled via a bare string literal
            # (the trailing "#'''" closes the string).
            '''
            fluxRatioBin_Avgs = np.zeros([m,n])
            emptyBins = []
            for i in range(m):
            for j in range(n):
            # Clipping data in bins:
            fluxRatioBins_sigmaClipped = []
            fluxRatioBins_excess = []
            for k in range(len(fluxRatioBins[i,j])):
            if np.abs((fluxRatioBins[i,j])[k]) <= maxSig[s]*np.std(fluxRatioBins[i,j]):
            fluxRatioBins_sigmaClipped.append(fluxRatioBins[i,j][k])
            else:
            fluxRatioBins_excess.append()
            if len(fluxRatioBins_sigmaClipped) == 0:
            emptyBins.append('{},{}'.format(str(i),str(j)))
            fluxRatioBins[i,j] = fluxRatioBins_sigmaClipped
            fluxRatioBin_Avgs[i,j] = np.mean(fluxRatioBins_sigmaClipped)
            # Masking NaNs in fluxRatioBin_Avgs:
            fluxRatioBin_Avgs_Masked = np.ma.array(fluxRatioBin_Avgs,mask=np.isnan(fluxRatioBin_Avgs))
            cmap = matplotlib.cm.gray
            cmap.set_bad('r',1.)
            #print np.nanmean(fluxRatioBin_Avgs)-2.0,' ',np.nanmean(fluxRatioBin_Avgs)+2.0
            plt.pcolormesh(fluxRatioBin_Avgs_Masked,cmap=cmap,vmin=np.nanmean(fluxRatioBin_Avgs)-2.0,vmax=np.nanmean(fluxRatioBin_Avgs)+2.0)
            plt.colorbar()
            plt.xlabel('X Bin')
            plt.ylabel('Y Bin')
            plt.title('Flux Ratio Bin Averages: {} x {}'.format(m,n))
            if not os.path.exists(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10]))):
            os.mkdir(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10])))
            plt.savefig(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10]),'fluxRatioBin_Avgs_sigmaClip{}.png'.format(str(maxSig[s])[0:4])))
            plt.close()
            plot = False # Warning: do not change to true unless length of maxSig small
            if plot:
            # Plotting source-wise flux ratio w/ colors
            plt.scatter(x_clip, y_clip, s=25*np.log10(0.1*np.array(fluxAvg_clip)), c=fluxRatio_meanSubtracted_sigmaClipped, vmin=-1.5*maxSig[j]*fluxRatio_std, vmax=1.5*maxSig[j]*fluxRatio_std, alpha=0.75)
            plt.axis([0,1600,0,1600])
            plt.colorbar()
            plt.xlabel('X_IMAGE')
            plt.ylabel('Y_IMAGE')
            plt.title('Flux Ratio Color Map: sigma cutoff = '+str(maxSig[j])[0:4])
            plt.savefig((curr_dir+'/Figures/{}_{}_maxSig{}_fluxRatio_LINETEST.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
            plt.close()
            #'''
        # Debugging short-circuit: stop after the first image pair.
        break
    break
""" THIS SECTION OF CODE WAS COMMENTED OUT ON July 12th, 2016; uncomment to do statistical analysis
chiSqNorm_linear = []
chiSqNorm_flat = []
rSqAdj = []
numPoints = []
for j in range(len(maxSig)):
# Clipping data
fluxRatio_meanSubtracted_sigmaClipped = []
fluxRatio_excess = []
fluxAvg_clip = []
x_clip,y_clip = [],[]
x_exc,y_exc = [],[]
for i in range(len(fluxRatio_meanSubtracted)):
if np.abs(fluxRatio_meanSubtracted[i]) < maxSig[j]*fluxRatio_std:
fluxRatio_meanSubtracted_sigmaClipped.append(fluxRatio_meanSubtracted[i])
x_clip.append(x[i])
y_clip.append(y[i])
fluxAvg_clip.append(fluxAvg[i])
else:
fluxRatio_excess.append(fluxRatio_meanSubtracted[i])
x_exc.append(x[i])
y_exc.append(y[i])
fluxRatio_meanSubtracted_sigmaClipped,fluxRatio_excess = np.array(fluxRatio_meanSubtracted_sigmaClipped),np.array(fluxRatio_excess)
x_clip,y_clip,x_exc,y_exc = np.array(x_clip),np.array(y_clip),np.array(x_exc),np.array(y_exc)
numPoints.append(float(len(x_clip)))
# Analyzing goodness-of-fit of 3D linear model fitted to data:
coeffs = sex_stats.linReg3D(x_clip,y_clip,fluxRatio_meanSubtracted_sigmaClipped)[0]
linearModelPoints = coeffs[0] + coeffs[1]*x_clip + coeffs[2]*y_clip
flatModelPoints = np.ones(np.shape(fluxRatio_meanSubtracted_sigmaClipped))*fluxRatio_mean
# SciPy: scipy.stats.chisquare
#CSN_lin = spst.chisquare()
CSN_lin = sex_stats.chiSquareNormalized(fluxRatio_meanSubtracted_sigmaClipped,linearModelPoints,3)
CSN_flat = sex_stats.chiSquareNormalized(fluxRatio_meanSubtracted_sigmaClipped,flatModelPoints,1)
RSA = sex_stats.rSquaredAdjusted(fluxRatio_meanSubtracted_sigmaClipped,linearModelPoints,3)
chiSqNorm_linear.append(CSN_lin)
chiSqNorm_flat.append(CSN_flat)
rSqAdj.append(RSA)
plot = True # Warning: do not change to true unless length of maxSig small
if plot:
# Plotting source-wise flux ratio w/ colors
plt.scatter(x_clip, y_clip, s=25*np.log10(0.1*np.array(fluxAvg_clip)), c=fluxRatio_meanSubtracted_sigmaClipped, vmin=-1.5*maxSig[j]*fluxRatio_std, vmax=1.5*maxSig[j]*fluxRatio_std, alpha=0.75)
plt.axis([0,1600,0,1600])
plt.colorbar()
plt.xlabel('X_IMAGE')
plt.ylabel('Y_IMAGE')
plt.title('Flux Ratio Color Map: sigma cutoff = '+str(maxSig[j])[0:4])
plt.savefig((curr_dir+'/Figures/{}_{}_maxSig{}_fluxRatio_LINETEST.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
plt.close()
hist = False # Warning: do not change to true unless length of maxSig small
if hist:
# Plotting histogram of flux ratio
plt.hist(fluxRatio_meanSubtracted_sigmaClipped,bins=20,color='green')
plt.title('Histogram of Flux Ratio')
plt.ylabel(('Mean subtracted + clipped @ {} sigma').format(str(maxSig[j])[0:4]))
plt.xlabel('Flux ratio')
plt.savefig((curr_dir+'/Figures/Hist_{}_{}_maxSig{}_fluxRatio.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
plt.close()
# Changing lists to NumPy arrays:
# chiSqNorm_linear,chiSqNorm_flat,rSqAdj = np.array(chiSqNorm_linear),np.array(chiSqNorm_flat),np.array(rSqAdj)
# Number of data points analyzed:
numPoints = np.array(numPoints)
numPoints = numPoints*(1.0/float(len(fluxRatio)))
# Plotting reduced chi-square statistic
plt.close()
plt.plot(maxSig,chiSqNorm_linear,'r-',label='Linear model')
plt.plot(maxSig,chiSqNorm_flat,'b-',label='Flat model')
plt.plot(maxSig,numPoints,'0.35',label='Frac. of data points')
plt.legend()
plt.axis([-0.1,1.0,0.0,3.0])
plt.title('Normalized Chi-square vs. Sigma Cutoff: 3D Linear Eq. + Gaussian Noise Test')
plt.ylabel('Normalized Chi-square: Linear')
plt.xlabel('Sigma Cutoff (# standard deviations from mean)')
plt.ylabel('Normalized Chi-square')
#plt.savefig(os.path.join(os.getcwd(),'Figures','StatAnalysis','Linear_eq_test_6'))
plt.show()
'''
# Plotting adjusted r-squared statistic
plt.plot(maxSig,rSqAdj,'k-')
plt.axis([0.0,1.0,-1.1,1.1])
plt.title('Adjusted r-Squared vs. Sigma Cutoff')
plt.xlabel('Sigma Cutoff (# standard deviations from mean)')
plt.ylabel('Adjusted r-Squared')
plt.show()
'''
"""
|
CharlesGulian/Deconv
|
Main.py
|
Python
|
gpl-3.0
| 17,228
|
[
"Gaussian"
] |
010744d03a4727cfd192ab34574b9731cc2448fe650b8085dca258e8bc959053
|
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
import os
from typing import FrozenSet
import paleomix.common.makefile
import pysam
from paleomix.common.fileutils import swap_ext
from paleomix.common.formats.bed import BEDError, read_bed_file
from paleomix.common.formats.fasta import FASTA
from paleomix.common.makefile import (
CLI_PARAMETERS,
REQUIRED_VALUE,
And,
IsBoolean,
IsDictOf,
IsFloat,
IsInt,
IsListOf,
IsNone,
IsStr,
IsUnsignedInt,
MakefileError,
Not,
Or,
RemovedOption,
StringEndsWith,
StringIn,
StringStartsWith,
ValueIn,
ValuesSubsetOf,
)
from paleomix.common.text import parse_padded_table
from paleomix.common.utilities import fill_dict
def read_makefiles(options, commands):
    """Read, validate, and post-process each makefile in options.files."""
    logger = logging.getLogger(__name__)
    # Only the step identifiers are needed; the command objects are dropped.
    steps = frozenset(step for (step, _) in commands)

    makefiles = []
    for filename in options.files:
        logger.info("Reading makefile %r", filename)
        raw = paleomix.common.makefile.read_makefile(filename, _VALIDATION)
        makefiles.append(_mangle_makefile(options, raw, steps))

    return makefiles
def _mangle_makefile(options, mkfile, steps):
    """Post-process a validated makefile in-place and return it.

    NOTE(review): step order likely matters -- e.g. later steps read
    mkfile["Project"]["Samples"], which _collapse_samples rebuilds first.
    Confirm before reordering.
    """
    _collapse_samples(mkfile)
    _update_regions(options, mkfile)
    _update_subsets(mkfile, steps)
    _update_filtering(mkfile)
    _update_sample_sets(mkfile)
    _update_genotyping(mkfile)
    _update_msa(mkfile)
    _update_homozygous_contigs(mkfile)
    _check_bam_sequences(options, mkfile, steps)
    _check_sexes(mkfile)
    _update_and_check_max_read_depth(options, mkfile)
    _check_indels_and_msa(mkfile)
    # Placeholder populated later by the pipeline.
    mkfile["Nodes"] = ()

    return mkfile
def _collapse_samples(mkfile):
    """Flatten the nested Samples tree of a makefile, in-place.

    Group keys are written as "<name>"; their contents are collapsed into a
    flat mapping of sample name -> sample dict stored at
    mkfile["Project"]["Samples"], while mkfile["Project"]["Groups"] maps each
    group path (a tuple of group names) to the samples it contains. Each
    sample dict gets a "Name" entry. Raises MakefileError on duplicate
    sample names.
    """
    groups, samples = {}, set()

    def _collect_samples(samples_dict, path=()):
        current_samples = {}
        for (key, subdd) in samples_dict.items():
            if key.startswith("<") and key.endswith(">"):
                # Strip exactly one delimiter from each end; consistent with
                # the "<group>" handling in _select_samples. The previous
                # lstrip/rstrip would strip repeated '<'/'>' characters.
                key = key[1:-1]
                current_samples.update(_collect_samples(subdd, path + (key,)))
            elif key not in samples:
                samples.add(key)
                subdd["Name"] = key
                current_samples[key] = subdd
            else:
                raise MakefileError("Duplicate sample-name: %r" % (key,))

        groups[path] = current_samples
        return current_samples

    _collect_samples(mkfile["Project"]["Samples"])
    # The root group becomes the flat sample table; named groups remain.
    mkfile["Project"]["Samples"] = groups.pop(())
    mkfile["Project"]["Groups"] = groups
def _select_samples(select, groups, samples, path):
selection = set()
for group in select:
if group.startswith("<") and group.endswith(">"):
key = tuple(group[1:-1].split("/"))
if key not in groups:
raise MakefileError(
"Unknown group specifed for filtering %r: %r" % (path, key)
)
selection.update(groups[key])
elif group in samples:
selection.add(group)
else:
raise MakefileError(
"Unknown/Invalid group specifed for filtering %r: %r" % (path, group)
)
return selection
def _update_regions(options, mkfile):
    """Validate the 'RegionsOfInterest' section, renaming it to 'Regions'
    and expanding every entry with derived values: BED/FASTA input paths,
    the set of named sequences, and per-sample genotype FASTA paths."""
    log = logging.getLogger(__name__)
    log.info("Validating regions of interest")
    mkfile["Project"]["Regions"] = mkfile["Project"].pop("RegionsOfInterest")
    if not mkfile["Project"]["Regions"]:
        raise MakefileError(
            "No regions of interest have been specified; "
            "no analyses will be performed."
        )
    for (name, subdd) in mkfile["Project"]["Regions"].items():
        if "Prefix" not in subdd:
            raise MakefileError("No genome specified for regions %r" % (name,))
        subdd["Name"] = name
        subdd["Desc"] = "{Prefix}.{Name}".format(**subdd)
        # Input files are resolved relative to the configured root folders.
        subdd["BED"] = os.path.join(options.regions_root, subdd["Desc"] + ".bed")
        subdd["FASTA"] = os.path.join(options.prefix_root, subdd["Prefix"] + ".fasta")
        required_files = (
            ("Regions file", subdd["BED"]),
            ("Reference sequence", subdd["FASTA"]),
        )
        for (desc, path) in required_files:
            if not os.path.isfile(path):
                raise MakefileError(
                    "%s does not exist for %r:\n Path = %r" % (desc, name, path)
                )
        # Collects seq. names / validate regions
        try:
            sequences = _collect_sequence_names(
                bed_file=subdd["BED"], fasta_file=subdd["FASTA"]
            )
        except (IOError, BEDError) as error:
            raise MakefileError(
                "Error reading regions-of-interest %r:\n%s" % (name, error)
            )
        # The None key holds the full sequence set; named subsets are added
        # later by _update_subsets.
        subdd["Sequences"] = {None: sequences}
        subdd["SubsetFiles"] = {None: ()}
        # Expected output locations of each sample's genotype FASTA.
        sampledd = subdd["Genotypes"] = {}
        for sample_name in mkfile["Project"]["Samples"]:
            fasta_file = ".".join((sample_name, subdd["Desc"], "fasta"))
            sampledd[sample_name] = os.path.join(
                options.destination, mkfile["Project"]["Title"], "genotypes", fasta_file
            )
def _collect_fasta_contigs(filename, cache={}):
    """Return the contig table for a FASTA file, indexing it on first use.

    NOTE: The mutable default `cache` is intentional; it memoizes results
    for the lifetime of the process. The returned value is whatever
    FASTA.index_and_collect_contigs yields -- used elsewhere in this module
    as a mapping of contig name to length.
    """
    if filename in cache:
        return cache[filename]
    if not os.path.exists(filename + ".fai"):
        # Indexing happens implicitly inside index_and_collect_contigs;
        # warn beforehand since it may be slow for large genomes.
        log = logging.getLogger(__name__)
        log.info("Indexing %r; this may take a while", filename)
    cache[filename] = contigs = FASTA.index_and_collect_contigs(filename)
    return contigs
def _collect_sequence_names(bed_file: str, fasta_file: str) -> FrozenSet[str]:
    """Return the frozen set of region names defined in `bed_file`.

    Every BED record must specify a strand, and records sharing a name must
    lie on the same contig and strand, since same-named regions are treated
    as parts of one consecutive sequence.

    Raises MakefileError on violations; IOError/BEDError may propagate from
    reading the input files (handled by the caller).
    """
    contigs = _collect_fasta_contigs(fasta_file)
    sequences = {}
    for record in read_bed_file(bed_file, contigs=contigs):
        if record.strand is None:
            raise MakefileError(
                "Found BED record in %r without a strand:\n%s" % (bed_file, record)
            )
        current = (record.contig, record.strand)
        # First record under a name becomes the reference for later records.
        reference = sequences.setdefault(record.name, current)
        if current[0] != reference[0]:
            raise MakefileError(
                "Regions in %r with the same name (%r) "
                "are located on different contigs (%r and "
                "%r); note that PALEOMIX assumes that "
                "regions with the same name constitute "
                "parts of a single consecutive sequence, "
                "which must therefore be located on one "
                "strand of a single sequence. Please "
                # FIX: trailing space added; message used to read "tocontinue."
                "rename one or more of these regions to "
                "continue." % (bed_file, record.name, current[0], reference[0])
            )
        elif current[1] != reference[1]:
            raise MakefileError(
                "Regions in %r with the same name (%r) "
                "are located on different strands; note "
                "that PALEOMIX assumes that regions with "
                "the same name constitute parts of a "
                "single consecutive sequence, and that "
                "these must therefore be located on the "
                "same strand." % (bed_file, record.name)
            )
    return frozenset(sequences)
def _update_subsets(mkfile, steps):
    """Resolve 'SubsetRegions' references in the PhylogeneticInference
    section (only when the ExaML step is requested), reading each subset's
    ".names" file and recording the subset of sequence names per region."""
    subsets_by_regions = mkfile["Project"]["Regions"]

    def _collect_subsets(roi, subset, path):
        if roi not in subsets_by_regions:
            raise MakefileError(
                "Subset of unknown region (%r) requested at %r" % (roi, path)
            )
        # Subset files live next to the region's BED file, named
        # "<bed-basename>.<subset>.names".
        roi_fname = swap_ext(subsets_by_regions[roi]["BED"], subset + ".names")
        if not os.path.isfile(roi_fname):
            raise MakefileError(
                "Subset file does not exist for Regions Of "
                "Interest:\n Region = %r\n Subset = %r\n"
                " Path = %r" % (roi, subset, roi_fname)
            )
        # One sequence name per line; blank lines and '#' comments ignored.
        sequences = set()
        with open(roi_fname) as handle:
            for line in handle:
                line = line.strip()
                if line and not line.startswith("#"):
                    sequences.add(line)
        known_seqs = subsets_by_regions[roi]["Sequences"][None]
        unknown_seqs = sequences - known_seqs
        if unknown_seqs:
            message = (
                "Unknown sequences in subset file:\n"
                " File = %r\n Region = %r\n Subset = %r\n"
                " Unknown sequence names ="
            ) % (roi_fname, roi, subset)
            # Only the first few offenders are listed, to keep errors short.
            unknown_seqs = list(sorted(unknown_seqs))
            if len(unknown_seqs) > 5:
                unknown_seqs = unknown_seqs[:5] + ["..."]
            message = "\n - ".join([message] + unknown_seqs)
            raise MakefileError(message)
        subsets_by_regions[roi]["SubsetFiles"][subset] = (roi_fname,)
        subsets_by_regions[roi]["Sequences"][subset] = frozenset(sequences)
    if "phylogeny:examl" in steps:
        for (key, subdd) in mkfile["PhylogeneticInference"].items():
            for (subkey, roidd) in subdd["RegionsOfInterest"].items():
                if subkey not in subsets_by_regions:
                    message = (
                        "Unknown regions name in phylogenetic inference:\n"
                        "\tPath = PhylogeneticInference:%s:RegionsOfInterest"
                        "\n\tName = %s"
                    )
                    raise MakefileError(message % (key, subkey))
                roidd["Name"] = subkey
                if roidd.get("SubsetRegions") is not None:
                    path = "PhylogeneticInference:%s:RegionsOfInterest:%s" % (
                        key,
                        subkey,
                    )
                    _collect_subsets(subkey, roidd["SubsetRegions"], path)
def _update_filtering(mkfile):
    """Resolve 'FilterSingletons' into sample-name sets: each target sample
    maps to the set of samples used when filtering its singletons. Group
    references are expanded; the target itself is removed if implied."""
    samples = mkfile["Project"]["Samples"]
    groups = mkfile["Project"]["Groups"]
    log = logging.getLogger(__name__)
    filtering = {}
    for (target, filter_by) in mkfile["Project"]["FilterSingletons"].items():
        if target.startswith("<") and target.endswith(">"):
            raise MakefileError(
                "Singleton-filtering must be specified per "
                "sample, not by groups: %r" % (target,)
            )
        elif target not in samples:
            raise MakefileError(
                "Unknown/Invalid sample specifed for singleton filtering: %r"
                % (target,)
            )
        elif target in filter_by:
            raise MakefileError(
                "Filtering singleton in sample using itself as comparison: %r"
                % (target,)
            )
        path = "Project:FilterSingletons:%s" % (target,)
        filtering[target] = _select_samples(filter_by, groups, samples, path)
        # Implicit inclusion is allowed, since that is useful in some cases,
        # where we want to filter a sample based on the group it is a member of
        if target in filtering[target]:
            # The target itself must be excluded, as including it is invalid
            filtering[target] = filtering[target] - set((target,))
            log.warning(
                "Sample %r is singleton-filtered using a group it is also a member of",
                target,
            )
        if not filtering[target]:
            raise MakefileError(
                "No samples specified by which to "
                "singleton-filter by for %r" % (target,)
            )
    mkfile["Project"]["FilterSingletons"] = filtering
def _update_homozygous_contigs(mkfile):
"""Treat unspecified values for HomozygousContigs as an empty list, in
order that the user does not need to specify "[]" for empty lists.
"""
for regions in mkfile["Project"]["Regions"].values():
hcontigs = regions["HomozygousContigs"]
for key, contigs in hcontigs.items():
if contigs is None:
hcontigs[key] = []
def _check_bam_sequences(options, mkfile, steps):
    """Check that the BAM files contains the reference sequences found in the
    FASTA file, matched by name and length; extra sequences are permitted. This
    check is only done if genotyping is to be carried out, to reduce the
    overhead of reading the BAM file headers.
    """
    if ("genotype" not in steps) and ("genotyping" not in steps):
        return
    log = logging.getLogger(__name__)
    log.info("Validating BAM files")
    bam_files = {}
    for regions in mkfile["Project"]["Regions"].values():
        for sample in mkfile["Project"]["Samples"].values():
            # Expected location: <samples_root>/<sample>.<prefix>.bam;
            # only BAM files that actually exist are checked here.
            filename = os.path.join(
                options.samples_root, "%s.%s.bam" % (sample["Name"], regions["Prefix"])
            )
            if os.path.exists(filename):
                bam_files[filename] = _collect_fasta_contigs(regions["FASTA"])
    for (filename, contigs) in bam_files.items():
        with pysam.AlignmentFile(filename) as handle:
            # Contig name -> length, taken from the BAM header.
            bam_contigs = dict(list(zip(handle.references, handle.lengths)))
            for (contig, length) in contigs.items():
                bam_length = bam_contigs.get(contig)
                if bam_length is None:
                    message = (
                        "Reference sequence missing from BAM file; "
                        "BAM file aligned against different prefix?\n"
                        " BAM file = %s\n Sequence name = %s"
                    ) % (filename, contig)
                    raise MakefileError(message)
                elif bam_length != length:
                    message = (
                        "Length of reference sequence in FASTA differs "
                        "from length of sequence in BAM file; BAM file "
                        "aligned against different prefix?\n"
                        " BAM file = %s\n"
                        " Length in FASTA = %s\n"
                        " Length in BAM = %s"
                    ) % (filename, length, bam_length)
                    raise MakefileError(message)
def _check_sexes(mkfile):
    """Validate sexes: all regions must declare the same set of sexes via
    'HomozygousContigs'; every sample must have one of those sexes; contigs
    listed for a sex but absent from every FASTA only produce a warning."""
    all_contigs = set()
    contigs_sexes = set()
    regions_sexes = set()
    for regions in mkfile["Project"]["Regions"].values():
        all_contigs.update(_collect_fasta_contigs(regions["FASTA"]))
        for contigs in regions["HomozygousContigs"].values():
            contigs_sexes.update(contigs)
        # The first region seen defines the canonical set of sexes.
        current_sexes = set(regions["HomozygousContigs"])
        if not regions_sexes:
            regions_sexes = current_sexes
        elif regions_sexes != current_sexes:
            raise MakefileError(
                "List of sexes for regions %r does not "
                "match other regions" % (regions["Name"],)
            )
    if not regions_sexes:
        # NOTE: "assosiated" (sic) preserved; runtime string.
        raise MakefileError(
            "No sexes have been specified in makefile; "
            "please list all sample sexes and assosiated "
            "homozygous contigs (if any)."
        )
    for sample in mkfile["Project"]["Samples"].values():
        if sample.get("Sex") is None:
            raise MakefileError(
                "Please specify a sex for sample %r, or "
                "'NA' if not applicable." % (sample["Name"])
            )
        elif sample["Sex"] not in regions_sexes:
            sexes = ", ".join(map(repr, regions_sexes))
            message = "Sample %r has unknown sex %r; known sexes are %s" % (
                sample["Name"],
                sample["Sex"],
                sexes,
            )
            raise MakefileError(message)
    # Unknown contigs are not fatal; the user is merely warned.
    unknown_contigs = contigs_sexes - all_contigs
    if unknown_contigs:
        log = logging.getLogger(__name__)
        log.warning("Unknown contig(s) in 'HomozygousContigs':")
        for name in sorted(unknown_contigs):
            log.warning(" - %r", name)
        log.warning("Please verify that the list(s) of contigs is correct!")
def _update_and_check_max_read_depth(options, mkfile):
    """Normalize 'MaxReadDepth' for every genotyping run into a
    sample -> depth mapping; the string "auto" triggers reading values
    from per-sample .depths files."""
    if any(
        subdd["VCF_Filter"]["MaxReadDepth"] == "auto"
        for subdd in mkfile["Genotyping"].values()
    ):
        log = logging.getLogger(__name__)
        log.info("Determinining max-depth from depth-histograms")
    for (key, settings) in mkfile["Genotyping"].items():
        required_keys = set()
        for sample in mkfile["Project"]["Samples"].values():
            required_keys.add(sample["Name"])
        max_depths = settings["VCF_Filter"]["MaxReadDepth"]
        if isinstance(max_depths, dict):
            # Extra keys are allowed, to make it easier
            # to temporarily disable a sample
            missing_keys = required_keys - set(max_depths)
            if missing_keys:
                missing_keys = "\n - ".join(sorted(missing_keys))
                message = (
                    "MaxReadDepth not specified for the following "
                    "samples for %r:\n - %s" % (key, missing_keys)
                )
                raise MakefileError(message)
        elif isinstance(max_depths, str):
            # The schema (_VALIDATION_GENOTYPES) only permits "auto" here.
            assert max_depths.lower() == "auto", max_depths
            prefix = mkfile["Project"]["Regions"][key]["Prefix"]
            settings["VCF_Filter"]["MaxReadDepth"] = _read_max_depths(
                options, prefix, required_keys
            )
        else:
            # A single integer applies to every sample alike.
            max_depths = dict.fromkeys(required_keys, max_depths)
            settings["VCF_Filter"]["MaxReadDepth"] = max_depths
def _read_max_depths(options, prefix, required_keys):
    """Return a sample -> MaxDepth mapping for `prefix`, parsed from
    "<sample>.<prefix>.depths" files under options.samples_root; raises
    MakefileError listing any missing files."""
    missing = []
    max_depths = {}
    for sample in required_keys:
        fname = "%s.%s.depths" % (sample, prefix)
        fpath = os.path.join(options.samples_root, fname)
        max_depths[sample] = fpath
        if not os.path.exists(fpath):
            missing.append((sample, fpath))
    if missing:
        raise MakefileError(
            "Could not determine 'MaxReadDepth' values "
            "automatically; .depth files are missing for one "
            "or more samples: \n - "
            + "\n - ".join("%s: %s" % item for item in missing)
            + "\n\nEnsure that the .depth files are available, "
            "or specify a value for 'MaxReadDepth' manually."
        )
    # Replace the collected paths with the parsed depth values.
    for sample, fpath in max_depths.items():
        max_depths[sample] = _read_max_depth(fpath, prefix, sample)
    return max_depths
def _read_max_depth(filename, prefix, sample):
    """Return the 'MaxDepth' value recorded for `sample` in the depth
    histogram at `filename`, as an int.

    Results are memoized in _DEPTHS_CACHE. If the exact sample name is not
    present, two fallbacks are attempted: a uniquely-matching base name
    (text before the first "."), or the single entry of a one-entry table.
    Raises MakefileError when no usable value can be located.
    """
    if filename in _DEPTHS_CACHE:
        return _DEPTHS_CACHE[filename]

    max_depth = None
    max_depths = {}
    try:
        with open(filename) as handle:
            for row in parse_padded_table(handle):
                # Only whole-sample summary rows are of interest.
                if (
                    row["Name"] != "*"
                    and row["Sample"] == "*"
                    and row["Library"] == "*"
                    and row["Contig"] == "*"
                ):
                    if row["Name"] in max_depths:
                        raise MakefileError(
                            "Depth histogram %r contains "
                            "multiple 'MaxDepth' records for "
                            "sample %r; please rebuild!" % (filename, row["Name"])
                        )

                    max_depths[row["Name"]] = row["MaxDepth"]
    except (OSError, IOError) as error:
        raise MakefileError(
            "Error reading depth-histogram (%s): %s" % (filename, error)
        )

    log = logging.getLogger(__name__)
    if sample in max_depths:
        max_depth = max_depths[sample]
    else:
        name_counts = {}
        name_mapping = {}
        for cand_sample in max_depths:
            name = cand_sample.split(".", 1)[0]
            name_mapping[name] = cand_sample
            name_counts[name] = name_counts.get(name, 0) + 1

        # FIX: the count table must be compared against 1; the previous
        # `name_mapping.get(sample) == 1` compared a sample name (a str)
        # to 1 and could never be true, making this fallback dead code.
        if name_counts.get(sample) == 1:
            # Sample name (with some extensions) found
            # This is typical if 'paleomix depths' has been run manually.
            max_depth = max_depths[name_mapping[sample]]
        elif len(max_depths) == 1:
            # Just one sample in the depth histogram; even though it does not
            # match, we assume that this is the correct table. This is because
            # manually generating files / renaming files would otherwise cause
            # failure when using 'MaxDepth: auto'.
            ((cand_sample, max_depth),) = max_depths.items()
            log.warning(
                "Name in depths file not as expected; found %r, not %r:",
                cand_sample,
                sample,
            )

    if max_depth is None:
        raise MakefileError(
            "MaxDepth for %r not found in depth-histogram: %r" % (sample, filename)
        )
    elif max_depth == "NA":
        # FIX: the message previously announced "sample %r" but passed the
        # filename; report both the sample and the file.
        raise MakefileError(
            "MaxDepth is not calculated for sample %r in %r; "
            "cannot determine MaxDepth values automatically." % (sample, filename)
        )
    elif not max_depth.isdigit():
        raise MakefileError(
            "MaxDepth is not a valid value for sample %r in %r; "
            "expected integer, found %r." % (sample, filename, max_depth)
        )

    max_depth = int(max_depth)
    log.info("%s.%s = %i", sample, prefix, max_depth)
    _DEPTHS_CACHE[filename] = max_depth
    return max_depth
# Memoization table used by _read_max_depth: histogram path -> max depth.
_DEPTHS_CACHE = {}
def _check_indels_and_msa(mkfile):
msa = mkfile["MultipleSequenceAlignment"]
regions = mkfile["Project"]["Regions"]
for (name, subdd) in regions.items():
msa_enabled = msa[name]["Enabled"]
if subdd["IncludeIndels"] and not msa_enabled:
raise MakefileError(
"Regions %r includes indels, but MSA is disabled!" % (name,)
)
def _update_sample_sets(mkfile):
    """Resolve group references in 'ExcludeSamples' and 'RootTreesOn' for
    every phylogenetic-inference run into concrete sample-name sets."""
    samples = mkfile["Project"]["Samples"]
    groups = mkfile["Project"]["Groups"]
    for (run_name, run) in mkfile["PhylogeneticInference"].items():
        run["ExcludeSamples"] = _select_samples(
            run["ExcludeSamples"],
            groups,
            samples,
            "PhylogeneticInference:%s:ExcludeSamples" % (run_name,),
        )
        # Replace None with an empty list, to simplify code using this value
        run["RootTreesOn"] = _select_samples(
            run["RootTreesOn"] or (),
            groups,
            samples,
            "PhylogeneticInference:%s:RootTreesOn" % (run_name,),
        )
def _update_genotyping(mkfile):
    """Merge the 'Defaults' genotyping settings into every region's entry
    and reject settings for regions that do not exist."""
    genotyping = mkfile["Genotyping"]
    defaults = genotyping.pop("Defaults")
    defaults.setdefault("Padding", 5)
    defaults["VCF_Filter"].setdefault("MaxReadDepth", 0)
    for (key, subdd) in genotyping.items():
        # Only the 'Defaults' section (popped above) may enable this option.
        if subdd.get("GenotypeEntirePrefix"):
            message = (
                "GenotypeEntirePrefix is only allowed for prefixes "
                "using default parameters, but is set for %r" % (key,)
            )
            raise MakefileError(message)
    # NOTE: this loop re-binds `key` from the loop above.
    for key in mkfile["Project"]["Regions"]:
        genotyping[key] = fill_dict(genotyping.get(key, {}), defaults)
    regions = set(genotyping)
    unknown_regions = regions - set(mkfile["Project"]["Regions"])
    if unknown_regions:
        raise MakefileError(
            "Unknown Regions of Interest in Genotyping: %s"
            % (", ".join(unknown_regions),)
        )
def _update_msa(mkfile):
    """Merge the 'Defaults' multiple-sequence-alignment settings into every
    region's entry and reject settings for regions that do not exist."""
    msa = mkfile["MultipleSequenceAlignment"]
    defaults = msa.pop("Defaults")
    defaults.setdefault("Program", "MAFFT")
    defaults["MAFFT"].setdefault("Algorithm", "MAFFT")
    for key in mkfile["Project"]["Regions"]:
        msa[key] = fill_dict(msa.get(key, {}), defaults)
    unknown_regions = set(msa) - set(mkfile["Project"]["Regions"])
    if unknown_regions:
        # FIX: the message previously named the "Genotyping" section
        # (copy-paste from _update_genotyping), misleading users about
        # where the bad keys were found.
        raise MakefileError(
            "Unknown Regions of Interest in MultipleSequenceAlignment: %s"
            % (", ".join(unknown_regions),)
        )
# Recursive definition of sample tree
_VALIDATION_SUBSAMPLE_KEY = And(StringStartsWith("<"), StringEndsWith(">"))
_VALIDATION_SAMPLES_KEY = And(IsStr, Not(_VALIDATION_SUBSAMPLE_KEY))
_VALIDATION_SAMPLES = {
    _VALIDATION_SAMPLES_KEY: {
        "GenotypingMethod": RemovedOption(),
        "SpeciesName": RemovedOption(),
        "CommonName": RemovedOption(),
        "Sex": IsStr(),
        "Gender": RemovedOption(),
    }
}
# Samples may be nested arbitrarily deep inside "<group>" keys; the schema
# refers back to itself to express this.
_VALIDATION_SAMPLES[_VALIDATION_SUBSAMPLE_KEY] = _VALIDATION_SAMPLES
# Genotyping settings; note that explicit lists must not be used here, to allow
# proper inheritance of default values. Use IsListOf instead.
_VALIDATION_GENOTYPES = {
    "Padding": IsUnsignedInt,
    "GenotypeEntirePrefix": IsBoolean(default=False),
    "MPileup": {StringStartsWith("-"): Or(IsInt, IsStr, IsNone)},
    "BCFTools": {
        StringStartsWith("-"): Or(IsInt, IsStr, IsNone),
        # BCFTools used '-g' by itself to indicate the SNP caller, but now -g has a
        # different meaning and takes values. Check forces updates to old makefile.
        "-g": Or(IsInt, IsStr),
    },
    "Random": {"--min-distance-to-indels": IsUnsignedInt},
    "VCF_Filter": {
        # Either one depth for all samples, a per-sample mapping, or "auto"
        # (resolved later by _update_and_check_max_read_depth).
        "MaxReadDepth": Or(
            IsUnsignedInt, IsDictOf(IsStr, IsUnsignedInt), StringIn(("auto",))
        ),
        "--keep-ambigious-genotypes": IsNone,
        "--min-quality": IsUnsignedInt,
        "--min-allele-frequency": RemovedOption,
        "--min-mapping-quality": IsUnsignedInt,
        "--min-read-depth": IsUnsignedInt,
        "--max-read-depth": IsUnsignedInt,
        "--min-num-alt-bases": IsUnsignedInt,
        "--min-distance-to-indels": IsUnsignedInt,
        "--min-distance-between-indels": IsUnsignedInt,
        "--min-strand-bias": IsFloat,
        "--min-baseq-bias": IsFloat,
        "--min-mapq-bias": IsFloat,
        "--min-end-distance-bias": IsFloat,
    },
}
# Per-region multiple sequence alignment settings.
_VALIDATION_MSA = {
    "Enabled": IsBoolean(default=True),
    "Program": StringIn(("mafft",)),  # TODO: Add support for other programs
    "MAFFT": {
        "Algorithm": StringIn(
            (
                "mafft",
                "auto",
                "FFT-NS-1",
                "FFT-NS-2",
                "FFT-NS-i",
                "NW-INS-i",
                "L-INS-i",
                "E-INS-i",
                "G-INS-i",
            )
        ),
        StringStartsWith("-"): CLI_PARAMETERS,
    },
}
# Top-level schema passed to paleomix.common.makefile.read_makefile.
_VALIDATION = {
    "Project": {
        "Title": IsStr(default="Untitled"),
        "Samples": _VALIDATION_SAMPLES,
        "RegionsOfInterest": {
            IsStr: {
                "Prefix": IsStr(default=REQUIRED_VALUE),
                "Realigned": RemovedOption(),
                "ProteinCoding": IsBoolean(default=False),
                "IncludeIndels": IsBoolean(default=True),
                "HomozygousContigs": {
                    IsStr: Or(IsNone, IsListOf(IsStr)),
                    # The sex 'NA' defaults to no homozygous chromosomes
                    "NA": Or(IsNone, IsListOf(IsStr), default=[]),
                },
            }
        },
        "FilterSingletons": {IsStr: [IsStr]},
    },
    "Genotyping": {"Defaults": _VALIDATION_GENOTYPES, IsStr: _VALIDATION_GENOTYPES},
    "MultipleSequenceAlignment": {"Defaults": _VALIDATION_MSA, IsStr: _VALIDATION_MSA},
    "PhylogeneticInference": {
        IsStr: {
            # Which program to use; TODO: Add support for other programs
            "Program": StringIn(("examl",), default="examl"),
            # Exclude one or more samples from the phylogeny
            "ExcludeSamples": [IsStr],
            # Which samples to root the final trees on / or midpoint rooting
            "RootTreesOn": [IsStr],
            # Create a tree per gene, for each region of interest,
            # or create a supermatrix tree from all regions specified.
            "PerGeneTrees": IsBoolean(default=False),
            # Selection of regions of interest / settings per region
            "RegionsOfInterest": {
                IsStr: {
                    "Partitions": Or(
                        And(IsStr, ValuesSubsetOf("123456789X")),
                        ValueIn([False]),
                        default=REQUIRED_VALUE,
                    ),
                    "SubsetRegions": Or(IsStr, IsNone, default=None),
                }
            },
            "SubsetRegions": {IsStr: IsStr},
            "ExaML": {
                "Bootstraps": IsUnsignedInt(default=100),
                "Replicates": IsUnsignedInt(default=1),
                "Model": StringIn(("GAMMA", "PSR"), default="gamma"),
            },
        }
    },
}
|
MikkelSchubert/paleomix
|
paleomix/pipelines/phylo/makefile.py
|
Python
|
mit
| 29,283
|
[
"pysam"
] |
9e0d2e173a80599d1422c54643ddac74976bf6d5ab761abc9858472fbcb8e644
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import numpy
from scipy.lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search)
# standard status messages of optimizers
# (keys are looked up by name from the individual solver routines when
# building the OptimizeResult message / printed warnings)
_status_message = {'success': 'Optimization terminated successfully.',
                   'maxfev': 'Maximum number of function evaluations has '
                             'been exceeded.',
                   'maxiter': 'Maximum number of iterations has been '
                              'exceeded.',
                   'pr_loss': 'Desired error not necessarily achieved due '
                              'to precision loss.'}
class MemoizeJac(object):
    """Decorator that caches the gradient returned alongside the function
    value, for objectives returning ``(f, g)`` pairs.

    Calling the instance evaluates the wrapped function and stores its
    gradient; `derivative` returns the cached gradient when queried at the
    same point, re-evaluating otherwise.
    """

    def __init__(self, fun):
        # fun(x, *args) must return an indexable pair (value, gradient).
        self.fun = fun
        self.jac = None  # gradient from the most recent evaluation
        self.x = None    # point of the most recent evaluation

    def __call__(self, x, *args):
        """Evaluate the function at `x`, caching its gradient."""
        self.x = numpy.asarray(x).copy()
        fg = self.fun(x, *args)
        self.jac = fg[1]
        return fg[0]

    def derivative(self, x, *args):
        """Return the gradient at `x`, reusing the cache when possible."""
        # FIX: numpy.alltrue was removed in NumPy 2.0; numpy.all is the
        # long-standing equivalent and behaves identically here.
        if self.jac is not None and numpy.all(x == self.x):
            return self.jac
        else:
            self(x, *args)
            return self.jac
class OptimizeResult(dict):
    """Represents the result of an optimization run.

    Behaves like a plain dict whose entries are also reachable as
    attributes.

    Attributes
    ----------
    x : ndarray
        The solution of the optimization.
    success : bool
        Whether or not the optimizer exited successfully.
    status : int
        Termination status of the optimizer. Its value depends on the
        underlying solver. Refer to `message` for details.
    message : str
        Description of the cause of the termination.
    fun, jac, hess, hess_inv : ndarray
        Values of objective function, Jacobian, Hessian or its inverse (if
        available). The Hessians may be approximations, see the documentation
        of the function in question.
    nfev, njev, nhev : int
        Number of evaluations of the objective functions and of its
        Jacobian and Hessian.
    nit : int
        Number of iterations performed by the optimizer.
    maxcv : float
        The maximum constraint violation.

    Notes
    -----
    Additional attributes may be present depending on the solver; use the
    ``keys()`` method to see which are available.
    """

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        if not self.keys():
            return self.__class__.__name__ + "()"
        width = max(len(key) for key in self.keys()) + 1
        return '\n'.join(key.rjust(width) + ': ' + repr(value)
                         for key, value in self.items())
class OptimizeWarning(UserWarning):
    """Warning category used by the optimizers (e.g. for unknown options)."""
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
    """Return True when `x` is a scalar or an array holding one element."""
    return 1 == np.size(x)
# Square root of float64 machine epsilon; presumably the default step for
# finite-difference gradients elsewhere in this module -- TODO confirm.
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
    """Return the `ord`-norm of the vector `x`.

    ``ord=numpy.inf`` gives the max-norm, ``ord=-numpy.inf`` the min of the
    absolute values, and any other `ord` the usual p-norm
    ``sum(|x|**ord) ** (1/ord)`` (summed along axis 0).
    """
    # FIX: the `Inf` alias (numpy.Inf) was removed in NumPy 2.0;
    # numpy.inf is the canonical, value-identical spelling.
    if ord == numpy.inf:
        return numpy.amax(numpy.abs(x))
    elif ord == -numpy.inf:
        return numpy.amin(numpy.abs(x))
    else:
        return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
    """
    The Rosenbrock function, computed as::

        sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)

    Parameters
    ----------
    x : array_like
        1-D array of points at which to evaluate the function.

    Returns
    -------
    f : float
        The value of the Rosenbrock function at `x`.

    See Also
    --------
    rosen_der, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    head, tail = x[:-1], x[1:]
    return numpy.sum(100.0 * (tail - head**2.0)**2.0 + (1 - head)**2.0,
                     axis=0)
def rosen_der(x):
    """
    The gradient of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the derivative is to be computed.

    Returns
    -------
    rosen_der : (N,) ndarray
        The gradient of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_hess, rosen_hess_prod
    """
    x = asarray(x)
    grad = numpy.zeros_like(x)
    mid, prev, nxt = x[1:-1], x[:-2], x[2:]
    # Interior components couple each coordinate to both neighbours.
    grad[1:-1] = (200 * (mid - prev**2) -
                  400 * (nxt - mid**2) * mid - 2 * (1 - mid))
    grad[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
    grad[-1] = 200 * (x[-1] - x[-2]**2)
    return grad
def rosen_hess(x):
    """
    The Hessian matrix of the Rosenbrock function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.

    Returns
    -------
    rosen_hess : ndarray
        The Hessian matrix of the Rosenbrock function at `x`.

    See Also
    --------
    rosen, rosen_der, rosen_hess_prod
    """
    x = atleast_1d(x)
    # Off-diagonal bands first, then the main diagonal.
    band = 400 * x[:-1]
    hess = numpy.diag(-band, 1) - numpy.diag(band, -1)
    diagonal = numpy.zeros(len(x), dtype=x.dtype)
    diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
    diagonal[-1] = 200
    diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
    return hess + numpy.diag(diagonal)
def rosen_hess_prod(x, p):
    """
    Product of the Rosenbrock Hessian with a vector, without forming the
    full Hessian matrix.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Hessian matrix is to be computed.
    p : array_like
        1-D array, the vector to be multiplied by the Hessian matrix.

    Returns
    -------
    rosen_hess_prod : ndarray
        The Hessian of the Rosenbrock function at `x` multiplied by `p`.

    See Also
    --------
    rosen, rosen_der, rosen_hess
    """
    x = atleast_1d(x)
    out = numpy.zeros(len(x), dtype=x.dtype)
    out[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
    out[1:-1] = (-400 * x[:-2] * p[:-2] +
                 (202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
                 400 * x[1:-1] * p[2:])
    out[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
    return out
def wrap_function(function, args):
    """Wrap `function` so extra `args` are appended to every call, and
    return ``(ncalls, wrapper)`` where ``ncalls[0]`` counts invocations.

    If `function` is None, the wrapper is None as well.
    """
    counter = [0]
    if function is None:
        return counter, None

    def wrapped(*call_args):
        counter[0] += 1
        return function(*(call_args + args))

    return counter, wrapped
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
         full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using the Nelder-Mead downhill simplex algorithm.

    Only function values are used; no derivative or second-derivative
    information is required.

    Parameters
    ----------
    func : callable func(x,*args)
        The objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func, i.e. ``f(x,*args)``.
    callback : callable, optional
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    xtol : float, optional
        Relative error in xopt acceptable for convergence.
    ftol : number, optional
        Relative error in func(xopt) acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : number, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        Set to True if fopt and warnflag outputs are desired.
    disp : bool, optional
        Set to True to print convergence messages.
    retall : bool, optional
        Set to True to return list of solutions at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameter that minimizes function.
    fopt : float
        Value of function at minimum: ``fopt = func(xopt)``.
    iter : int
        Number of iterations performed.
    funcalls : int
        Number of function calls made.
    warnflag : int
        1 : Maximum number of function evaluations made.
        2 : Maximum number of iterations reached.
    allvecs : list
        Solution at each iteration.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'Nelder-Mead' `method` in particular.

    Notes
    -----
    The Nelder-Mead simplex method has a long history of successful use,
    but is usually slower than derivative-based methods, can perform
    poorly in high dimensions, and has no complete convergence theory.

    References
    ----------
    .. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
           minimization", The Computer Journal, 7, pp. 308-313
    .. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
           Respectable", in Numerical Analysis 1995, Proceedings of the
           1995 Dundee Biennial Conference in Numerical Analysis, D.F.
           Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
           Harlow, UK, pp. 191-208.
    """
    options = {'xtol': xtol,
               'ftol': ftol,
               'maxiter': maxiter,
               'maxfev': maxfun,
               'disp': disp,
               'return_all': retall}

    result = _minimize_neldermead(func, x0, args, callback=callback, **options)
    if full_output:
        out = (result['x'], result['fun'], result['nit'],
               result['nfev'], result['status'])
        if retall:
            out += (result['allvecs'],)
        return out
    if retall:
        return result['x'], result['allvecs']
    return result['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
                         xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                         disp=False, return_all=False,
                         **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.

    Options for the Nelder-Mead algorithm are:
        disp : bool
            Set to True to print convergence messages.
        xtol : float
            Relative error in solution `xopt` acceptable for convergence.
        ftol : float
            Relative error in ``fun(xopt)`` acceptable for convergence.
        maxiter : int
            Maximum number of iterations to perform.
        maxfev : int
            Maximum number of function evaluations to make.

    This function is called by the `minimize` function with
    `method=Nelder-Mead`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # Wrap the objective so that evaluations are counted in fcalls[0].
    fcalls, func = wrap_function(func, args)
    x0 = asfarray(x0).flatten()
    N = len(x0)
    if maxiter is None:
        maxiter = N * 200
    if maxfun is None:
        maxfun = N * 200
    # Standard Nelder-Mead coefficients: rho (reflection), chi (expansion),
    # psi (contraction), sigma (shrink).
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    one2np1 = list(range(1, N + 1))
    # The simplex has N+1 vertices (rows of `sim`); fsim holds their values.
    sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
    fsim = numpy.zeros((N + 1,), float)
    sim[0] = x0
    if retall:
        allvecs = [sim[0]]
    fsim[0] = func(x0)
    # Build the initial simplex by perturbing each coordinate of x0 in turn.
    nonzdelt = 0.05
    zdelt = 0.00025
    for k in range(0, N):
        y = numpy.array(x0, copy=True)
        if y[k] != 0:
            y[k] = (1 + nonzdelt)*y[k]
        else:
            y[k] = zdelt
        sim[k + 1] = y
        f = func(y)
        fsim[k + 1] = f
    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)
    iterations = 1
    while (fcalls[0] < maxfun and iterations < maxiter):
        # Converged when the simplex is small both in x and in f.
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
            break
        # Reflect the worst vertex through the centroid of the others.
        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0
        if fxr < fsim[0]:
            # Reflection beat the current best point; try expanding further.
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)
            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)
                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)
                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1
                if doshrink:
                    # Shrink the whole simplex toward the best vertex.
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])
        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1
        if retall:
            allvecs.append(sim[0])
    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print('Warning: ' + msg)
    elif iterations >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print('Warning: ' + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iterations)
            print("         Function evaluations: %d" % fcalls[0])
    result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
    """Finite-difference approximation of the gradient of a scalar function.

    Parameters
    ----------
    xk : array_like
        The coordinate vector at which to determine the gradient of `f`.
    f : callable
        The function of which to determine the gradient (partial
        derivatives).  Should take `xk` as first argument; other arguments
        can be supplied in ``*args``.  Should return a scalar.
    epsilon : array_like
        Increment to `xk` to use for determining the function gradient.
        If a scalar, the same finite-difference delta is used for all
        partial derivatives; if an array, one value per element of `xk`.
    args : args, optional
        Any other arguments that are to be passed to `f`.

    Returns
    -------
    grad : ndarray
        The partial derivatives of `f` at `xk`.

    See Also
    --------
    check_grad : Check correctness of gradient function against approx_fprime.

    Notes
    -----
    Uses the forward finite-difference formula::

                 f(xk[i] + epsilon[i]) - f(xk[i])
        f'[i] = ---------------------------------
                            epsilon[i]

    Mainly used by scalar-function optimizers such as `fmin_bfgs` to
    determine a numerical Jacobian.
    """
    # Delegate to the helper, which also accepts a precomputed f(xk).
    return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
    """Check the correctness of a gradient function by comparing it against a
    (forward) finite-difference approximation of the gradient.

    Parameters
    ----------
    func : callable func(x0,*args)
        Function whose derivative is to be checked.
    grad : callable grad(x0, *args)
        Gradient of `func`.
    x0 : ndarray
        Points at which to compare `grad` against the forward-difference
        approximation computed from `func`.
    args : \\*args, optional
        Extra arguments passed to `func` and `grad`.
    epsilon : float, optional
        Step size for the finite-difference approximation.  Defaults to
        ``sqrt(numpy.finfo(float).eps)``, about 1.49e-08.

    Returns
    -------
    err : float
        The 2-norm of the difference between ``grad(x0, *args)`` and the
        finite-difference approximation of the gradient at `x0`.

    See Also
    --------
    approx_fprime
    """
    # `epsilon` is the only accepted keyword; anything else is a caller bug.
    step = kwargs.pop('epsilon', _epsilon)
    if kwargs:
        raise ValueError("Unknown keyword arguments: %r" %
                         (list(kwargs.keys()),))
    analytic = grad(x0, *args)
    numeric = approx_fprime(x0, func, step, *args)
    return sqrt(sum((analytic - numeric) ** 2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
    """Approximate the Hessian-vector product ``H(x0) @ p`` by a forward
    difference of the gradient `fprime` along direction `p`."""
    grad_shifted = fprime(*((x0 + epsilon * p,) + args))
    grad_base = fprime(*((x0,) + args))
    return (grad_shifted - grad_base) / epsilon
class _LineSearchError(RuntimeError):
    # Raised internally by _line_search_wolfe12 when neither Wolfe line
    # search can find a suitable step length.
    pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
                         **kwargs):
    """
    Same as line_search_wolfe1, but fall back to line_search_wolfe2 if a
    suitable step length is not found, and raise an exception if still
    unsuccessful.

    Raises
    ------
    _LineSearchError
        If no suitable step size is found.
    """
    result = line_search_wolfe1(f, fprime, xk, pk, gfk,
                                old_fval, old_old_fval,
                                **kwargs)
    # result[0] is the step length alpha; None signals failure.
    if result[0] is not None:
        return result
    # Primary search failed: retry with the alternative implementation.
    result = line_search_wolfe2(f, fprime, xk, pk, gfk,
                                old_fval, old_old_fval)
    if result[0] is None:
        raise _LineSearchError()
    return result
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
              epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
              retall=0, callback=None):
    """
    Minimize a function using the BFGS algorithm.

    Parameters
    ----------
    f : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable f'(x,*args), optional
        Gradient of f.  If None, it is approximated numerically using
        step size `epsilon`.
    args : tuple, optional
        Extra arguments passed to f and fprime.
    gtol : float, optional
        Gradient norm must be less than gtol before successful termination.
    norm : float, optional
        Order of norm (Inf is max, -Inf is min).
    epsilon : int or ndarray, optional
        Step size used when fprime is approximated.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, also return fopt, gopt, Bopt, func_calls, grad_calls and
        warnflag.
    disp : bool, optional
        Print convergence message if True.
    retall : bool, optional
        Return a list of results at each iteration if True.
    callback : callable, optional
        Called after each iteration as callback(xk), where xk is the
        current parameter vector.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. f(xopt) == fopt.
    fopt : float
        Minimum value (full_output only).
    gopt : ndarray
        Gradient at the minimum, should be near 0 (full_output only).
    Bopt : ndarray
        Approximate inverse Hessian at the minimum (full_output only).
    func_calls : int
        Number of function calls made (full_output only).
    grad_calls : int
        Number of gradient calls made (full_output only).
    warnflag : integer
        1 : maximum number of iterations exceeded.
        2 : gradient and/or function calls not changing.
    allvecs : list
        Parameter vector at each iteration (only if retall is True).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions.  See the 'BFGS' `method` in particular.

    References
    ----------
    Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
    """
    # Translate the legacy keyword names into the solver's option names
    # and delegate to the real implementation.
    res = _minimize_bfgs(f, x0, args, fprime, callback=callback,
                         gtol=gtol, norm=norm, eps=epsilon, disp=disp,
                         maxiter=maxiter, return_all=retall)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['jac'], res['hess_inv'],
           res['nfev'], res['njev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
                   gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                   disp=False, return_all=False,
                   **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    BFGS algorithm.

    Options for the BFGS algorithm are:
        disp : bool
            Set to True to print convergence messages.
        maxiter : int
            Maximum number of iterations to perform.
        gtol : float
            Gradient norm must be less than `gtol` before successful
            termination.
        norm : float
            Order of norm (Inf is max, -Inf is min).
        eps : float or ndarray
            If `jac` is approximated, use this value for the step size.

    Returns an `OptimizeResult` with the solution `x`, final gradient
    `jac`, approximate inverse Hessian `hess_inv`, evaluation counts and
    a status/message pair.

    This function is called by the `minimize` function with `method=BFGS`.
    It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    # Rebind to the historical local names used throughout the body.
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if x0.ndim == 0:
        x0.shape = (1,)
    if maxiter is None:
        maxiter = len(x0) * 200
    # wrap_function returns a one-element call-counter list and the wrapper.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        # No analytic gradient: fall back to forward finite differences.
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    N = len(x0)
    I = numpy.eye(N, dtype=int)
    # Start with the identity as the inverse-Hessian approximation.
    Hk = I
    old_fval = f(x0)
    old_old_fval = None
    xk = x0
    if retall:
        allvecs = [x0]
    # Sentinel so the loop condition is well-defined on the first pass.
    sk = [2 * gtol]
    warnflag = 0
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        # Quasi-Newton search direction: p = -H * grad.
        pk = -numpy.dot(Hk, gfk)
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     _line_search_wolfe12(f, myfprime, xk, pk, gfk,
                                          old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk          # step taken
        xk = xkp1
        if gfkp1 is None:
            gfkp1 = myfprime(xkp1)
        yk = gfkp1 - gfk        # change in gradient
        gfk = gfkp1
        if callback is not None:
            callback(xk)
        k += 1
        gnorm = vecnorm(gfk, ord=norm)
        if (gnorm <= gtol):
            break
        if not numpy.isfinite(old_fval):
            # We correctly found +-Inf as optimal value, or something went
            # wrong.
            warnflag = 2
            break
        try:  # this was handled in numeric, let it remain for more safety
            rhok = 1.0 / (numpy.dot(yk, sk))
        except ZeroDivisionError:
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is patch for numpy: array division by zero
            # yields inf (not ZeroDivisionError), so catch it here too.
            rhok = 1000.0
            if disp:
                print("Divide-by-zero encountered: rhok assumed large")
        # BFGS inverse-Hessian update:
        #   H <- (I - rho*s*y') H (I - rho*y*s') + rho*s*s'
        A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
        A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
        Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
                                                 sk[numpy.newaxis, :])
    fval = old_fval
    # Classify the termination reason and optionally report it.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
            maxiter=None, full_output=0, disp=1, retall=0, callback=None):
    """
    Minimize a function using a nonlinear conjugate gradient algorithm.

    Parameters
    ----------
    f : callable, ``f(x, *args)``
        Objective function to be minimized.  `x` must be a 1-D array of
        the variables, and `args` are the other (fixed) parameters of `f`.
    x0 : ndarray
        User-supplied initial estimate of `xopt` (1-D array).
    fprime : callable, ``fprime(x, *args)``, optional
        Returns the gradient of `f` at `x` as a 1-D array.  Defaults to
        None, in which case the gradient is approximated numerically
        (see `epsilon`).
    args : tuple, optional
        Parameter values passed to `f` and `fprime`.
    gtol : float, optional
        Stop when the norm of the gradient is less than `gtol`.
    norm : float, optional
        Order to use for the norm of the gradient
        (``-np.Inf`` is min, ``np.Inf`` is max).
    epsilon : float or ndarray, optional
        Step size(s) used when `fprime` is approximated numerically.
        Defaults to ``sqrt(eps)``, about 1.5e-8.
    maxiter : int, optional
        Maximum number of iterations.  Default is ``200 * len(x0)``.
    full_output : bool, optional
        If True, also return `fopt`, `func_calls`, `grad_calls` and
        `warnflag`.
    disp : bool, optional
        If True, print a convergence message.
    retall : bool, optional
        If True, also return the results of each iteration.
    callback : callable, optional
        Called after each iteration as ``callback(xk)``.

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float, optional
        Minimum value found.  Only returned if `full_output` is True.
    func_calls : int, optional
        Number of function calls made.  Only returned if `full_output`
        is True.
    grad_calls : int, optional
        Number of gradient calls made.  Only returned if `full_output`
        is True.
    warnflag : int, optional
        Only returned if `full_output` is True.
        0 : Success.
        1 : The maximum number of iterations was exceeded.
        2 : Gradient and/or function calls were not changing; may
            indicate that precision was lost.
    allvecs : list of ndarray, optional
        Results at each iteration.  Only returned if `retall` is True.

    See Also
    --------
    minimize : common interface to all `scipy.optimize` algorithms;
        equivalent to calling with ``method='CG'``.

    Notes
    -----
    This conjugate gradient algorithm is based on that of Polak and
    Ribiere.  It tends to work best when `f` has a unique global minimum,
    is locally well approximated by a quadratic, has a continuous
    gradient, and the initial guess is reasonably close to the minimum.

    References
    ----------
    .. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
    """
    # Translate the legacy keyword names into the solver's option names
    # and delegate to the real implementation.
    res = _minimize_cg(f, x0, args, fprime, callback=callback,
                       gtol=gtol, norm=norm, eps=epsilon, disp=disp,
                       maxiter=maxiter, return_all=retall)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nfev'], res['njev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
                 gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
                 disp=False, return_all=False,
                 **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    conjugate gradient algorithm (Polak-Ribiere variant).

    Options for the conjugate gradient algorithm are:
        disp : bool
            Set to True to print convergence messages.
        maxiter : int
            Maximum number of iterations to perform.
        gtol : float
            Gradient norm must be less than `gtol` before successful
            termination.
        norm : float
            Order of norm (Inf is max, -Inf is min).
        eps : float or ndarray
            If `jac` is approximated, use this value for the step size.

    This function is called by the `minimize` function with `method=CG`. It
    is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    # Rebind to the historical local names used throughout the body.
    f = fun
    fprime = jac
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    if maxiter is None:
        maxiter = len(x0) * 200
    # wrap_function returns a one-element call-counter list and the wrapper.
    func_calls, f = wrap_function(f, args)
    if fprime is None:
        # No analytic gradient: fall back to forward finite differences.
        grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
    else:
        grad_calls, myfprime = wrap_function(fprime, args)
    gfk = myfprime(x0)
    k = 0
    xk = x0
    old_fval = f(xk)
    old_old_fval = None
    if retall:
        allvecs = [xk]
    warnflag = 0
    # First search direction is steepest descent.
    pk = -gfk
    gnorm = vecnorm(gfk, ord=norm)
    while (gnorm > gtol) and (k < maxiter):
        deltak = numpy.dot(gfk, gfk)
        try:
            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     _line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
                                          old_old_fval, c2=0.4)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        xk = xk + alpha_k * pk
        if retall:
            allvecs.append(xk)
        if gfkp1 is None:
            gfkp1 = myfprime(xk)
        yk = gfkp1 - gfk
        # Polak-Ribiere coefficient, clipped at 0 (automatic restart).
        beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
        pk = -gfkp1 + beta_k * pk
        gfk = gfkp1
        gnorm = vecnorm(gfk, ord=norm)
        if callback is not None:
            callback(xk)
        k += 1
    fval = old_fval
    # Classify the termination reason and optionally report it.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % func_calls[0])
            print("         Gradient evaluations: %d" % grad_calls[0])
    result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
                            njev=grad_calls[0], status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk)
    if retall:
        result['allvecs'] = allvecs
    return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """
    Unconstrained minimization of a function using the Newton-CG method.

    Parameters
    ----------
    f : callable ``f(x, *args)``
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    fprime : callable ``f'(x, *args)``
        Gradient of f.
    fhess_p : callable ``fhess_p(x, p, *args)``, optional
        Computes the Hessian of f times an arbitrary vector, p.
    fhess : callable ``fhess(x, *args)``, optional
        Computes the Hessian matrix of f.  If given, `fhess_p` is ignored.
    args : tuple, optional
        Extra arguments passed to f, fprime, fhess_p and fhess (the same
        set is supplied to all of them).
    avextol : float, optional
        Convergence is assumed when the average relative error in the
        minimizer falls below this amount.
    epsilon : float or ndarray, optional
        Step size used when the Hessian is approximated.
    maxiter : int, optional
        Maximum number of iterations to perform.
    full_output : bool, optional
        If True, also return fopt, fcalls, gcalls, hcalls and warnflag.
    disp : bool, optional
        If True, print convergence message.
    retall : bool, optional
        If True, also return a list of results at each iteration.
    callback : callable, optional
        Called after each iteration as callback(xk).

    Returns
    -------
    xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
    fopt : float
        Function value at xopt (full_output only).
    fcalls : int
        Number of function calls made (full_output only).
    gcalls : int
        Number of gradient calls made (full_output only).
    hcalls : int
        Number of Hessian calls made (full_output only).
    warnflag : int
        1 : maximum number of iterations exceeded (full_output only).
    allvecs : list
        The result at each iteration (only if retall is True).

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions.  See the 'Newton-CG' `method` in particular.

    Notes
    -----
    Only one of `fhess_p` or `fhess` need be given.  If neither is
    provided, the Hessian-vector product is approximated using finite
    differences on `fprime`.  Newton-CG methods are also called truncated
    Newton methods; unlike ``fmin_tnc`` this implementation is pure
    Python/NumPy and handles only unconstrained problems.

    References
    ----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
    """
    # Translate the legacy keyword names into the solver's option names
    # and delegate to the real implementation.
    res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
                             callback=callback, xtol=avextol, eps=epsilon,
                             maxiter=maxiter, disp=disp, return_all=retall)
    if not full_output:
        return (res['x'], res['allvecs']) if retall else res['x']
    out = (res['x'], res['fun'], res['nfev'], res['njev'],
           res['nhev'], res['status'])
    if retall:
        out += (res['allvecs'],)
    return out
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
                       callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
                       disp=False, return_all=False,
                       **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Newton-CG algorithm (truncated Newton: an inner conjugate-gradient
    solve of the Newton system, wrapped in an outer line-search loop).

    Options for the Newton-CG algorithm are:
        disp : bool
            Set to True to print convergence messages.
        xtol : float
            Average relative error in solution `xopt` acceptable for
            convergence.
        maxiter : int
            Maximum number of iterations to perform.
        eps : float or ndarray
            If `jac` is approximated, use this value for the step size.

    This function is called by the `minimize` function with
    `method=Newton-CG`. It is not supposed to be called directly.
    Also note that the `jac` parameter (Jacobian) is required.
    """
    _check_unknown_options(unknown_options)
    if jac is None:
        raise ValueError('Jacobian is required for Newton-CG method')
    # Rebind to the historical local names used throughout the body.
    f = fun
    fprime = jac
    fhess_p = hessp
    fhess = hess
    avextol = xtol
    epsilon = eps
    retall = return_all
    x0 = asarray(x0).flatten()
    # wrap_function returns a one-element call-counter list and the wrapper.
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200
    # Scale the per-component tolerance by the problem dimension.
    xtol = len(x0) * avextol
    # Sentinel so the loop condition is well-defined on the first pass.
    update = [2 * xtol]
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    old_old_fval = None
    float64eps = numpy.finfo(numpy.float64).eps
    warnflag = 0
    while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter):
        # Compute a search direction pk by applying the CG method to
        #  del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(numpy.abs(b))
        # Forcing-sequence termination tolerance for the inner CG solve.
        eta = numpy.min([0.5, numpy.sqrt(maggrad)])
        termcond = eta * maggrad
        xsupi = zeros(len(x0), dtype=x0.dtype)
        ri = -b
        psupi = -ri
        i = 0
        dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # you want to compute hessian once.
            A = fhess(*(xk,) + args)
            hcalls = hcalls + 1
        # Inner CG iteration on the Newton system.
        while numpy.add.reduce(numpy.abs(ri)) > termcond:
            if fhess is None:
                if fhess_p is None:
                    # Approximate the Hessian-vector product by finite
                    # differences on the gradient.
                    Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
                else:
                    Ap = fhess_p(xk, psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A, psupi)
            # check curvature
            Ap = asarray(Ap).squeeze()  # get rid of matrices...
            curv = numpy.dot(psupi, Ap)
            if 0 <= curv <= 3 * float64eps:
                break
            elif curv < 0:
                if (i > 0):
                    break
                else:
                    # fall back to steepest descent direction
                    xsupi = dri0 / (-curv) * b
                    break
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri, ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1          # update numpy.dot(ri,ri) for next time.
        pk = xsupi  # search direction is solution to system.
        gfk = -b    # gradient at xk
        try:
            alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
                     _line_search_wolfe12(f, fprime, xk, pk, gfk,
                                          old_fval, old_old_fval)
        except _LineSearchError:
            # Line search failed to find a better solution.
            warnflag = 2
            break
        update = alphak * pk
        xk = xk + update        # upcast if necessary
        if callback is not None:
            callback(xk)
        if retall:
            allvecs.append(xk)
        k += 1
    fval = old_fval
    # Classify the termination reason and optionally report it.
    if warnflag == 2:
        msg = _status_message['pr_loss']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % fcalls[0])
            print("         Gradient evaluations: %d" % gcalls[0])
            print("         Hessian evaluations: %d" % hcalls)
    elif k >= maxiter:
        warnflag = 1
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % fcalls[0])
            print("         Gradient evaluations: %d" % gcalls[0])
            print("         Hessian evaluations: %d" % hcalls)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % k)
            print("         Function evaluations: %d" % fcalls[0])
            print("         Gradient evaluations: %d" % gcalls[0])
            print("         Hessian evaluations: %d" % hcalls)
    result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
                            nhev=hcalls, status=warnflag,
                            success=(warnflag == 0), message=msg, x=xk)
    if retall:
        result['allvecs'] = allvecs
    return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
              full_output=0, disp=1):
    """Bounded minimization for scalar functions.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized (must accept and return
        scalars).
    x1, x2 : float or array scalar
        The optimization bounds.
    args : tuple, optional
        Extra arguments passed to function.
    xtol : float, optional
        The convergence tolerance.
    maxfun : int, optional
        Maximum number of function evaluations allowed.
    full_output : bool, optional
        If True, also return fval, ierr and numfunc.
    disp : int, optional
        If non-zero, print messages.
        0 : no message printing.
        1 : non-convergence notification messages only.
        2 : print a message on convergence too.
        3 : print iteration results.

    Returns
    -------
    xopt : ndarray
        Parameters (over given interval) which minimize the objective
        function.
    fval : number
        Function value at the minimum point (full_output only).
    ierr : int
        Error flag: 0 if converged, 1 if the maximum number of function
        calls was reached (full_output only).
    numfunc : int
        Number of function calls made (full_output only).

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions.  See the 'Bounded' `method` in particular.

    Notes
    -----
    Finds a local minimizer of the scalar function `func` in the interval
    x1 < xopt < x2 using Brent's method.  (See `brent` for
    auto-bracketing.)
    """
    # Translate the legacy keyword names and delegate to the solver.
    res = _minimize_scalar_bounded(func, (x1, x2), args,
                                   xatol=xtol, maxiter=maxfun, disp=disp)
    if full_output:
        return res['x'], res['fun'], res['status'], res['nfev']
    return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
                             xatol=1e-5, maxiter=500, disp=0,
                             **unknown_options):
    """
    Bounded scalar minimization over ``bounds = (x1, x2)`` using a
    Brent-style combination of golden-section search and parabolic
    interpolation.  Called by `minimize_scalar` with `method='Bounded'`;
    not supposed to be called directly.

    Options:
        xatol : float
            Absolute tolerance on the minimizer.
        maxiter : int
            Maximum number of function evaluations.
        disp : int
            Verbosity (0 silent ... 3 per-iteration output).

    Returns an `OptimizeResult` with the minimizer `x`, value `fun`,
    status flag and evaluation count.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxiter
    # Test bounds are of correct form
    if len(bounds) != 2:
        raise ValueError('bounds must have two elements.')
    x1, x2 = bounds
    if not (is_array_scalar(x1) and is_array_scalar(x2)):
        raise ValueError("Optimisation bounds must be scalars"
                         " or array scalars.")
    if x1 > x2:
        raise ValueError("The lower bound exceeds the upper bound.")
    flag = 0
    header = ' Func-count     x          f(x)          Procedure'
    step = '       initial'
    sqrt_eps = sqrt(2.2e-16)
    # (3 - sqrt(5)) / 2: the golden-section fraction.
    golden_mean = 0.5 * (3.0 - sqrt(5.0))
    a, b = x1, x2
    fulc = a + golden_mean * (b - a)
    nfc, xf = fulc, fulc
    rat = e = 0.0
    x = xf
    fx = func(x, *args)
    num = 1
    fmin_data = (1, xf, fx)
    ffulc = fnfc = fx
    xm = 0.5 * (a + b)
    tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
    tol2 = 2.0 * tol1
    if disp > 2:
        print(" ")
        print(header)
        print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))
    while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
        golden = 1
        # Check for parabolic fit
        if numpy.abs(e) > tol1:
            golden = 0
            r = (xf - nfc) * (fx - ffulc)
            q = (xf - fulc) * (fx - fnfc)
            p = (xf - fulc) * q - (xf - nfc) * r
            q = 2.0 * (q - r)
            if q > 0.0:
                p = -p
            q = numpy.abs(q)
            r = e
            e = rat
            # Check for acceptability of parabola
            if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
                    (p < q * (b - xf))):
                rat = (p + 0.0) / q
                x = xf + rat
                step = '       parabolic'
                # Keep the trial point at least tol2 away from the bounds.
                if ((x - a) < tol2) or ((b - x) < tol2):
                    si = numpy.sign(xm - xf) + ((xm - xf) == 0)
                    rat = tol1 * si
            else:      # do a golden section step
                golden = 1
        if golden:  # Do a golden-section step
            if xf >= xm:
                e = a - xf
            else:
                e = b - xf
            rat = golden_mean*e
            step = '       golden'
        # Enforce a minimum step of tol1, preserving its sign.
        si = numpy.sign(rat) + (rat == 0)
        x = xf + si * numpy.max([numpy.abs(rat), tol1])
        fu = func(x, *args)
        num += 1
        fmin_data = (num, x, fu)
        if disp > 2:
            print("%5.0f   %12.6g %12.6g %s" % (fmin_data + (step,)))
        # Update the bracketing triple (fulc, nfc, xf) with the new point.
        if fu <= fx:
            if x >= xf:
                a = xf
            else:
                b = xf
            fulc, ffulc = nfc, fnfc
            nfc, fnfc = xf, fx
            xf, fx = x, fu
        else:
            if x < xf:
                a = x
            else:
                b = x
            if (fu <= fnfc) or (nfc == xf):
                fulc, ffulc = nfc, fnfc
                nfc, fnfc = x, fu
            elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
                fulc, ffulc = x, fu
        xm = 0.5 * (a + b)
        tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
        tol2 = 2.0 * tol1
        if num >= maxfun:
            flag = 1
            break
    fval = fx
    if disp > 0:
        _endprint(x, flag, fval, maxfun, xatol, disp)
    result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
                            message={0: 'Solution found.',
                                     1: 'Maximum number of function calls '
                                        'reached.'}.get(flag, ''),
                            x=xf, nfev=num)
    return result
class Brent:
    """Stateful implementation of Brent's method for scalar minimization:
    parabolic interpolation with a golden-section fallback inside a
    shrinking bracket.  Used by `brent`/`minimize_scalar`."""
    #need to rethink design of __init__
    def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
                 full_output=0):
        self.func = func
        self.args = args
        self.tol = tol
        self.maxiter = maxiter
        # Minimum absolute tolerance, guards against tol1 underflowing.
        self._mintol = 1.0e-11
        # Golden-section fraction, (3 - sqrt(5)) / 2.
        self._cg = 0.3819660
        # Outputs populated by optimize().
        self.xmin = None
        self.fval = None
        self.iter = 0
        self.funcalls = 0
    # need to rethink design of set_bracket (new options, etc)
    def set_bracket(self, brack=None):
        self.brack = brack
    def get_bracket_info(self):
        """Return (xa, xb, xc, fa, fb, fc, funcalls) describing a valid
        bracketing triple, auto-bracketing if none was supplied."""
        #set up
        func = self.func
        args = self.args
        brack = self.brack
        ### BEGIN core bracket_info code ###
        ### carefully DOCUMENT any CHANGES in core ##
        if brack is None:
            # No hints at all: search for a bracket from a default start.
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
        elif len(brack) == 2:
            # Two starting abscissae: let bracket() find the third.
            xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                       xb=brack[1], args=args)
        elif len(brack) == 3:
            # Full triple supplied: validate it is a genuine bracket.
            xa, xb, xc = brack
            if (xa > xc):  # swap so xa < xc can be assumed
                xc, xa = xa, xc
            if not ((xa < xb) and (xb < xc)):
                raise ValueError("Not a bracketing interval.")
            fa = func(*((xa,) + args))
            fb = func(*((xb,) + args))
            fc = func(*((xc,) + args))
            if not ((fb < fa) and (fb < fc)):
                raise ValueError("Not a bracketing interval.")
            funcalls = 3
        else:
            raise ValueError("Bracketing interval must be "
                             "length 2 or 3 sequence.")
        ### END core bracket_info code ###
        return xa, xb, xc, fa, fb, fc, funcalls
    def optimize(self):
        """Run Brent's method; results are stored on the instance and
        retrieved with get_result()."""
        # set up for optimization
        func = self.func
        xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
        _mintol = self._mintol
        _cg = self._cg
        #################################
        #BEGIN CORE ALGORITHM
        #we are making NO CHANGES in this
        #################################
        # x: best point so far; w: second best; v: previous w.
        x = w = v = xb
        fw = fv = fx = func(*((x,) + self.args))
        if (xa < xc):
            a = xa
            b = xc
        else:
            a = xc
            b = xa
        deltax = 0.0
        funcalls = 1
        iter = 0
        while (iter < self.maxiter):
            tol1 = self.tol * numpy.abs(x) + _mintol
            tol2 = 2.0 * tol1
            xmid = 0.5 * (a + b)
            # check for convergence
            if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
                break
            if (numpy.abs(deltax) <= tol1):
                if (x >= xmid):
                    deltax = a - x       # do a golden section step
                else:
                    deltax = b - x
                rat = _cg * deltax
            else:                              # do a parabolic step
                tmp1 = (x - w) * (fx - fv)
                tmp2 = (x - v) * (fx - fw)
                p = (x - v) * tmp2 - (x - w) * tmp1
                tmp2 = 2.0 * (tmp2 - tmp1)
                if (tmp2 > 0.0):
                    p = -p
                tmp2 = numpy.abs(tmp2)
                dx_temp = deltax
                deltax = rat
                # check parabolic fit
                if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
                        (numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
                    rat = p * 1.0 / tmp2        # if parabolic step is useful.
                    u = x + rat
                    if ((u - a) < tol2 or (b - u) < tol2):
                        if xmid - x >= 0:
                            rat = tol1
                        else:
                            rat = -tol1
                else:
                    if (x >= xmid):
                        deltax = a - x  # if it's not do a golden section step
                    else:
                        deltax = b - x
                    rat = _cg * deltax
            if (numpy.abs(rat) < tol1):            # update by at least tol1
                if rat >= 0:
                    u = x + tol1
                else:
                    u = x - tol1
            else:
                u = x + rat
            fu = func(*((u,) + self.args))      # calculate new output value
            funcalls += 1
            if (fu > fx):                 # if it's bigger than current
                if (u < x):
                    a = u
                else:
                    b = u
                if (fu <= fw) or (w == x):
                    v = w
                    w = u
                    fv = fw
                    fw = fu
                elif (fu <= fv) or (v == x) or (v == w):
                    v = u
                    fv = fu
            else:
                # New best point: shrink the bracket toward it and rotate
                # the (v, w, x) history.
                if (u >= x):
                    a = x
                else:
                    b = x
                v = w
                w = x
                x = u
                fv = fw
                fw = fx
                fx = fu
            iter += 1
        #################################
        #END CORE ALGORITHM
        #################################
        self.xmin = x
        self.fval = fx
        self.iter = iter
        self.funcalls = funcalls
    def get_result(self, full_output=False):
        # Return xmin alone, or the full (xmin, fval, iter, funcalls) tuple.
        if full_output:
            return self.xmin, self.fval, self.iter, self.funcalls
        else:
            return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
    """
    Minimize a function of one variable using Brent's method.

    Given a function of one variable and a possible bracketing
    interval, isolate the minimum of the function to a fractional
    precision of `tol`.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function.
    args
        Additional arguments (if present).
    brack : tuple
        Either a triple (a,b,c) with a<b<c and func(b) <
        func(a),func(c), or a pair (a,c) taken as a starting interval
        for a downhill bracket search (see `bracket`); in the latter
        case the obtained solution need not satisfy a<=x<=c.
    tol : float
        Stop if between iteration change is less than `tol`.
    full_output : bool
        If True, return all output args (xmin, fval, iter, funcalls).
    maxiter : int
        Maximum number of iterations in solution.

    Returns
    -------
    xmin : ndarray
        Optimum point.
    fval : float
        Optimum value.
    iter : int
        Number of iterations.
    funcalls : int
        Number of objective function evaluations made.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Brent' `method` in particular.

    Notes
    -----
    Uses inverse parabolic interpolation when possible to speed up
    convergence of golden section method.
    """
    # Delegate to the OptimizeResult-based driver and unpack.
    res = _minimize_scalar_brent(func, brack, args,
                                 xtol=tol, maxiter=maxiter)
    if full_output:
        return res['x'], res['fun'], res['nit'], res['nfev']
    return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
                           xtol=1.48e-8, maxiter=500,
                           **unknown_options):
    """Brent scalar-minimization driver; returns an OptimizeResult.

    Called by `minimize_scalar` with ``method='brent'``; not meant to
    be called directly.
    """
    _check_unknown_options(unknown_options)
    solver = Brent(func=func, args=args, tol=xtol,
                   full_output=True, maxiter=maxiter)
    solver.set_bracket(brack)
    solver.optimize()
    xmin, fval, nit, nfev = solver.get_result(full_output=True)
    return OptimizeResult(fun=fval, x=xmin, nit=nit, nfev=nfev)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
    """
    Return the minimum of a function of one variable.

    Given a function of one variable and a possible bracketing
    interval, isolate the minimum of the function to a fractional
    precision of `tol` using a golden-section search.

    Parameters
    ----------
    func : callable func(x,*args)
        Objective function to minimize.
    args : tuple
        Additional arguments (if present), passed to func.
    brack : tuple
        Either a triple (a,b,c) with a<b<c and func(b) <
        func(a),func(c), or a pair (a,c) taken as a starting interval
        for a downhill bracket search (see `bracket`); in the latter
        case the obtained solution need not satisfy a<=x<=c.
    tol : float
        x tolerance stop criterion
    full_output : bool
        If True, return optional outputs.

    See also
    --------
    minimize_scalar: Interface to minimization algorithms for scalar
        univariate functions. See the 'Golden' `method` in particular.

    Notes
    -----
    Uses analog of bisection method to decrease the bracketed
    interval.
    """
    # Delegate to the OptimizeResult-based driver and unpack.
    res = _minimize_scalar_golden(func, brack, args, xtol=tol)
    if full_output:
        return res['x'], res['fun'], res['nfev']
    return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
                            xtol=_epsilon, **unknown_options):
    """Golden-section scalar-minimization driver.

    Called by `minimize_scalar` with ``method='golden'``; returns an
    OptimizeResult with fields ``fun``, ``nfev`` and ``x``.

    Raises
    ------
    ValueError
        If the supplied bracket does not bracket a minimum or has the
        wrong length.
    """
    _check_unknown_options(unknown_options)
    tol = xtol
    # Establish a valid bracket (same core logic as
    # Brent.get_bracket_info above).
    if brack is None:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
                                                   xb=brack[1], args=args)
    elif len(brack) == 3:
        xa, xb, xc = brack
        if (xa > xc):  # swap so xa < xc can be assumed
            xc, xa = xa, xc
        if not ((xa < xb) and (xb < xc)):
            raise ValueError("Not a bracketing interval.")
        fa = func(*((xa,) + args))
        fb = func(*((xb,) + args))
        fc = func(*((xc,) + args))
        if not ((fb < fa) and (fb < fc)):
            raise ValueError("Not a bracketing interval.")
        funcalls = 3
    else:
        raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
    # Golden-section constants: _gR is the golden-ratio conjugate.
    _gR = 0.61803399
    _gC = 1.0 - _gR
    x3 = xc
    x0 = xa
    # Place the second interior probe point in the larger sub-interval.
    if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
        x1 = xb
        x2 = xb + _gC * (xc - xb)
    else:
        x2 = xb
        x1 = xb - _gC * (xb - xa)
    f1 = func(*((x1,) + args))
    f2 = func(*((x2,) + args))
    funcalls += 2
    # Shrink [x0, x3] keeping the golden ratio until relative
    # convergence.  NOTE(review): there is no iteration cap here; a
    # zero/tiny tol could make this loop run for a very long time.
    while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))):
        if (f2 < f1):
            x0 = x1
            x1 = x2
            x2 = _gR * x1 + _gC * x3
            f1 = f2
            f2 = func(*((x2,) + args))
        else:
            x3 = x2
            x2 = x1
            x1 = _gR * x2 + _gC * x0
            f2 = f1
            f1 = func(*((x1,) + args))
        funcalls += 1
    # Report whichever interior point ended up lower.
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2
    return OptimizeResult(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """
    Bracket the minimum of the function.

    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It doesn't always mean that obtained
    solution will satisfy xa<=x<=xb

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to minimize.
    xa, xb : float, optional
        Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
    args : tuple, optional
        Additional arguments (if present), passed to `func`.
    grow_limit : float, optional
        Maximum grow limit. Defaults to 110.0
    maxiter : int, optional
        Maximum number of iterations to perform. Defaults to 1000.

    Returns
    -------
    xa, xb, xc : float
        Bracket.
    fa, fb, fc : float
        Objective function values in bracket.
    funcalls : int
        Number of function evaluations made.

    Raises
    ------
    RuntimeError
        If more than `maxiter` expansion steps are required.
    """
    # Golden ratio used to grow the interval downhill.
    _gold = 1.618034
    # Guards against a (near-)zero denominator in the parabolic fit.
    _verysmall_num = 1e-21
    fa = func(*(xa,) + args)
    fb = func(*(xb,) + args)
    if (fa < fb):  # Switch so fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    # First trial point: a golden-ratio step past xb.
    xc = xb + _gold * (xb - xa)
    fc = func(*((xc,) + args))
    funcalls = 3
    iter = 0
    while (fc < fb):
        # Parabolic extrapolation through (xa,fa), (xb,fb), (xc,fc).
        tmp1 = (xb - xa) * (fb - fc)
        tmp2 = (xb - xc) * (fb - fa)
        val = tmp2 - tmp1
        if numpy.abs(val) < _verysmall_num:
            denom = 2.0 * _verysmall_num
        else:
            denom = 2.0 * val
        w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
        wlim = xb + grow_limit * (xc - xb)  # farthest point allowed
        if iter > maxiter:
            raise RuntimeError("Too many iterations.")
        iter += 1
        if (w - xc) * (xb - w) > 0.0:
            # Parabolic minimum w lies between xb and xc.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                # Minimum bracketed between xb and xc.
                xa = xb
                xb = w
                fa = fb
                fb = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                # Minimum bracketed between xa and w.
                xc = w
                fc = fw
                return xa, xb, xc, fa, fb, fc, funcalls
            # Parabolic step didn't help; take a default golden step.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(wlim - xc) >= 0.0:
            # w is beyond the allowed growth limit; clamp to wlim.
            w = wlim
            fw = func(*((w,) + args))
            funcalls += 1
        elif (w - wlim)*(xc - w) > 0.0:
            # w lies between xc and wlim.
            fw = func(*((w,) + args))
            funcalls += 1
            if (fw < fc):
                # Still descending; shift and take another golden step.
                xb = xc
                xc = w
                w = xc + _gold * (xc - xb)
                fb = fc
                fc = fw
                fw = func(*((w,) + args))
                funcalls += 1
        else:
            # Reject w; take a default golden step.
            w = xc + _gold * (xc - xb)
            fw = func(*((w,) + args))
            funcalls += 1
        # Shift the triple downhill and continue.
        xa = xb
        xb = xc
        xc = w
        fa = fb
        fb = fc
        fc = fw
    return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
    """Line-search helper for Powell's method.

    Minimize the one-dimensional function ``func(p + alpha*xi)`` over
    the scalar ``alpha`` using `brent`.  (NOTE(review): the original
    docstring said "using fminbound", but the code calls `brent`.)

    Returns
    -------
    fret : float
        Minimum function value found along the line.
    p + xi : ndarray
        The minimizing point.
    xi : ndarray
        The actual step taken (``alpha_min * xi``).
    """
    def myfunc(alpha):
        return func(p + alpha*xi)
    alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
    xi = alpha_min*xi
    return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
                maxfun=None, full_output=0, disp=1, retall=0, callback=None,
                direc=None):
    """
    Minimize a function using modified Powell's method. This method
    only uses function values, not derivatives.

    Parameters
    ----------
    func : callable f(x,*args)
        Objective function to be minimized.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to func.
    callback : callable, optional
        An optional user-supplied function, called after each
        iteration.  Called as ``callback(xk)``, where ``xk`` is the
        current parameter vector.
    direc : ndarray, optional
        Initial direction set.
    xtol : float, optional
        Line-search error tolerance.
    ftol : float, optional
        Relative error in ``func(xopt)`` acceptable for convergence.
    maxiter : int, optional
        Maximum number of iterations to perform.
    maxfun : int, optional
        Maximum number of function evaluations to make.
    full_output : bool, optional
        If True, fopt, xi, direc, iter, funcalls, and
        warnflag are returned.
    disp : bool, optional
        If True, print convergence messages.
    retall : bool, optional
        If True, return a list of the solution at each iteration.

    Returns
    -------
    xopt : ndarray
        Parameter which minimizes `func`.
    fopt : number
        Value of function at minimum: ``fopt = func(xopt)``.
    direc : ndarray
        Current direction set.
    iter : int
        Number of iterations.
    funcalls : int
        Number of function calls made.
    warnflag : int
        Integer warning flag:
            1 : Maximum number of function evaluations.
            2 : Maximum number of iterations.
    allvecs : list
        List of solutions at each iteration.

    See also
    --------
    minimize: Interface to unconstrained minimization algorithms for
        multivariate functions. See the 'Powell' `method` in particular.

    Notes
    -----
    Uses a modification of Powell's method to find the minimum of
    a function of N variables. Powell's method is a conjugate
    direction method.

    The algorithm has two loops. The outer loop
    merely iterates over the inner loop. The inner loop minimizes
    over each current direction in the direction set. At the end
    of the inner loop, if certain conditions are met, the direction
    that gave the largest decrease is dropped and replaced with
    the difference between the current estimated x and the estimated
    x from the beginning of the inner-loop.

    The technical conditions for replacing the direction of greatest
    increase amount to checking that

    1. No further gain can be made along the direction of greatest increase
       from that iteration.
    2. The direction of greatest increase accounted for a large sufficient
       fraction of the decrease in the function value from that iteration of
       the inner loop.

    References
    ----------
    Powell M.J.D. (1964) An efficient method for finding the minimum of a
    function of several variables without calculating derivatives,
    Computer Journal, 7 (2):155-162.

    Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
    Numerical Recipes (any edition), Cambridge University Press
    """
    # Translate the legacy keyword names into the options dict expected
    # by the OptimizeResult-based driver, then delegate.
    opts = {'xtol': xtol,
            'ftol': ftol,
            'maxiter': maxiter,
            'maxfev': maxfun,
            'disp': disp,
            'direc': direc,
            'return_all': retall}
    res = _minimize_powell(func, x0, args, callback=callback, **opts)
    if full_output:
        retlist = (res['x'], res['fun'], res['direc'], res['nit'],
                   res['nfev'], res['status'])
        if retall:
            retlist += (res['allvecs'], )
        return retlist
    else:
        if retall:
            return res['x'], res['allvecs']
        else:
            return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
                     xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
                     disp=False, direc=None, return_all=False,
                     **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    modified Powell algorithm.

    Options for the Powell algorithm are:
        disp : bool
            Set to True to print convergence messages.
        xtol : float
            Relative error in solution `xopt` acceptable for convergence.
        ftol : float
            Relative error in ``fun(xopt)`` acceptable for convergence.
        maxiter : int
            Maximum number of iterations to perform.
        maxfev : int
            Maximum number of function evaluations to make.
        direc : ndarray
            Initial set of direction vectors for the Powell method.

    This function is called by the `minimize` function with
    `method=Powell`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxfev
    retall = return_all
    # we need to use a mutable object here that we can update in the
    # wrapper function
    fcalls, func = wrap_function(func, args)
    x = asarray(x0).flatten()
    if retall:
        allvecs = [x]
    N = len(x)
    # Default iteration/evaluation budgets scale with dimensionality.
    if maxiter is None:
        maxiter = N * 1000
    if maxfun is None:
        maxfun = N * 1000
    # Start from the coordinate axes unless a direction set was given.
    if direc is None:
        direc = eye(N, dtype=float)
    else:
        direc = asarray(direc, dtype=float)
    fval = squeeze(func(x))
    x1 = x.copy()
    iter = 0
    ilist = list(range(N))
    while True:
        fx = fval
        bigind = 0   # index of the direction giving the largest decrease
        delta = 0.0  # magnitude of that decrease
        # Inner loop: line-minimize along each direction in the set.
        for i in ilist:
            direc1 = direc[i]
            fx2 = fval
            fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                 tol=xtol * 100)
            if (fx2 - fval) > delta:
                delta = fx2 - fval
                bigind = i
        iter += 1
        if callback is not None:
            callback(x)
        if retall:
            allvecs.append(x)
        # Converged when the sweep's total decrease is relatively tiny.
        bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
        if 2.0 * (fx - fval) <= bnd:
            break
        if fcalls[0] >= maxfun:
            break
        if iter >= maxiter:
            break
        # Construct the extrapolated point
        direc1 = x - x1
        x2 = 2*x - x1
        x1 = x.copy()
        fx2 = squeeze(func(x2))
        # Powell's criterion for replacing the direction of largest
        # decrease with the aggregate step of this sweep.
        if (fx > fx2):
            t = 2.0*(fx + fx2 - 2.0*fval)
            temp = (fx - fval - delta)
            t *= temp*temp
            temp = fx - fx2
            t -= delta*temp*temp
            if t < 0.0:
                fval, x, direc1 = _linesearch_powell(func, x, direc1,
                                                     tol=xtol*100)
                direc[bigind] = direc[-1]
                direc[-1] = direc1
    # Classify the termination reason and optionally report it.
    warnflag = 0
    if fcalls[0] >= maxfun:
        warnflag = 1
        msg = _status_message['maxfev']
        if disp:
            print("Warning: " + msg)
    elif iter >= maxiter:
        warnflag = 2
        msg = _status_message['maxiter']
        if disp:
            print("Warning: " + msg)
    else:
        msg = _status_message['success']
        if disp:
            print(msg)
            print("         Current function value: %f" % fval)
            print("         Iterations: %d" % iter)
            print("         Function evaluations: %d" % fcalls[0])
    x = squeeze(x)
    result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
                            status=warnflag, success=(warnflag == 0),
                            message=msg, x=x)
    if retall:
        result['allvecs'] = allvecs
    return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
          disp=False):
    """Minimize a function over a given range by brute force.

    Uses the "brute force" method, i.e. computes the function's value
    at each point of a multidimensional grid of points, to find the global
    minimum of the function.

    The function is evaluated everywhere in the range with the datatype of the
    first call to the function, as enforced by the ``vectorize`` NumPy
    function. The value and type of the function evaluation returned when
    ``full_output=True`` are affected in addition by the ``finish`` argument
    (see Notes).

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the
        form ``f(x, *args)``, where ``x`` is the argument in
        the form of a 1-D array and ``args`` is a tuple of any
        additional fixed parameters needed to completely specify
        the function.
    ranges : tuple
        Each component of the `ranges` tuple must be either a
        "slice object" or a range tuple of the form ``(low, high)``.
        The program uses these to create the grid of points on which
        the objective function will be computed. See `Note 2` for
        more detail.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify
        the function.
    Ns : int, optional
        Number of grid points along the axes, if not otherwise
        specified. See `Note2`.
    full_output : bool, optional
        If True, return the evaluation grid and the objective function's
        values on it.
    finish : callable, optional
        An optimization function that is called with the result of brute force
        minimization as initial guess. `finish` should take the initial guess
        as positional argument, and take `args`, `full_output` and `disp`
        as keyword arguments. Use None if no "polishing" function is to be
        used. See Notes for more details.
    disp : bool, optional
        Set to True to print convergence messages.

    Returns
    -------
    x0 : ndarray
        A 1-D array containing the coordinates of a point at which the
        objective function had its minimum value. (See `Note 1` for
        which point is returned.)
    fval : float
        Function value at the point `x0`. (Returned when `full_output` is
        True.)
    grid : tuple
        Representation of the evaluation grid. It has the same
        length as `x0`. (Returned when `full_output` is True.)
    Jout : ndarray
        Function values at each point of the evaluation
        grid, `i.e.`, ``Jout = func(*grid)``. (Returned
        when `full_output` is True.)

    See Also
    --------
    anneal : Another approach to seeking the global minimum of
        multivariate, multimodal functions.

    Notes
    -----
    *Note 1*: The program finds the gridpoint at which the lowest value
    of the objective function occurs. If `finish` is None, that is the
    point returned. When the global minimum occurs within (or not very far
    outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the global minimum.

    However, users often employ some other optimization program to
    "polish" the gridpoint values, `i.e.`, to seek a more precise
    (local) minimum near `brute's` best gridpoint.
    The `brute` function's `finish` option provides a convenient way to do
    that. Any polishing program used must take `brute's` output as its
    initial guess as a positional argument, and take `brute's` input values
    for `args` and `full_output` as keyword arguments, otherwise an error
    will be raised.

    `brute` assumes that the `finish` function returns a tuple in the form:
    ``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing value
    of the argument, ``Jmin`` is the minimum value of the objective function,
    "..." may be some other returned values (which are not used by `brute`),
    and ``statuscode`` is the status code of the `finish` program.

    Note that when `finish` is not None, the values returned are those
    of the `finish` program, *not* the gridpoint ones. Consequently,
    while `brute` confines its search to the input grid points,
    the `finish` program's results usually will not coincide with any
    gridpoint, and may fall outside the grid's boundary.

    *Note 2*: The grid of points is a `numpy.mgrid` object.
    For `brute` the `ranges` and `Ns` inputs have the following effect.
    Each component of the `ranges` tuple can be either a slice object or a
    two-tuple giving a range of values, such as (0, 5). If the component is a
    slice object, `brute` uses it directly. If the component is a two-tuple
    range, `brute` internally converts it to a slice object that interpolates
    `Ns` points from its low-value to its high-value, inclusive.

    Examples
    --------
    We illustrate the use of `brute` to seek the global minimum of a function
    of two variables that is given as the sum of a positive-definite
    quadratic and two deep "Gaussian-shaped" craters. Specifically, define
    the objective function `f` as the sum of three other functions,
    ``f = f1 + f2 + f3``. We suppose each of these has a signature
    ``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
    are as defined below.

    >>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
    >>> def f1(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)

    >>> def f2(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))

    >>> def f3(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))

    >>> def f(z, *params):
    ...     x, y = z
    ...     a, b, c, d, e, f, g, h, i, j, k, l, scale = params
    ...     return f1(z, *params) + f2(z, *params) + f3(z, *params)

    Thus, the objective function may have local minima near the minimum
    of each of the three functions of which it is composed. To
    use `fmin` to polish its gridpoint result, we may then continue as
    follows:

    >>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
    >>> from scipy import optimize
    >>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
                                  finish=optimize.fmin)
    >>> resbrute[0]  # global minimum
    array([-1.05665192,  1.80834843])
    >>> resbrute[1]  # function value at global minimum
    -3.4085818767

    Note that if `finish` had been set to None, we would have gotten the
    gridpoint [-1.0 1.75] where the rounded function value is -2.892.
    """
    N = len(ranges)
    if N > 40:
        raise ValueError("Brute Force not possible with more "
                         "than 40 variables.")
    lrange = list(ranges)
    for k in range(N):
        if type(lrange[k]) is not type(slice(None)):
            # Convert (low, high) pairs into complex-step slices so that
            # mgrid interpolates Ns points inclusively along that axis.
            if len(lrange[k]) < 3:
                lrange[k] = tuple(lrange[k]) + (complex(Ns),)
            lrange[k] = slice(*lrange[k])
    if (N == 1):
        lrange = lrange[0]
    def _scalarfunc(*params):
        # Adapter: vectorize hands coordinates as separate scalars, but
        # `func` expects a single 1-D array plus the fixed args.
        params = squeeze(asarray(params))
        return func(params, *args)
    vecfunc = vectorize(_scalarfunc)
    grid = mgrid[lrange]
    if (N == 1):
        grid = (grid,)
    # Evaluate the objective on every grid point.
    Jout = vecfunc(*grid)
    Nshape = shape(Jout)
    # Unravel the flat argmin index into per-axis indices.
    indx = argmin(Jout.ravel(), axis=-1)
    Nindx = zeros(N, int)
    xmin = zeros(N, float)
    for k in range(N - 1, -1, -1):
        thisN = Nshape[k]
        Nindx[k] = indx % Nshape[k]
        indx = indx // thisN
    for k in range(N):
        xmin[k] = grid[k][tuple(Nindx)]
    Jmin = Jout[tuple(Nindx)]
    if (N == 1):
        grid = grid[0]
        xmin = xmin[0]
    if callable(finish):
        # "Polish" the best gridpoint; the finish optimizer's last
        # return value is assumed to be its status code.
        vals = finish(func, xmin, args=args, full_output=1, disp=disp)
        xmin = vals[0]
        Jmin = vals[1]
        if vals[-1] > 0:
            if disp:
                print("Warning: Final optimization did not succeed")
    if full_output:
        return xmin, Jmin, grid, Jout
    else:
        return xmin
def show_options(solver=None, method=None):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', or 'linprog'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
corresponds to methods' names of respective solver (e.g. 'BFGS' for
'minimize').
Notes
-----
**Minimize options**
*BFGS* options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
*Nelder-Mead* options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
*Newton-CG* options:
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
*CG* options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
*Powell* options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
*Anneal* options:
ftol : float
Relative error in ``fun(x)`` acceptable for convergence.
schedule : str
Annealing schedule to use. One of: 'fast', 'cauchy' or
'boltzmann'.
T0 : float
Initial Temperature (estimated as 1.2 times the largest
cost-function deviation over random points in the range).
Tf : float
Final goal temperature.
maxfev : int
Maximum number of function evaluations to make.
maxaccept : int
Maximum changes to accept.
boltzmann : float
Boltzmann constant in acceptance test (increase for less
stringent test at each temperature).
learn_rate : float
Scale constant for adjusting guesses.
quench, m, n : float
Parameters to alter fast_sa schedule.
lower, upper : float or ndarray
Lower and upper bounds on `x`.
dwell : int
The number of times to search the space at each temperature.
*L-BFGS-B* options:
ftol : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
maxfun : int
Maximum number of function evaluations.
maxiter : int
Maximum number of iterations.
*TNC* options:
ftol : float
Precision goal for the value of f in the stoping criterion.
If ftol < 0.0, ftol is set to 0.0 defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
gtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
scale : list of floats
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x] fo the others. Defaults to None
offset : float
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
maxCGit : int
Maximum number of hessian*vector evaluations per main
iteration. If maxCGit == 0, the direction chosen is
-gradient if maxCGit < 0, maxCGit is set to
max(1,min(50,n/2)). Defaults to -1.
maxiter : int
Maximum number of function evaluation. if None, `maxiter` is
set to max(100, 10*len(x0)). Defaults to None.
eta : float
Severity of the line search. if < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
*COBYLA* options:
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
rhobeg : float
Reasonable initial changes to the variables.
maxfev : int
Maximum number of function evaluations.
catol : float
Absolute tolerance for constraint violations (default: 1e-6).
*SLSQP* options:
ftol : float
Precision goal for the value of f in the stopping criterion.
eps : float
Step size used for numerical approximation of the jacobian.
maxiter : int
Maximum number of iterations.
*dogleg* options:
initial_trust_radius : float
Initial trust-region radius.
max_trust_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
*trust-ncg* options:
See dogleg options.
**minimize_scalar options**
*brent* options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
*bounded* options:
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
*golden* options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
**root options**
*hybrd* options:
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between
two consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : sequence
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float
A suitable step length for the forward-difference approximation
of the Jacobian (for ``fprime=None``). If `epsfcn` is less than
the machine precision, it is assumed that the relative errors
in the functions are of the order of the machine precision.
factor : float
A parameter determining the initial step bound (``factor * ||
diag * x||``). Should be in the interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
*LM* options:
col_deriv : bool
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns
of the Jacobian.
maxiter : int
The maximum number of calls to the function. If zero, then
100*(N+1) is the maximum where N is the number of elements in x0.
epsfcn : float
A suitable step length for the forward-difference approximation of
the Jacobian (for Dfun=None). If epsfcn is less than the machine
precision, it is assumed that the relative errors in the functions
are of the order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the variables.
*Broyden1* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain`: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
*Broyden2* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain`: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
*Anderson* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
*LinearMixing* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
*DiagBroyden* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
*ExcitingMixing* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
*Krylov* options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same
interface as the iterative solvers in
`scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse).
If the preconditioner has a method named 'update', it will
be called as ``update(x, f)`` after each nonlinear step,
with ``x`` giving the current point, and ``f`` the current
function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the "inner" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear
iterations.
See `scipy.sparse.linalg.lgmres` for details.
**linprog options**
*simplex* options:
maxiter : int, optional
Maximum number of iterations to make.
tol : float, optional
The tolerance which determines when the Phase 1 objective is
sufficiently close to zero to be considered a basic feasible
solution or when the Phase 2 objective coefficients are close
enough to positive for the objective to be considered optimal.
bland : bool, optional
If True, choose pivots using Bland's rule. In problems which
fail to converge due to cycling, using Bland's rule can provide
convergence at the expense of a less optimal path about the simplex.
"""
import textwrap
if solver is None:
print("\nminimize")
print("--------\n")
show_options('minimize')
print("\nminimize_scalar")
print("---------------\n")
show_options('minimize_scalar')
print("\nroot")
print("----\n")
show_options('root')
print('\nlinprog')
print('-------\n')
show_options('linprog')
return
solver = solver.lower()
if solver not in ('minimize', 'minimize_scalar', 'root', 'linprog'):
raise ValueError('Unknown solver.')
solvers_doc = [s.strip()
for s in show_options.__doc__.split(' **')[1:]]
solver_doc = [s for s in solvers_doc
if s.lower().startswith(solver + " options")]
if method is None:
doc = [' **'] + solver_doc
else:
doc = solver_doc[0].split(' *')[1:]
doc = [' *'] + [s for s in doc if s.lower().startswith(method.lower())]
print(textwrap.dedent(''.join(doc)).rstrip())
return
def main():
    """Benchmark several scipy.optimize solvers on the order-3 Rosenbrock problem.

    For each algorithm: print a heading, run the solver from the same starting
    point, print the solution, and record the wall-clock time; finish with a
    summary table of algorithm vs. seconds.
    """
    import time

    x0 = [0.8, 1.2, 0.7]
    # (section heading, label used in the summary table, solver thunk)
    benchmarks = [
        ("Nelder-Mead Simplex", 'Nelder-Mead Simplex\t',
         lambda: fmin(rosen, x0)),
        ("Powell Direction Set Method", 'Powell Direction Set Method.',
         lambda: fmin_powell(rosen, x0)),
        ("Nonlinear CG", 'Nonlinear CG \t',
         lambda: fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)),
        ("BFGS Quasi-Newton", 'BFGS Quasi-Newton\t',
         lambda: fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)),
        ("BFGS approximate gradient", 'BFGS without gradient\t',
         lambda: fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)),
        ("Newton-CG with Hessian product", 'Newton-CG with hessian product',
         lambda: fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod,
                          maxiter=80)),
        ("Newton-CG with full Hessian", 'Newton-CG with full hessian',
         lambda: fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)),
    ]
    times = []
    algor = []
    for heading, label, solve in benchmarks:
        print(heading)
        # The underline in the original output is exactly as long as the heading.
        print("=" * len(heading))
        start = time.time()
        x = solve()
        print(x)
        times.append(time.time() - start)
        algor.append(label)
        print()
    print("\nMinimizing the Rosenbrock function of order 3\n")
    print(" Algorithm \t\t\t Seconds")
    print("===========\t\t\t =========")
    for label, elapsed in zip(algor, times):
        print(label, "\t -- ", elapsed)
if __name__ == "__main__":
    main()
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/scipy/optimize/optimize.py
|
Python
|
apache-2.0
| 116,124
|
[
"Gaussian"
] |
984fea8ac17cee5425a39aa88eee83c6bc308a870487ee6ab07dc9f950aa40f5
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
# Module-level ``test`` entry point: calling ``skbio.alignment._lib.test()``
# runs this subpackage's test suite through scikit-bio's TestRunner helper.
test = TestRunner(__file__).test
|
demis001/scikit-bio
|
skbio/alignment/_lib/__init__.py
|
Python
|
bsd-3-clause
| 487
|
[
"scikit-bio"
] |
0ae76f7accb8293f082864ff0a1ed14333dfde95fbec83a0bbca236f6a21e698
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_iso8601,
qualities,
)
class SRGSSRIE(InfoExtractor):
    # Resolves SRG SSR media addressed either by an urn on tp.srgssr.ch or by
    # an internal srgssr:<bu>:<type>:<id> pseudo-URL.
    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
    # Block codes returned by the integration layer, mapped to user-facing
    # explanations of why playback is refused.
    _ERRORS = {
        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
        'LEGAL': 'The video cannot be transmitted for legal reasons.',
        'STARTDATE': 'This video is not yet available. Please try again later.',
    }
    def get_media_data(self, bu, media_type, media_id):
        """Download the integration-layer play JSON and fail early when blocked."""
        info_url = 'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id)
        payload = self._download_json(info_url, media_id)[media_type.capitalize()]
        block_code = payload.get('block')
        if block_code and block_code in self._ERRORS:
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, self._ERRORS[block_code]),
                expected=True)
        return payload
    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
        if bu == 'rts':
            # RTS content has a dedicated extractor with richer metadata.
            return self.url_result('rts:%s' % media_id, 'RTS')
        info = self.get_media_data(bu, media_type, media_id)
        asset_meta = info['AssetMetadatas']['AssetMetadata'][0]
        title = asset_meta['title']
        description = asset_meta.get('description')
        created_date = info.get('createdDate') or asset_meta.get('createdDate')
        timestamp = parse_iso8601(created_date)
        thumbnails = []
        for image in info.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', []):
            thumbnails.append({
                'id': image.get('id'),
                'url': image['url'],
            })
        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
        sources = (info.get('Playlists', {}).get('Playlist', [])
                   + info.get('Downloads', {}).get('Download', []))
        formats = []
        for source in sources:
            protocol = source.get('@protocol')
            for asset in source['url']:
                asset_url = asset['text']
                quality = asset['@quality']
                format_id = '%s-%s' % (protocol, quality)
                if protocol == 'HTTP-HDS':
                    formats.extend(self._extract_f4m_formats(
                        asset_url + '?hdcore=3.4.0', media_id,
                        f4m_id=format_id, fatal=False))
                elif protocol == 'HTTP-HLS':
                    formats.extend(self._extract_m3u8_formats(
                        asset_url, media_id, 'mp4', 'm3u8_native',
                        m3u8_id=format_id, fatal=False))
                else:
                    # Progressive or RTMP asset; for RTMP the extension is the
                    # scheme prefix of the asset URL.
                    ext = None
                    if protocol == 'RTMP':
                        ext = self._search_regex(r'([a-z0-9]+):[^/]+', asset_url, 'ext')
                    formats.append({
                        'format_id': format_id,
                        'url': asset_url,
                        'preference': preference(quality),
                        'ext': ext,
                    })
        self._sort_formats(formats)
        return {
            'id': media_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'formats': formats,
        }
class SRGSSRPlayIE(InfoExtractor):
    # Thin wrapper extractor for the public "play" sites: it only parses the
    # page URL and defers actual extraction to SRGSSRIE via an srgssr: result.
    IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'
    # Standard youtube-dl test fixtures (checked by the test harness, not at
    # runtime).
    _TESTS = [{
        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'md5': '4cd93523723beff51bb4bee974ee238d',
        'info_dict': {
            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
            'ext': 'm4v',
            'upload_date': '20130701',
            'title': 'Snowden beantragt Asyl in Russland',
            'timestamp': 1372713995,
        }
    }, {
        # No Speichern (Save) button
        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
        'md5': '0a274ce38fda48c53c01890651985bc6',
        'info_dict': {
            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
            'ext': 'flv',
            'upload_date': '20130710',
            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
            'description': 'md5:88604432b60d5a38787f152dec89cd56',
            'timestamp': 1373493600,
        },
    }, {
        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
        'info_dict': {
            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
            'ext': 'mp3',
            'upload_date': '20151013',
            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
            'timestamp': 1444750398,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
        'info_dict': {
            'id': '6348260',
            'display_id': '6348260',
            'ext': 'mp4',
            'duration': 1796,
            'title': 'Le 19h30',
            'description': '',
            'uploader': '19h30',
            'upload_date': '20141201',
            'timestamp': 1417458600,
            'thumbnail': 're:^https?://.*\.image',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }]
    def _real_extract(self, url):
        """Map a play-site URL onto the corresponding srgssr: URN result."""
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
        # other info can be extracted from url + '&layout=json'
        # bu[:3] truncates 'swissinfo' to the 'swi' business-unit code that
        # SRGSSRIE's urn pattern expects.
        return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
|
akirk/youtube-dl
|
youtube_dl/extractor/srgssr.py
|
Python
|
unlicense
| 6,606
|
[
"Jaguar"
] |
a836d21c725b05d392e72ceb481a3009568e58e3f3884cc482397f487a4e0a72
|
#!/usr/bin/env python
"""
Start a given production
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
# Usage banner shown by -h/--help and by showHelp() below; the first line is
# taken from this module's docstring.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  ' %s prodID' % Script.scriptName,
                                  'Arguments:',
                                  ' prodID: Production ID'
                                  ]))
Script.parseCommandLine()
from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient
args = Script.getPositionalArgs()
if (len(args) != 1):
    # Exactly one positional argument (the production ID) is required;
    # showHelp() prints the usage message (and in DIRAC exits the script --
    # NOTE(review): confirm this for the DIRAC version in use).
    Script.showHelp()
# get arguments
prodID = args[0]
prodClient = ProductionClient()
# Flip the production's status to 'Active' through the Production System service.
res = prodClient.setProductionStatus(prodID, 'Active')
if res['OK']:
    # Fixed typo in the user-facing message ('successully' -> 'successfully').
    DIRAC.gLogger.notice('Production %s successfully started' % prodID)
else:
    DIRAC.gLogger.error(res['Message'])
    DIRAC.exit(-1)
DIRAC.exit(0)
|
fstagni/DIRAC
|
ProductionSystem/scripts/dirac-prod-start.py
|
Python
|
gpl-3.0
| 908
|
[
"DIRAC"
] |
4b549f7db67c7027394e8ccf47a1cc0f3c9456bba035d2aeb5651e03c0c12bb1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import logging
import numpy as np
import itertools
from scipy.spatial import ConvexHull
from pymatgen.analysis.pourbaix.entry import MultiEntry, ion_or_solid_comp_object
from pymatgen.core.periodic_table import Element
from pymatgen.core.composition import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
from pymatgen.analysis.phase_diagram import PhaseDiagram
"""
Module containing analysis classes which compute a pourbaix diagram given a
target compound/element.
"""
from six.moves import zip
# Module authorship metadata.
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.0"
__maintainer__ = "Sai Jayaraman"
__credits__ = "Arunima Singh, Joseph Montoya"
__email__ = "sjayaram@mit.edu"
__status__ = "Development"
__date__ = "Nov 1, 2012"
logger = logging.getLogger(__name__)
# Nernst slope, presumably ln(10)*R*T/F in volts per pH unit at ~298 K
# -- NOTE(review): confirm intended temperature/units.
PREFAC = 0.0591
# Free energy of water formation used in the Pourbaix energy correction
# (appears to be in eV per H2O; verify against entry.g0 units).
MU_H2O = -2.4583
# H and O are handled implicitly through the pH/potential axes, so they are
# excluded when determining the non-trivial "Pourbaix elements" of a system.
elements_HO = {Element('H'), Element('O')}
# TODO: There's a lot of functionality here that diverges
# based on whether or not the pbx diagram is multielement
# or not. Could be a more elegant way to
# treat the two distinct modes.
class PourbaixDiagram(object):
    """
    Class to create a Pourbaix diagram from entries
    Args:
        entries [Entry]: Entries list containing both Solids and Ions
        comp_dict {str: float}: Dictionary of compositions, defaults to
            equal parts of each elements
        conc_dict {str: float}: Dictionary of ion concentrations, defaults
            to 1e-6 for each element
        filter_multielement (bool): applying this filter to a multi-
            element pourbaix diagram generates it a bit more
            efficiently by filtering the entries used to generate
            the hull. This breaks some of the functionality of
            the analyzer, though, so use with caution.
    """
    def __init__(self, entries, comp_dict=None, conc_dict=None,
                 filter_multielement=False):
        """
        Validate the entries, assign ion concentrations, build the processed
        entry list (single- or multi-element mode) and construct the diagram.
        Raises:
            ValueError: for multi-element ions, or entries whose phase_type
                is neither "Solid" nor "Ion".
        """
        # Get non-OH elements
        pbx_elts = set(itertools.chain.from_iterable(
            [entry.composition.elements for entry in entries]))
        pbx_elts = list(pbx_elts - elements_HO)
        # Set default conc/comp dicts
        if not comp_dict:
            comp_dict = {elt.symbol : 1. / len(pbx_elts) for elt in pbx_elts}
        if not conc_dict:
            conc_dict = {elt.symbol : 1e-6 for elt in pbx_elts}
        self._elt_comp = comp_dict
        self.pourbaix_elements = pbx_elts
        # Partition the input into solids and ions; anything else is rejected
        # by the length check below.
        solid_entries = [entry for entry in entries
                         if entry.phase_type == "Solid"]
        ion_entries = [entry for entry in entries
                       if entry.phase_type == "Ion"]
        for entry in ion_entries:
            ion_elts = list(set(entry.composition.elements) - elements_HO)
            # Concentrations are specified per element, so a multi-element ion
            # has no unique concentration assignment.
            if len(ion_elts) != 1:
                raise ValueError("Elemental concentration not compatible "
                                 "with multi-element ions")
            entry.conc = conc_dict[ion_elts[0].symbol]
        if not len(solid_entries + ion_entries) == len(entries):
            raise ValueError("All supplied entries must have a phase type of "
                             "either \"Solid\" or \"Ion\"")
        self._unprocessed_entries = entries
        if len(comp_dict) > 1:
            self._multielement = True
            if filter_multielement:
                # Add two high-energy H/O entries that ensure the hull
                # includes all stable solids.
                entries_HO = [ComputedEntry('H', 10000), ComputedEntry('O', 10000)]
                solid_pd = PhaseDiagram(solid_entries + entries_HO)
                solid_entries = list(set(solid_pd.stable_entries) - set(entries_HO))
            self._processed_entries = self._generate_multielement_entries(
                solid_entries + ion_entries)
        else:
            self._multielement = False
            self._processed_entries = solid_entries + ion_entries
        self._make_pourbaix_diagram()
    def _create_conv_hull_data(self):
        """
        Make data conducive to convex hull generator.
        """
        # NOTE(review): this mutates the processed entries in place (scaling
        # plus energy correction), so it should run only once per diagram.
        entries_to_process = list()
        for entry in self._processed_entries:
            entry.scale(entry.normalization_factor)
            # Fold the water-formation and ion-concentration contributions
            # into the entry's energy correction.
            entry.correction += (- MU_H2O * entry.nH2O + entry.conc_term)
            entries_to_process.append(entry)
        self._qhull_entries = entries_to_process
        return self._process_conv_hull_data(entries_to_process)
    def _process_conv_hull_data(self, entries_to_process):
        """
        From a sequence of ion+solid entries, generate the necessary data
        for generation of the convex hull.
        """
        data = []
        for entry in entries_to_process:
            # Hull coordinates per entry: (npH, nPhi, g0).
            row = [entry.npH, entry.nPhi, entry.g0]
            data.append(row)
        # Sort by energy (g0) while keeping data rows and entries aligned.
        temp = sorted(zip(data, self._qhull_entries),
                      key=lambda x: x[0][2])
        [data, self._qhull_entries] = list(zip(*temp))
        return data
    def _generate_multielement_entries(self, entries):
        """
        Create entries for multi-element Pourbaix construction.
        This works by finding all possible linear combinations
        of entries that can result in the specified composition
        from the initialized comp_dict.
        Args:
            entries ([PourbaixEntries]): list of pourbaix entries
                to process into MultiEntries
        """
        N = len(self._elt_comp)  # No. of elements
        total_comp = Composition(self._elt_comp)
        # generate all possible combinations of compounds that have all elts
        entry_combos = [itertools.combinations(entries, j+1) for j in range(N)]
        entry_combos = itertools.chain.from_iterable(entry_combos)
        # NOTE(review): relies on Composition ordering ('<') to keep only
        # combinations whose total composition covers the target -- confirm
        # against pymatgen's Composition comparison semantics.
        entry_combos = filter(lambda x: total_comp < MultiEntry(x).total_composition,
                              entry_combos)
        # Generate and filter entries
        processed_entries = []
        for entry_combo in entry_combos:
            processed_entry = self.process_multientry(entry_combo, total_comp)
            if processed_entry is not None:
                processed_entries.append(processed_entry)
        return processed_entries
    @staticmethod
    def process_multientry(entry_list, prod_comp):
        """
        Static method for finding a multientry based on
        a list of entries and a product composition.
        Essentially checks to see if a valid aqueous
        reaction exists between the entries and the
        product composition and returns a MultiEntry
        with weights according to the coefficients if so.
        Args:
            entry_list ([Entry]): list of entries from which to
                create a MultiEntry
            prod_comp (Composition): composition constraint for setting
                weights of MultiEntry
        Returns:
            MultiEntry with reaction-derived weights, or None if no valid
            balanced reaction exists.
        """
        dummy_oh = [Composition("H"), Composition("O")]
        try:
            # Get balanced reaction coeffs, ensuring all < 0 or conc thresh
            # Note that we get reduced compositions for solids and non-reduced
            # compositions for ions because ions aren't normalized due to
            # their charge state.
            entry_comps = [e.composition if e.phase_type=='Ion'
                           else e.composition.reduced_composition
                           for e in entry_list]
            rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
            # Per-entry threshold on reactant coefficients: the ion
            # concentration for ions, 1e-3 for solids.
            thresh = np.array([pe.conc if pe.phase_type == "Ion"
                               else 1e-3 for pe in entry_list])
            coeffs = -np.array([rxn.get_coeff(comp) for comp in entry_comps])
            if (coeffs > thresh).all():
                # Normalize weights to the first entry's coefficient.
                weights = coeffs / coeffs[0]
                return MultiEntry(entry_list, weights=weights.tolist())
            else:
                return None
        except ReactionError:
            # No balanced reaction exists for this combination of entries.
            return None
    def _make_pourbaix_diagram(self):
        """
        Calculates entries on the convex hull in the dual space.
        """
        stable_entries = set()
        self._qhull_data = self._create_conv_hull_data()
        dim = len(self._qhull_data[0])
        if len(self._qhull_data) < dim:
            # TODO: might want to lift this restriction and
            # supply a warning instead, should work even if it's slow.
            raise NotImplementedError("Can only do elements with at-least "
                                      "3 entries for now")
        if len(self._qhull_data) == dim:
            # Exactly dim points: the hull degenerates to one facet through
            # all of them.
            self._facets = [list(range(dim))]
        else:
            facets_hull = np.array(ConvexHull(self._qhull_data).simplices)
            self._facets = np.sort(np.array(facets_hull))
            logger.debug("Final facets are\n{}".format(self._facets))
            logger.debug("Removing vertical facets...")
            vert_facets_removed = list()
            for facet in self._facets:
                # A near-zero determinant of the homogenized vertex matrix
                # flags a vertical facet (vertices affinely dependent in the
                # first dim-1 coordinates), which carries no lower-hull info.
                facetmatrix = np.zeros((len(facet), len(facet)))
                count = 0
                for vertex in facet:
                    facetmatrix[count] = np.array(self._qhull_data[vertex])
                    facetmatrix[count, dim - 1] = 1
                    count += 1
                if abs(np.linalg.det(facetmatrix)) > 1e-8:
                    vert_facets_removed.append(facet)
                else:
                    logger.debug("Removing vertical facet : {}".format(facet))
            logger.debug("Removing UCH facets by eliminating normal.z >0 ...")
            # Find center of hull
            vertices = set()
            for facet in vert_facets_removed:
                for vertex in facet:
                    vertices.add(vertex)
            c = [0.0, 0.0, 0.0]
            c[0] = np.average([self._qhull_data[vertex][0]
                               for vertex in vertices])
            c[1] = np.average([self._qhull_data[vertex][1]
                               for vertex in vertices])
            c[2] = np.average([self._qhull_data[vertex][2]
                               for vertex in vertices])
            # Shift origin to c
            new_qhull_data = np.array(self._qhull_data)
            for vertex in vertices:
                new_qhull_data[vertex] -= c
            # For each facet, find normal n, find dot product with P, and
            # check if this is -ve
            final_facets = list()
            for facet in vert_facets_removed:
                a = new_qhull_data[facet[1]] - new_qhull_data[facet[0]]
                b = new_qhull_data[facet[2]] - new_qhull_data[facet[0]]
                n = np.cross(a, b)
                val = np.dot(n, new_qhull_data[facet[0]])
                if val < 0:
                    # Orient the normal outward (away from the hull center).
                    n = -n
                if n[2] <= 0:
                    # Keep only lower-hull facets (normal not pointing up in
                    # the energy direction); discard the upper convex hull.
                    final_facets.append(facet)
                else:
                    logger.debug("Removing UCH facet : {}".format(facet))
            final_facets = np.array(final_facets)
            self._facets = final_facets
        stable_vertices = set()
        for facet in self._facets:
            for vertex in facet:
                stable_vertices.add(vertex)
                stable_entries.add(self._qhull_entries[vertex])
        self._stable_entries = stable_entries
        self._vertices = stable_vertices
    @property
    def facets(self):
        """
        Facets of the convex hull in the form of [[1,2,3],[4,5,6]...]
        """
        return self._facets
    @property
    def qhull_data(self):
        """
        Data used in the convex hull operation. This is essentially a matrix of
        composition data and energy per atom values created from qhull_entries.
        """
        return self._qhull_data
    @property
    def qhull_entries(self):
        """
        Return qhull entries
        """
        return self._qhull_entries
    @property
    def stable_entries(self):
        """
        Returns the stable entries in the Pourbaix diagram.
        """
        return list(self._stable_entries)
    @property
    def unstable_entries(self):
        """
        Returns all unstable entries in the Pourbaix diagram
        """
        return [e for e in self.qhull_entries if e not in self.stable_entries]
    @property
    def all_entries(self):
        """
        Return all entries used to generate the pourbaix diagram
        """
        return self._processed_entries
    @property
    def vertices(self):
        """
        Return vertices of the convex hull
        """
        return self._vertices
    @property
    def unprocessed_entries(self):
        """
        Return unprocessed entries
        """
        return self._unprocessed_entries
|
johnson1228/pymatgen
|
pymatgen/analysis/pourbaix/maker.py
|
Python
|
mit
| 12,869
|
[
"pymatgen"
] |
f8e820c34c178aeab82fd8efe1e381731741160ca5a3ad92d2c591f1d83635e5
|
# -*- coding: utf-8 -*-
#
# test_growth_curves.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from scipy.integrate import quad
import math
import numpy
from numpy import testing
import unittest
import nest
import time
import sys
# Presumably True when the NEST kernel was built with threading (OpenMP)
# support; tests use this to decide whether threaded variants can run.
HAVE_OPENMP = nest.ll_api.sli_func("is_threaded")
class SynapticElementIntegrator:
    """
    Reference integrator for the number of synaptic elements driven by a
    calcium trace.

    Each spike bumps the Ca concentration by ``beta_ca``; between events the
    concentration decays exponentially with time constant ``tau_ca``.
    Derived classes override ``get_se`` to map the Ca history onto a
    synaptic-element count.
    """
    def __init__(self, tau_ca=10000.0, beta_ca=0.001):
        """
        :param tau_ca (float): time constant of the Ca decay
        :param beta_ca (float): Ca increment contributed by each spike
        """
        self.tau_ca = tau_ca
        self.beta_ca = beta_ca
        # Start from a blank history (t_minus, ca_minus, se_minus all zero).
        self.reset()
    def reset(self):
        """Forget all history: last-event time, Ca value, element count."""
        self.t_minus = 0
        self.ca_minus = 0
        self.se_minus = 0
    def handle_spike(self, t):
        """
        Register a spike at time ``t``.
        :param t (float): spike time (must not precede the last event)
        """
        assert t >= self.t_minus
        # Order matters: the element count must be frozen using the
        # pre-spike Ca trace, before the Ca jump is applied.
        self.se_minus = self.get_se(t)
        self.ca_minus = self.get_ca(t) + self.beta_ca
        self.t_minus = t
    def get_ca(self, t):
        """
        :param t (float): current time
        :return: Ca value, exponentially decayed since the last event
        """
        assert t >= self.t_minus
        decayed = self.ca_minus * math.exp((self.t_minus - t) / self.tau_ca)
        return decayed if decayed > 0 else 0
    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements (base class: always 0.0)
        """
        return 0.0
class LinearExactSEI(SynapticElementIntegrator):
    """
    Number of synaptic elements under the linear growth curve

        dse/dCa = nu * (1 - Ca/eps)

    evaluated with the closed-form (exact) solution of the integral.
    """

    def __init__(self, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor
        :param eps: fix point
        :param growth_rate: scaling of the growth curve
        .. seealso:: SynapticElementIntegrator()
        """
        super(LinearExactSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements (clipped at zero)
        """
        assert t >= self.t_minus
        # Exact integral of nu * (1 - Ca(t)/eps) from t_minus to t.
        delta_ca = self.get_ca(t) - self.ca_minus
        integral = (self.growth_rate * self.tau_ca * delta_ca
                    + self.growth_rate * self.eps * (t - self.t_minus))
        se = integral / self.eps + self.se_minus
        return se if se > 0 else 0
class LinearNumericSEI(SynapticElementIntegrator):
    """
    Number of synaptic elements under the linear growth curve

        dse/dCa = nu * (1 - Ca/eps)

    evaluated by numerical quadrature (scipy.integrate.quad).
    """

    def __init__(self, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor
        :param eps: fix point
        :param growth_rate: scaling of the growth curve
        .. seealso:: SynapticElementIntegrator()
        """
        super(LinearNumericSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements (clipped at zero)
        """
        assert t >= self.t_minus
        area, _ = quad(self.growth_curve, self.t_minus, t)
        se = self.se_minus + area
        return se if se > 0 else 0

    def growth_curve(self, t):
        """Instantaneous growth rate dse/dt at time ``t``."""
        return self.growth_rate * (1.0 - self.get_ca(t) / self.eps)
class GaussianNumericSEI(SynapticElementIntegrator):
    """
    Number of synaptic elements under the Gaussian growth curve

        dse/dCa = nu * (2 * exp(-((Ca - xi)/zeta)^2) - 1)

    with

        xi   = (eta + eps) / 2
        zeta = (eta - eps) / (2 * sqrt(ln 2))

    evaluated by numerical quadrature (scipy.integrate.quad).
    """

    def __init__(self, eta=0.1, eps=0.7, growth_rate=1.0, *args, **kwargs):
        """
        Constructor
        :param eta: low fix point
        :param eps: high fix point
        :param growth_rate: scaling of the growth curve
        .. seealso:: SynapticElementIntegrator()
        """
        super(GaussianNumericSEI, self).__init__(*args, **kwargs)
        self.zeta = (eta - eps) / (2.0 * math.sqrt(math.log(2.0)))
        self.xi = (eta + eps) / 2.0
        self.growth_rate = growth_rate

    def get_se(self, t):
        """
        :param t (float): current time
        :return: number of synaptic elements (clipped at zero)
        """
        assert t >= self.t_minus
        area, _ = quad(self.growth_curve, self.t_minus, t)
        se = self.se_minus + area
        return se if se > 0 else 0

    def growth_curve(self, t):
        """Instantaneous growth rate dse/dt at time ``t``."""
        z = (self.get_ca(t) - self.xi) / self.zeta
        return self.growth_rate * (2 * math.exp(-(z * z)) - 1)
class SigmoidNumericSEI(SynapticElementIntegrator):
    """
    Compute the number of synaptic element corresponding to a
    sigmoid growth curve
    dse/dCa = nu * ((2.0 / (1.0 + exp( (Ca - eps)/psi ))) - 1.0)
    Use numerical integration (see scipy.integrate.quad)
    """
    def __init__(self, eps=0.7, growth_rate=1.0, psi=0.1, *args, **kwargs):
        """
        Constructor
        :param eps: set point
        :param psi: controls width of growth curve
        :param growth_rate: scaling of the growth curve
        .. seealso:: SynapticElementIntegrator()
        """
        super(SigmoidNumericSEI, self).__init__(*args, **kwargs)
        self.eps = eps
        self.psi = psi
        self.growth_rate = growth_rate
    def get_se(self, t):
        """
        :param t (float): current time
        :return: Number of synaptic element (clipped at zero)
        """
        assert t >= self.t_minus
        se = self.se_minus + quad(self.growth_curve, self.t_minus, t)[0]
        if se > 0:
            return se
        else:
            return 0
    def growth_curve(self, t):
        # Logistic curve: positive for Ca below eps, negative above;
        # psi controls the transition width.
        return self.growth_rate * (
            (2.0 / (1.0 + math.exp(
                (self.get_ca(t) - self.eps) / self.psi
            ))) - 1.0
        )
@unittest.skipIf(not HAVE_OPENMP, 'NEST was compiled without multi-threading')
class TestGrowthCurve(unittest.TestCase):
    """
    Unittest class to test the GrowthCurve used with nest
    """
    # Loose tolerance: NEST and the Python reference integrators only
    # have to agree to within 50% (spike timing differs at resolution level).
    rtol = 5e-1
    def setUp(self):
        """Build a small noise-driven network and reset all bookkeeping."""
        nest.ResetKernel()
        nest.set_verbosity('M_ERROR')
        nest.total_num_virtual_procs = 4
        nest.rng_seed = 1
        self.sim_time = 10000.0
        self.sim_step = 100
        # Push the automatic structural-plasticity update past the end of
        # the run so it never fires during the test.
        nest.structural_plasticity_update_interval = self.sim_time + 1
        self.se_integrator = []
        self.sim_steps = None
        self.ca_nest = None
        self.ca_python = None
        self.se_nest = None
        self.se_python = None
        # build
        self.pop = nest.Create('iaf_psc_alpha', 10)
        self.spike_recorder = nest.Create('spike_recorder')
        nest.Connect(self.pop, self.spike_recorder, 'all_to_all')
        noise = nest.Create('poisson_generator')
        nest.SetStatus(noise, {"rate": 800000.0})
        nest.Connect(noise, self.pop, 'all_to_all')
    def simulate(self):
        """Run the network, record NEST's Ca/synaptic-element traces and
        compare them against every reference integrator in
        ``self.se_integrator`` (per neuron, over all time steps)."""
        self.sim_steps = numpy.arange(0, self.sim_time, self.sim_step)
        self.ca_nest = numpy.zeros(
            (len(self.pop), len(self.sim_steps)))
        self.ca_python = numpy.zeros(
            (len(self.se_integrator), len(self.sim_steps)))
        self.se_nest = numpy.zeros(
            (len(self.pop), len(self.sim_steps)))
        self.se_python = numpy.zeros(
            (len(self.se_integrator), len(self.sim_steps)))
        for t_i, t in enumerate(self.sim_steps):
            for n_i in range(len(self.pop)):
                self.ca_nest[n_i][t_i], synaptic_elements = nest.GetStatus(
                    self.pop[n_i], ('Ca', 'synaptic_elements'))[0]
                self.se_nest[n_i][t_i] = synaptic_elements['se']['z']
            nest.Simulate(self.sim_step)
        tmp = nest.GetStatus(self.spike_recorder, 'events')[0]
        spikes_all = tmp['times']
        senders_all = tmp['senders']
        for n_i, n in enumerate(self.pop):
            spikes = spikes_all[senders_all == n.get('global_id')]
            [sei.reset() for sei in self.se_integrator]
            spike_i = 0
            for t_i, t in enumerate(self.sim_steps):
                # Feed every spike up to time t into the reference integrators.
                while spike_i < len(spikes) and spikes[spike_i] <= t:
                    [sei.handle_spike(spikes[spike_i])
                     for sei in self.se_integrator]
                    spike_i += 1
                for sei_i, sei in enumerate(self.se_integrator):
                    self.ca_python[sei_i, t_i] = sei.get_ca(t)
                    self.se_python[sei_i, t_i] = sei.get_se(t)
            for sei_i, sei in enumerate(self.se_integrator):
                testing.assert_allclose(self.ca_nest[n_i], self.ca_python[sei_i], rtol=self.rtol)
                testing.assert_allclose(self.se_nest[n_i], self.se_python[sei_i], rtol=self.rtol)
    def test_linear_growth_curve(self):
        """NEST's 'linear' growth curve vs. exact and numeric references."""
        beta_ca = 0.0001
        tau_ca = 10000.0
        growth_rate = 0.0001
        eps = 0.10
        nest.SetStatus(
            self.pop,
            {
                'beta_Ca': beta_ca,
                'tau_Ca': tau_ca,
                'synaptic_elements': {
                    'se': {
                        'growth_curve': 'linear',
                        'growth_rate': growth_rate,
                        'eps': eps,
                        'z': 0.0
                    }
                }
            }
        )
        self.se_integrator.append(LinearExactSEI(
            tau_ca=tau_ca, beta_ca=beta_ca, eps=eps, growth_rate=growth_rate))
        self.se_integrator.append(LinearNumericSEI(
            tau_ca=tau_ca, beta_ca=beta_ca, eps=eps, growth_rate=growth_rate))
        self.simulate()
        # check that we got the same values from one run to another
        # expected = self.se_nest[:, 10]
        # print(self.se_nest[:, 10].__repr__())
        expected = numpy.array([0.08374445370851626, 0.08374622771384878, 0.08372872162085436,
                                0.08377523830171477, 0.08376377406026522, 0.08377524732725673,
                                0.08375919233244447, 0.08371423280222919, 0.083725266553744,
                                0.08372601193476849])
        pop_as_list = list(self.pop)
        for n in self.pop:
            testing.assert_allclose(self.se_nest[pop_as_list.index(n), 10],
                                    expected[pop_as_list.index(n)],
                                    rtol=self.rtol,)
    def test_gaussian_growth_curve(self):
        """NEST's 'gaussian' growth curve vs. the numeric reference."""
        beta_ca = 0.0001
        tau_ca = 10000.0
        growth_rate = 0.0001
        eta = 0.05
        eps = 0.10
        nest.SetStatus(
            self.pop,
            {
                'beta_Ca': beta_ca,
                'tau_Ca': tau_ca,
                'synaptic_elements': {
                    'se': {
                        'growth_curve': 'gaussian',
                        'growth_rate': growth_rate,
                        'eta': eta, 'eps': eps, 'z': 0.0
                    }
                }
            }
        )
        self.se_integrator.append(
            GaussianNumericSEI(tau_ca=tau_ca, beta_ca=beta_ca,
                               eta=eta, eps=eps, growth_rate=growth_rate))
        self.simulate()
        # check that we got the same values from one run to another
        # expected = self.se_nest[:, 30]
        # print(self.se_nest[:, 30].__repr__())
        expected = numpy.array([0.10036617553986826, 0.10056553185703253, 0.10040107765414216,
                                0.10029244040007103, 0.10052276942680986, 0.10041568803099227,
                                0.10040451902882779, 0.10052255715855712, 0.1006910528746381,
                                0.10058568067154924])
        pop_as_list = list(self.pop)
        for n in self.pop:
            testing.assert_allclose(self.se_nest[pop_as_list.index(n), 30],
                                    expected[pop_as_list.index(n)],
                                    rtol=self.rtol,)
    def test_sigmoid_growth_curve(self):
        """NEST's 'sigmoid' growth curve vs. the numeric reference;
        parameters are set only on the MPI-local part of the population."""
        beta_ca = 0.0001
        tau_ca = 10000.0
        growth_rate = 0.0001
        eps = 0.10
        psi = 0.10
        local_nodes = nest.GetLocalNodeCollection(self.pop)
        local_nodes.set(
            {
                'beta_Ca': beta_ca,
                'tau_Ca': tau_ca,
                'synaptic_elements': {
                    'se': {
                        'growth_curve': 'sigmoid',
                        'growth_rate': growth_rate,
                        'eps': eps, 'psi': 0.1, 'z': 0.0
                    }
                }
            })
        self.se_integrator.append(
            SigmoidNumericSEI(tau_ca=tau_ca, beta_ca=beta_ca,
                              eps=eps, psi=psi, growth_rate=growth_rate))
        self.simulate()
        # check that we got the same values from one run to another
        # expected = self.se_nest[:, 30]
        # print(self.se_nest[:, 30].__repr__())
        expected = numpy.array([0.07798757689720627, 0.07796809230928879, 0.07796745199672085,
                                0.07807166878406996, 0.07794925570454732, 0.0780381869323308,
                                0.0780054060483019, 0.0779518888224286, 0.07792681014092591,
                                0.07798540508673037])
        local_pop_as_list = list(local_nodes)
        for count, n in enumerate(self.pop):
            loc = self.se_nest[local_pop_as_list.index(n), 30]
            ex = expected[count]
            testing.assert_allclose(loc, ex, rtol=self.rtol)
def suite():
    """Assemble the growth-curve tests into a :class:`unittest.TestSuite`.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13; ``TestLoader.loadTestsFromTestCase`` is the supported
    equivalent (its default method prefix is already ``'test'``, matching
    the prefix the old call passed explicitly).
    """
    test_suite = unittest.TestLoader().loadTestsFromTestCase(TestGrowthCurve)
    return test_suite
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
sanjayankur31/nest-simulator
|
testsuite/pytests/test_sp/test_growth_curves.py
|
Python
|
gpl-2.0
| 14,933
|
[
"Gaussian"
] |
6ffebc75a4b4c5d47179cab916fd93d67dc01b0a22397796e3ee0bbe0d6684ce
|
#######################################################################
#
#
# Next Event Renderer for Dreambox/Enigma-2
# Coded by Vali (c)2010
# Support: www.dreambox-tools.info
#
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
# To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/
# or send a letter to Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
#
#######################################################################
from Components.VariableText import VariableText
from enigma import eLabel, eEPGCache
from Components.config import config
from Renderer import Renderer
from time import localtime
class OMNextEvent(Renderer, VariableText):
    """Renderer showing start time, duration and title of the next EPG event
    of the currently tuned service."""

    def __init__(self):
        Renderer.__init__(self)
        VariableText.__init__(self)
        # Shared EPG cache instance used for the next-event lookup.
        self.epgcache = eEPGCache.getInstance()

    GUI_WIDGET = eLabel

    def changed(self, what):
        """Rebuild the rendered text whenever the source changes.

        ``what`` (the change descriptor) is ignored; the text is always
        recomputed from scratch.  (The original body was wrapped in a
        dead ``if True:`` block, removed here.)
        """
        ref = self.source.service
        info = ref and self.source.info
        if info is None:
            self.text = ""
            return
        next_text = ""
        # 'IBDCTSERNXM' query with offset 1 -> the event *following* the
        # currently running one on this service.
        event_next = self.epgcache.lookupEvent(['IBDCTSERNXM', (ref.toString(), 1, -1)])
        if event_next and event_next[0][4]:
            t = localtime(event_next[0][1])
            duration = "%d min" % (event_next[0][2] / 60)
            next_text = _("It follows:") + ' ' + "%02d:%02d %s\n%s" % (t[3], t[4], duration, event_next[0][4])
        self.text = next_text
|
openmips/stbgui
|
lib/python/Components/Renderer/OMNextEvent.py
|
Python
|
gpl-2.0
| 1,770
|
[
"VisIt"
] |
a4cd8888ddeee44b92772ee5d989419cf476b0311cedcf68f8de3b2b3eb64048
|
from setuptools import setup, find_packages
from os import path

# jupyter-pip is required at setup time; bootstrap-install it when missing.
try:
    from jupyterpip import cmdclass
except ImportError:
    # Was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt;
    # only a missing module should trigger the bootstrap install.
    import sys
    import subprocess
    import importlib
    subprocess.call([sys.executable, '-m', 'pip', 'install', 'jupyter-pip'])
    cmdclass = importlib.import_module('jupyterpip').cmdclass

here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst')) as f:
    long_description = f.read()
with open(path.join(here, 'requirements.txt')) as f:
    requirements = f.read().splitlines()
# Executing version.py defines __version__, used below.
with open(path.join(here, 'psctb', 'version.py')) as f:
    exec(f.read())
dlurl = 'http://github.com/PySCeS/PyscesToolbox/archive/' + __version__ + '.tar.gz'
packages = find_packages()
setup(
    name='PyscesToolbox',
    version=__version__,
    packages=packages,
    url='https://github.com/PySCeS/PyscesToolbox',
    download_url=dlurl,
    license='BSD-3-Clause',
    author='Carl Christensen and Johann Rohwer',
    author_email='exe0cdc@gmail.com, j.m.rohwer@gmail.com',
    description='A set of metabolic model analysis tools for PySCeS.',
    long_description=long_description,
    install_requires=requirements,
    package_data={'d3networkx_psctb': ['widget.js'],
                  'psctb': ['docs/*']},
    keywords=['metabolism', 'metabolic control analysis', 'modelling'],
    classifiers=['Development Status :: 5 - Production/Stable',
                 'Intended Audience :: Science/Research',
                 'Topic :: Scientific/Engineering :: Bio-Informatics',
                 'License :: OSI Approved :: BSD License',
                 'Programming Language :: Python :: 3'],
    cmdclass=cmdclass('d3networkx_psctb'),
)
|
PySCeS/PyscesToolbox
|
setup.py
|
Python
|
bsd-3-clause
| 1,776
|
[
"PySCeS"
] |
4f950c746607a05b9270d226734b24e32880369e410e9160cd44509b91c6a5db
|
#!/usr/bin/env python
from __future__ import print_function
import json
import optparse
import os
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "bowtie_indexes"
def get_id_name(params, dbkey, fasta_description=None):
    """Derive the (sequence_id, sequence_name) pair for the data table.

    Falls back through: explicit value -> fasta_description (name only)
    -> dbkey, treating empty strings the same as missing values.
    """
    # TODO: ensure sequence_id is unique and does not already appear in location file
    param_dict = params['param_dict']
    sequence_id = param_dict['sequence_id'] or dbkey
    sequence_name = param_dict['sequence_name'] or fasta_description or dbkey
    return sequence_id, sequence_name
def build_bowtie_index(data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME, color_space=False):
    """Run ``bowtie-build`` on a FASTA file and register the new index.

    The FASTA file is symlinked into ``target_directory`` and indexed
    there; on success a row is appended to ``data_manager_dict`` under
    ``data_table_name``.  On builder failure the captured stderr is
    echoed and the process exits with the builder's return code.
    """
    # TODO: allow multiple FASTA input files
    fasta_base_name = os.path.split(fasta_filename)[-1]
    sym_linked_fasta_filename = os.path.join(target_directory, fasta_base_name)
    os.symlink(fasta_filename, sym_linked_fasta_filename)
    args = ['bowtie-build']
    if color_space:
        args.append('-C')
    # bowtie-build takes exactly <reference_in> <ebwt_base>; the original
    # code appended the reference path a second time after the basename,
    # producing an invalid third positional argument.
    args.append(sym_linked_fasta_filename)
    args.append(fasta_base_name)
    tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-bowtie-index-builder-stderr")
    proc = subprocess.Popen(args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno())
    return_code = proc.wait()
    if return_code:
        tmp_stderr.flush()
        tmp_stderr.seek(0)
        print("Error building index:", file=sys.stderr)
        # Relay the builder's stderr in bounded chunks.
        while True:
            chunk = tmp_stderr.read(CHUNK_SIZE)
            if not chunk:
                break
            sys.stderr.write(chunk)
        sys.exit(return_code)
    tmp_stderr.close()
    data_table_entry = dict(value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name)
    _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def main():
    """Entry point: parse options, create the output directory, build the
    bowtie index described by the JSON parameter file (first positional
    argument) and write the resulting data-table entries back to it."""
    parser = optparse.OptionParser()
    parser.add_option('-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename')
    parser.add_option('-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey')
    parser.add_option('-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description')
    parser.add_option('-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name')
    parser.add_option('-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space')
    (options, args) = parser.parse_args()
    # The positional argument is the Galaxy JSON parameter file (read/write).
    filename = args[0]
    with open(filename) as fh:
        params = json.load(fh)
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)
    data_manager_dict = {}
    dbkey = options.fasta_dbkey
    if dbkey in [None, '', '?']:
        raise Exception('"%s" is not a valid dbkey. You must specify a valid dbkey.' % (dbkey))
    sequence_id, sequence_name = get_id_name(params, dbkey=dbkey, fasta_description=options.fasta_description)
    # build the index
    build_bowtie_index(data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space)
    # save info to json file
    with open(filename, 'w') as fh:
        json.dump(data_manager_dict, fh, sort_keys=True)
# Run as a stand-alone Galaxy data-manager script.
if __name__ == "__main__":
    main()
|
mvdbeek/tools-iuc
|
data_managers/data_manager_bowtie_index_builder/data_manager/bowtie_index_builder.py
|
Python
|
mit
| 4,110
|
[
"Bowtie"
] |
8feb931662c8340951da977c0389b36f6c1864735283f8359ecab3af6d506817
|
import numpy
from chainer.functions.connection import bilinear
from chainer import link
class Bilinear(link.Link):
    """Bilinear layer that performs tensor multiplication.
    Bilinear is a primitive link that wraps the
    :func:`~chainer.functions.bilinear` functions. It holds parameters ``W``,
    ``V1``, ``V2``, and ``b`` corresponding to the arguments of
    :func:`~chainer.functions.bilinear`.
    Args:
        left_size (int): Dimension of input vector :math:`e^1` (:math:`J`)
        right_size (int): Dimension of input vector :math:`e^2` (:math:`K`)
        out_size (int): Dimension of output vector :math:`y` (:math:`L`)
        nobias (bool): If ``True``, parameters ``V1``, ``V2``, and ``b`` are
            omitted.
        initialW (3-D numpy array): Initial value of :math:`W`.
            Shape of this argument must be
            ``(left_size, right_size, out_size)``. If ``None``,
            :math:`W` is initialized by centered Gaussian distribution properly
            scaled according to the dimension of inputs and outputs.
        initial_bias (tuple): Initial values of :math:`V^1`, :math:`V^2`
            and :math:`b`. The length this argument must be 3.
            Each element of this tuple must have the shapes of
            ``(left_size, output_size)``, ``(right_size, output_size)``,
            and ``(output_size,)``, respectively. If ``None``, :math:`V^1`
            and :math:`V^2` is initialized by scaled centered Gaussian
            distributions and :math:`b` is set to :math:`0`.
    .. seealso:: See :func:`chainer.functions.bilinear` for details.
    Attributes:
        W (~chainer.Variable): Bilinear weight parameter.
        V1 (~chainer.Variable): Linear weight parameter for the first argument.
        V2 (~chainer.Variable): Linear weight parameter for the second
            argument.
        b (~chainer.Variable): Bias parameter.
    """
    def __init__(self, left_size, right_size, out_size, nobias=False,
                 initialW=None, initial_bias=None):
        # W has shape (left_size, right_size, out_size).
        super(Bilinear, self).__init__(W=(left_size, right_size, out_size))
        self.in_sizes = (left_size, right_size)
        self.nobias = nobias
        if initialW is not None:
            assert initialW.shape == self.W.data.shape
            self.W.data[...] = initialW
        else:
            # TODO(Kenta OONO): I do not know appropriate way of
            # initializing weights in tensor network.
            # This initialization is a modification of
            # that of Linear function.
            in_size = left_size * right_size * out_size
            self.W.data[...] = numpy.random.normal(
                0, numpy.sqrt(1. / in_size), self.W.data.shape)
        if not self.nobias:
            self.add_param('V1', (left_size, out_size))
            self.add_param('V2', (right_size, out_size))
            self.add_param('b', out_size)
            if initial_bias is not None:
                V1, V2, b = initial_bias
                assert V1.shape == self.V1.data.shape
                assert V2.shape == self.V2.data.shape
                assert b.shape == self.b.data.shape
                self.V1.data[...] = V1
                self.V2.data[...] = V2
                self.b.data[...] = b
            else:
                # Scaled centered Gaussian init for the linear terms,
                # zero for the bias (see class docstring).
                self.V1.data[...] = numpy.random.normal(
                    0, numpy.sqrt(1. / left_size), (left_size, out_size))
                self.V2.data[...] = numpy.random.normal(
                    0, numpy.sqrt(1. / right_size), (right_size, out_size))
                self.b.data.fill(0)
    def __call__(self, e1, e2):
        """Applies the bilinear function to inputs and the internal parameters.
        Args:
            e1 (~chainer.Variable): Left input.
            e2 (~chainer.Variable): Right input.
        Returns:
            ~chainer.Variable: Output variable.
        """
        if self.nobias:
            return bilinear.bilinear(e1, e2, self.W)
        else:
            return bilinear.bilinear(e1, e2, self.W, self.V1, self.V2, self.b)
    def zero_grads(self):
        # Left for backward compatibility
        self.zerograds()
|
AlpacaDB/chainer
|
chainer/links/connection/bilinear.py
|
Python
|
mit
| 4,147
|
[
"Gaussian"
] |
f28445425feeb1111dfb101b77c949e0cc031909e06c145792e8535af2fd31af
|
from matplotlib import rcParams, rc
import numpy as np
import sys
from fitFunctions import gaussian
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
def calcThreshold(phase, Nsigma=2.5, nSamples=5000):
    """Estimate a trigger threshold from the empirical phase distribution.

    Builds a 100-bin histogram of the first ``nSamples`` phase values,
    locates the median and the 5th percentile from the empirical CDF, and
    returns ``int(median - Nsigma * |median - p5|)``.

    Fixes vs. the original: the Python-2-only ``xrange`` is gone, and the
    O(n^2) per-bin summation loop is replaced by ``np.cumsum``, which
    yields the identical cumulative fractions.
    """
    n, bins = np.histogram(phase[:nSamples], bins=100)
    n = np.array(n, dtype='float32') / np.sum(n)
    # Empirical CDF at the bin edges: tot[i] = fraction of samples in bins[:i].
    tot = np.concatenate(([0.], np.cumsum(n)))
    med = bins[np.abs(tot - 0.5).argmin()]
    thresh = bins[np.abs(tot - 0.05).argmin()]
    threshold = int(med - Nsigma * abs(med - thresh))
    return threshold
def oldBaseFilter(data, alpha=0.08):
    """Legacy IIR baseline estimator.

    Implements y[n] = (1 - alpha) * y[n - 10] + alpha * x[n - 30].

    Bug fix: the original body unconditionally re-assigned ``alpha = 0.08``,
    silently ignoring the caller's argument.  The parameter is now honoured;
    the default keeps the historical value, so existing calls are unchanged.
    """
    # construct IIR: a single delayed input tap and a delayed feedback tap
    numCoeffs = np.zeros(31)
    numCoeffs[30] = alpha
    denomCoeffs = np.zeros(11)
    denomCoeffs[0] = 1
    denomCoeffs[10] = -(1 - alpha)
    baselines = scipy.signal.lfilter(numCoeffs, denomCoeffs, data)
    return baselines
def detectPulses(sample,threshold,baselines,deadtime=10):
    """Find pulse peaks: samples below threshold (relative to baseline),
    preceded by ~10 falling samples and followed by 2 rising ones, with a
    dead time applied between accepted peaks.

    Returns (peakIndices, peakHeights, peakBaselines); three empty arrays
    if nothing triggers.
    """
    #deadtime in ticks (us)
    data = np.array(sample)
    #threshold = calcThreshold(data[0:2000])
    dataSubBase = data - baselines
    derivative = np.diff(data)
    peakHeights = []
    t = 0
    negDeriv = derivative <= 0
    posDeriv = np.logical_not(negDeriv)
    print np.shape(derivative)
    print np.shape(data)
    print np.shape(negDeriv)
    # Require at least (nNegDerivChecks - lenience) falling samples before
    # the candidate and two rising samples after it.
    nNegDerivChecks = 10
    lenience = 1
    triggerBooleans = dataSubBase[nNegDerivChecks:-2] < threshold
    negDerivChecksSum = np.zeros(len(negDeriv[0:-nNegDerivChecks-1]))
    for i in range(nNegDerivChecks):
        negDerivChecksSum += negDeriv[i:i-nNegDerivChecks-1]
    peakCondition0 = negDerivChecksSum >= nNegDerivChecks-lenience
    peakCondition1 = np.logical_and(posDeriv[nNegDerivChecks:-1],posDeriv[nNegDerivChecks+1:])
    peakCondition01 = np.logical_and(peakCondition0,peakCondition1)
    peakBooleans = np.logical_and(triggerBooleans,peakCondition01)
    try:
        # Shift back to absolute sample indices, then thin out peaks that
        # fall within 'deadtime' ticks of an accepted one.
        peakIndices = np.where(peakBooleans)[0]+nNegDerivChecks
        i = 0
        p = peakIndices[i]
        while p < peakIndices[-1]:
            peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
            i+=1
            if i < len(peakIndices):
                p = peakIndices[i]
            else:
                p = peakIndices[-1]
    except IndexError:
        # No candidate peaks at all.
        return np.array([]),np.array([]),np.array([])
    peakHeights = data[peakIndices]
    peakBaselines = baselines[peakIndices]
    return peakIndices,peakHeights,peakBaselines
def oldDetectPulses(sample,threshold,baselines):
    """Older peak finder: accepts a sample below threshold (relative to
    baseline) at a local minimum (one falling then two rising derivative
    samples), with a fixed 10-tick dead time.

    Returns (peakIndices, peakHeights, peakBaselines); three empty arrays
    if nothing triggers.  NOTE: unlike detectPulses, heights are reported
    on the baseline-subtracted trace.
    """
    filtered = np.array(sample)
    #threshold = calcThreshold(filtered[0:2000])
    filtered -= baselines
    derivative = np.diff(filtered)
    peakHeights = []
    t = 0
    negDeriv = derivative <= 0
    posDeriv = np.logical_not(negDeriv)
    triggerBooleans = filtered[1:-2] < threshold
    peakCondition1 = np.logical_and(negDeriv[0:-2],posDeriv[1:-1])
    peakCondition2 = np.logical_and(triggerBooleans,posDeriv[2:])
    peakBooleans = np.logical_and(peakCondition1,peakCondition2)
    try:
        # +1 converts derivative indices back to sample indices.
        peakIndices = np.where(peakBooleans)[0]+1
        i = 0
        p = peakIndices[i]
        deadtime=10#us
        while p < peakIndices[-1]:
            peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
            i+=1
            if i < len(peakIndices):
                p = peakIndices[i]
            else:
                p = peakIndices[-1]
    except IndexError:
        # No candidate peaks at all.
        return np.array([]),np.array([]),np.array([])
    peakHeights = filtered[peakIndices]
    peakBaselines = baselines[peakIndices]
    return peakIndices,peakHeights,peakBaselines
# ---- Dataset selection and raw phase loading (Python 2 script body) ----
rootFolder = '/home/kids/labData/'
quietFolder = '/home/kids/labData/20130925/blue/'
sampleRate=1e6 # 1 MHz
# The commented blocks below are alternative datasets kept for reference.
#roachNum = 0
#pixelNum = 51
#secs=60
#folder = '/home/kids/labData/20130925/blue/'
#cps=700
#bFiltered = False
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
#quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,30,0))
#label='Blue'
#roachNum = 0
#pixelNum = 51
#secs=60
#folder = '/home/kids/labData/20130925/red/'
#cps=600
#bFiltered = False
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
#quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,30,0))
#label='Red'
#roachNum = 0
#pixelNum = 134
#secs=5
#folder = '/home/kids/labData/20130220/'
#cps=700
#bFiltered = True
#phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(roachNum,pixelNum,secs,cps))
roachNum = 4
pixelNum = 2
secs=20
folder = os.path.join(rootFolder,'20121123/')
bFiltered = False
phaseFilename = os.path.join(folder,'ch_snap_r%dp%d_%dsecs.dat'%(roachNum,pixelNum,secs))
#missing quiet file, so use another
quietFilename = os.path.join(quietFolder,'ch_snap_r%dp%d_%dsecs_%dcps.dat'%(0,51,30,0))
bPlotPeaks = True
deadtime=10
phaseFile = open(phaseFilename,'r')
quietFile = open(quietFilename,'r')
phase = phaseFile.read()
quietPhase = quietFile.read()
# Unpack big-endian 16-bit ADC samples from the raw snapshots.
numQDRSamples=2**19
numBytesPerSample=4
nLongsnapSamples = numQDRSamples*2*secs
qdrValues = struct.unpack('>%dh'%(nLongsnapSamples),phase)
qdrPhaseValues = np.array(qdrValues,dtype=np.float32)*360./2**16*4/np.pi #convert from adc units to degrees
#nPhaseValues=len(qdrValues)
nPhaseValues=int(1e5)
print nPhaseValues,'us'
quietQdrValues = struct.unpack('>%dh'%(numQDRSamples*2*30),quietPhase)
quietQdrPhaseValues = np.array(quietQdrValues,dtype=np.float32)*360./2**16*4/np.pi #convert from adc units to degrees
# ---- Optimal filtering, baseline estimation and threshold computation ----
fig = plt.figure()
NAxes = 1
iAxes = 1
size=26
offset = 3
sampleStart = 5000
nSamples = nPhaseValues-sampleStart
thresholdLength = 2000
thresholdSigma = 2.1
sample=qdrValues[sampleStart:sampleStart+nSamples]
quietSample=quietQdrValues[sampleStart:sampleStart+thresholdLength]
#sample = np.array(qdrPhaseValues)
if bFiltered == False:
    # Apply the matched (optimal) FIR filter to raw and quiet data.
    rawdata = np.array(sample)
    quietRawdata = np.array(quietSample)
    filter= np.loadtxt(os.path.join(rootFolder,'fir/template20121207r%d.txt'%roachNum))[pixelNum,:]
    #lpf250kHz= np.loadtxt('/Scratch/filterData/fir/lpf_250kHz.txt')
    matched30= np.loadtxt(os.path.join(rootFolder,'fir/matched_30us.txt'))
    filter=matched30
    #data = np.correlate(filter,rawdata,mode='same')[::-1]
    data = scipy.signal.lfilter(filter,1,rawdata)
    #quietData = np.correlate(filter,quietRawdata,mode='same')[::-1]
    quietData = scipy.signal.lfilter(filter,1,quietRawdata)
    print 'filtering done'
    sys.stdout.flush()
else:
    data = np.array(sample)
    quietData = np.array(quietSample)
# Baseline via high-pass state-variable filters (several variants tried).
alpha=.999
hpOnePole = IirFilter(sampleFreqHz=sampleRate,numCoeffs=np.array([1,-1]),denomCoeffs=np.array([1,-alpha]))
criticalFreq = 200 #Hz
hpSos = IirFilter(sampleFreqHz=sampleRate,criticalFreqHz=criticalFreq,btype='highpass')
f=2*np.sin(np.pi*criticalFreq/sampleRate)
Q=.7
q=1./Q
hpSvf = IirFilter(sampleFreqHz=sampleRate,numCoeffs=np.array([1,-2,1]),denomCoeffs=np.array([1+f**2, f*q-2,1-f*q]))
criticalFreq = 100 #Hz
f=2*np.sin(np.pi*criticalFreq/sampleRate)
hpSvf2 = IirFilter(sampleFreqHz=sampleRate,numCoeffs=np.array([1,-2,1]),denomCoeffs=np.array([1+f**2, f*q-2,1-f*q]))
baselines = data - hpSvf.filterData(data)
oldBaselines = oldBaseFilter(data)
#oldBaselines = data - hpSvf2.filterData(data)
print 'baselines done'
# Threshold is derived from the quiet (dark) dataset.
threshold = calcThreshold(quietData,Nsigma=thresholdSigma)
print 'threshold done'
sys.stdout.flush()
endIdx = 1000*thresholdLength
# ---- Pulse detection and plotting of both baseline variants ----
if bPlotPeaks:
    ax=fig.add_subplot(NAxes,1,iAxes)
    ax.plot(data[0:endIdx],'k.-',label='optimal filtered phase')
    ax.plot(baselines[0:endIdx],'b',label='lpf baseline')
    ax.plot(baselines[0:endIdx]+threshold,'y--',label='threshold')
    ax.plot(oldBaselines[0:endIdx],'c',label='lpf baseline old')
    ax.plot(oldBaselines[0:endIdx]+threshold,'g--',label='threshold old')
    # Detect against both the new and old baseline estimates.
    idx,peaks,bases = detectPulses(data,threshold,baselines)
    idx2,peaks2,bases2 = detectPulses(data,threshold,oldBaselines)
print len(peaks),'peaks detected'
print len(peaks2),'old peaks detected'
sys.stdout.flush()
#
#
if len(peaks)>0:
    if bPlotPeaks:
        ax.plot(idx2,peaks2,'bd',label='detected peak2')
        ax.plot(idx,peaks,'r.',label='detected peak')
        ax.plot(idx,bases,'g.',label='detected baseline')
        ax.set_xlabel('time (us)')
        ax.set_ylabel('phase (${}^{\circ}$)')
        #ax.set_xlim([5000,15000])
        #ax.set_title('detected peaks and baseline for ~%d cps, pixel /r%d/p%d'%(cps,roachNum,pixelNum))
        ax.legend(loc='lower right')
        iAxes+=1
# np.savez('sdetected%d%s_dead%d.npz'%(cps,label,deadtime),idx=idx,peaks=peaks,bases=bases,baselines=baselines,baselines2=baselines2,threshold=threshold,qdrValues=qdrValues,data=data,peaks2=peaks2,bases2=bases2,idx2=idx2)
print 'done'
sys.stdout.flush()
plt.show()
|
bmazin/SDR
|
Projects/Simulator/simulateSOS.py
|
Python
|
gpl-2.0
| 9,421
|
[
"Gaussian"
] |
27427cd102d09725cbd40121bca008cd94dd6f24d34c94adebb7968b7287912e
|
import wx
from confmanager import ConfManager
"""Set up some example stuff, so you can get a working example of the classes in action:"""
# Demo entry point: build a small ConfManager-driven preferences GUI.
if __name__ == '__main__':
    a = wx.App() # Create the main wx app
    c = ConfManager('test') # Make a ConfManager and call it test
    c.add_section('people') # Add a section called people
    # Add some options:
    c.set('people', 'name', 'John Smith', title = 'Customer &Name', validate = lambda value: None if len(value) >= 4 else 'No one is name is that short!', help = 'The name of the customer')
    c.set('people', 'age', 43, title = 'Customer &Age', validate = lambda value: None if (value > 18 and value <= 120) else 'Sorry, but we don\'t have babies or dead people on our books.', help = 'The age of the customer')
    c.set('people', 'height', 0, title = 'Customer &Height', validate = lambda value: None if value >= 100 else 'I don\'t like small types!', help = 'The height of the customer in centemeters')
    c.add_section('likes', 'Likes and Dislikes')
    c.set('likes', 'chocolate', False, title = 'Lovely Milky &Chocolate', help = 'You know what chocolate is')
    c.set('likes', 'jam', True, title = '&Jam (Any Flavour You Like)', help = 'My mum makes the best jam')
    c.add_section('misc', 'Miscelaneous')
    c.set('misc', 'about', '', kwargs = {'style': wx.TE_RICH|wx.TE_MULTILINE})
    # Imports used only by the file-browse example option below.
    import wx.lib.filebrowsebutton as FBB
    import os
    c.set('misc', 'file', '', title = 'Upload a &file', control = FBB.FileBrowseButton, validate = lambda value: None if os.path.exists(value) else 'You must provide a file to load!')
    c.helpFunc = lambda event: wx.MessageBox('This is a test page.\n\nFor updates to this library, please visit github.com/chrisnorman7/confmanager.git', 'Help')
    c.get_gui().Show(True) # Show the resulting GUI
    a.MainLoop() # Run the mainloop to ensure a proper application
|
chrisnorman7/confmanager
|
example.py
|
Python
|
mpl-2.0
| 1,841
|
[
"VisIt"
] |
8ed4ef332955e43ed310a64ac36eda29d84087acca71a01634609dee3ba261c8
|
# -*- coding: utf-8 -*-
"""INI time-to-first-spike simulator backend with dynamic threshold.
This module defines the layer objects used to create a spiking neural network
for our built-in INI simulator
:py:mod:`~snntoolbox.simulation.target_simulators.INI_ttfs_dyn_thresh_target_sim`.
The coding scheme underlying this conversion is that the instantaneous firing
rate is given by the inverse time-to-first-spike. In contrast to
:py:mod:`~snntoolbox.simulation.target_simulators.INI_ttfs_target_sim`, this
one features a threshold that adapts dynamically to the amount of input a
neuron has received.
This simulator works only with Keras backend set to Tensorflow.
@author: rbodo
"""
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as k
from tensorflow.keras.layers import Dense, Flatten, AveragePooling2D, \
MaxPooling2D, Conv2D, Layer, DepthwiseConv2D
from snntoolbox.simulation.backends.inisim.ttfs import SpikeConcatenate, \
SpikeZeroPadding2D, SpikeReshape
class SpikeLayer(Layer):
    """Base class for layer with spiking neurons.

    Implements the time-to-first-spike (TTFS) cell dynamics with a
    dynamically adapting threshold on top of a Keras ``Layer``. State
    (membrane potential, refractory clock, last spike times, ...) lives in
    backend variables that persist across calls; concrete layers mix this
    class with a standard Keras layer and route ``call`` through the
    ``spike_call`` decorator.

    NOTE(review): every state tensor is written as a doubled batch
    (``k.concatenate([c, c], 0)`` throughout); presumably the first
    ``batch_size`` rows carry the actual output and the second half the
    "prospective spikes" -- confirm against the target simulator module.
    """
    def __init__(self, **kwargs):
        # Toolbox settings arrive in a 'config' kwarg that must not be
        # forwarded to the Keras base class.
        self.config = kwargs.pop(str('config'), None)
        self.layer_type = self.class_name
        self.batch_size = self.config.getint('simulation', 'batch_size')
        self.dt = self.config.getfloat('simulation', 'dt')
        self.duration = self.config.getint('simulation', 'duration')
        self.tau_refrac = self.config.getfloat('cell', 'tau_refrac')
        # `_v_thresh` is the fixed baseline threshold; `v_thresh` becomes a
        # per-neuron variable in `init_neurons` and adapts each timestep.
        self._v_thresh = self.config.getfloat('cell', 'v_thresh')
        self.v_thresh = None
        self.time = None
        # Spiking state; allocated lazily in `init_neurons`.
        self.mem = self.spiketrain = self.impulse = None
        self.refrac_until = None
        # Shadow copies of the weights, used by `spike_call` to park the
        # real kernel/bias while the layer runs with modified weights.
        self._kernel = self._bias = None
        self.last_spiketimes = None
        self.prospective_spikes = None
        self.missing_impulse = None
        # Keras `Layer` accepts only a fixed set of kwargs; drop anything
        # else (e.g. settings already consumed above or by subclasses).
        allowed_kwargs = {'input_shape',
                          'batch_input_shape',
                          'batch_size',
                          'dtype',
                          'name',
                          'trainable',
                          'weights',
                          'input_dtype',  # legacy
                          }
        for kwarg in kwargs.copy():
            if kwarg not in allowed_kwargs:
                kwargs.pop(kwarg)
        Layer.__init__(self, **kwargs)
        self.stateful = True
    def reset(self, sample_idx):
        """Reset layer variables."""
        self.reset_spikevars(sample_idx)
    @property
    def class_name(self):
        """Get class name."""
        return self.__class__.__name__
    def update_neurons(self):
        """Update neurons according to activation function.

        Returns the post-synaptic potential emitted by this layer at the
        current timestep, cast to the backend float type.
        """
        # Update membrane potentials.
        new_mem = self.get_new_mem()
        # Generate spikes.
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            output_spikes = self.softmax_activation(new_mem)
        else:
            output_spikes = self.linear_activation(new_mem)
        # Reset membrane potential after spikes.
        self.set_reset_mem(new_mem, output_spikes)
        # Store refractory period after spikes.
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            # We do not constrain softmax output neurons.
            new_refrac = tf.identity(self.refrac_until)
        else:
            # Neurons that spiked are silenced until time + tau_refrac.
            new_refrac = tf.where(k.not_equal(output_spikes, 0),
                                  k.ones_like(output_spikes) *
                                  (self.time + self.tau_refrac),
                                  self.refrac_until)
        # Duplicate the first `batch_size` rows into the second half of the
        # state tensor (see class docstring on the doubled batch).
        c = new_refrac[:self.batch_size]
        cc = k.concatenate([c, c], 0)
        updates = [self.refrac_until.assign(cc)]
        if self.spiketrain is not None:
            # Record spike times (0 where no spike) for logging/plotting.
            c = self.time * k.cast(k.not_equal(output_spikes, 0),
                                   k.floatx())[:self.batch_size]
            cc = k.concatenate([c, c], 0)
            updates += [self.spiketrain.assign(cc)]
        with tf.control_dependencies(updates):
            # A neuron is a "prospective" spiker when it received positive
            # input and is not currently refractory.
            masked_impulse = \
                tf.where(k.greater(self.refrac_until, self.time),
                         k.zeros_like(self.impulse), self.impulse)
            c = k.greater(masked_impulse, 0)[:self.batch_size]
            cc = k.cast(k.concatenate([c, c], 0), k.floatx())
            updates = [self.prospective_spikes.assign(cc)]
            # Dynamic threshold: baseline plus the input the neuron is still
            # missing (computed by `spike_call`).
            new_thresh = self._v_thresh * k.ones_like(self.v_thresh) + \
                self.missing_impulse
            updates += [self.v_thresh.assign(new_thresh)]
            with tf.control_dependencies(updates):
                # Compute post-synaptic potential.
                psp = self.get_psp(output_spikes)
        return k.cast(psp, k.floatx())
    def linear_activation(self, mem):
        """Linear activation."""
        # Binary spike wherever the membrane potential reaches threshold.
        return k.cast(k.greater_equal(mem, self.v_thresh), k.floatx())
    @staticmethod
    def softmax_activation(mem):
        """Softmax activation."""
        # Stochastic spiking: spike with probability softmax(mem).
        return k.cast(k.less_equal(k.random_uniform(k.shape(mem)),
                                   k.softmax(mem)), k.floatx())
    def get_new_mem(self):
        """Add input to membrane potential."""
        # Destroy impulse if in refractory period
        masked_impulse = self.impulse if self.tau_refrac == 0 else \
            tf.where(k.greater(self.refrac_until, self.time),
                     k.zeros_like(self.impulse), self.impulse)
        new_mem = self.mem + masked_impulse
        if self.config.getboolean('cell', 'leak'):
            # Todo: Implement more flexible version of leak!
            # Hard-coded constant leak of 0.1 per unit time, applied only
            # to positive potentials.
            new_mem = tf.where(k.greater(new_mem, 0),
                               new_mem - 0.1 * self.dt, new_mem)
        return new_mem
    def set_reset_mem(self, mem, spikes):
        """
        Reset membrane potential ``mem`` array where ``spikes`` array is
        nonzero.
        """
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            # Softmax output neurons are never reset.
            new = tf.identity(mem)
        else:
            new = tf.where(k.not_equal(spikes, 0), k.zeros_like(mem), mem)
        self.add_update([(self.mem, new)])
    def get_psp(self, output_spikes):
        """Compute the post-synaptic potential transmitted downstream.

        A neuron that has spiked at least once keeps transmitting a
        constant ``dt`` per step; silent neurons transmit 0. Softmax layers
        pass their spikes through unchanged.
        """
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            psp = tf.identity(output_spikes)
        else:
            # Remember the latest spike time of each neuron.
            new_spiketimes = tf.where(k.not_equal(output_spikes, 0),
                                      k.ones_like(output_spikes) * self.time,
                                      self.last_spiketimes)
            assign_new_spiketimes = self.last_spiketimes.assign(new_spiketimes)
            with tf.control_dependencies([assign_new_spiketimes]):
                last_spiketimes = self.last_spiketimes + 0  # Dummy op
                psp = tf.where(k.greater(last_spiketimes, 0),
                               k.ones_like(output_spikes) * self.dt,
                               k.zeros_like(output_spikes))
        return psp
    def get_time(self):
        """Get simulation time variable.
    Returns
    -------
    time: float
        Current simulation time.
        """
        return k.get_value(self.time)
    def set_time(self, time):
        """Set simulation time variable.
    Parameters
    ----------
    time: float
        Current simulation time.
        """
        k.set_value(self.time, time)
    def init_membrane_potential(self, output_shape=None, mode='zero'):
        """Initialize membrane potential.
    Helpful to avoid transient response in the beginning of the simulation.
    Not needed when reset between frames is turned off, e.g. with a video
    data set.
    Parameters
    ----------
    output_shape: Optional[tuple]
        Output shape
    mode: str
        Initialization mode.
        - ``'uniform'``: Random numbers from uniform distribution in
          ``[-thr, thr]``.
        - ``'bias'``: Negative bias.
        - ``'zero'``: Zero (default).
    Returns
    -------
    init_mem: ndarray
        A tensor of ``self.output_shape`` (same as layer).
        """
        if output_shape is None:
            output_shape = self.output_shape
        if mode == 'uniform':
            init_mem = k.random_uniform(output_shape,
                                        -self._v_thresh, self._v_thresh)
        elif mode == 'bias':
            init_mem = np.zeros(output_shape, k.floatx())
            if hasattr(self, 'b'):
                b = self.get_weights()[1]
                for i in range(len(b)):
                    # NOTE(review): indexes the bias along axis 1, i.e.
                    # assumes a channels-first layout here -- confirm.
                    init_mem[:, i, Ellipsis] = -b[i]
        else:  # mode == 'zero':
            init_mem = np.zeros(output_shape, k.floatx())
        return init_mem
    def reset_spikevars(self, sample_idx):
        """
        Reset variables present in spiking layers. Can be turned off for
        instance when a video sequence is tested.
        """
        # Reset only every `reset_between_nth_sample` samples; a value of 0
        # in the config means "reset for every sample".
        mod = self.config.getint('simulation', 'reset_between_nth_sample')
        mod = mod if mod else sample_idx + 1
        do_reset = sample_idx % mod == 0
        if do_reset:
            k.set_value(self.mem, self.init_membrane_potential())
        k.set_value(self.time, np.float32(self.dt))
        zeros_output_shape = np.zeros(self.output_shape, k.floatx())
        if self.tau_refrac > 0:
            k.set_value(self.refrac_until, zeros_output_shape)
        if self.spiketrain is not None:
            k.set_value(self.spiketrain, zeros_output_shape)
        # -1 marks "has never spiked".
        k.set_value(self.last_spiketimes, zeros_output_shape - 1)
        k.set_value(self.v_thresh, zeros_output_shape + self._v_thresh)
        k.set_value(self.prospective_spikes, zeros_output_shape)
        k.set_value(self.missing_impulse, zeros_output_shape)
    def init_neurons(self, input_shape):
        """Init layer neurons."""
        from snntoolbox.bin.utils import get_log_keys, get_plot_keys
        output_shape = self.compute_output_shape(input_shape)
        self.v_thresh = k.variable(self._v_thresh)
        self.mem = k.variable(self.init_membrane_potential(output_shape))
        self.time = k.variable(self.dt)
        # To save memory and computations, allocate only where needed:
        if self.tau_refrac > 0:
            self.refrac_until = k.zeros(output_shape)
        # Spiketrain recording is only allocated when some plot/log key
        # requests it.
        if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
                'hist_spikerates_activations', 'operations',
                'synaptic_operations_b_t', 'neuron_operations_b_t',
                'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
                                          get_log_keys(self.config))):
            self.spiketrain = k.zeros(output_shape)
        self.last_spiketimes = k.variable(-np.ones(output_shape))
        # Note: replaces the scalar `v_thresh` created above with a full
        # per-neuron tensor of the same baseline value.
        self.v_thresh = k.variable(self._v_thresh * np.ones(output_shape))
        self.prospective_spikes = k.variable(np.zeros(output_shape))
        self.missing_impulse = k.variable(np.zeros(output_shape))
    def get_layer_idx(self):
        """Get index of layer."""
        # Layer names look like "<idx><Type>_..."; take the longest digit
        # prefix of the part before the first underscore.
        label = self.name.split('_')[0]
        layer_idx = None
        for i in range(len(label)):
            if label[:i].isdigit():
                layer_idx = int(label[:i])
        # NOTE(review): a label that is *all* digits would lose its final
        # digit here (the loop stops at len-1) -- presumably labels always
        # carry a trailing type name; confirm.
        return layer_idx
def spike_call(call):
    """Decorator wrapping a Keras layer's ``call`` with the spiking update.

    Before the real forward pass it computes the layer's ``missing_impulse``
    (used to adapt the threshold): for weighted layers the layer is
    temporarily run with ``|W|`` and zero bias, for average pooling it is
    run as-is. The real weights are stashed in the ``_kernel``/``_bias``
    shadow variables and restored afterwards; the nested
    ``tf.control_dependencies`` blocks enforce this save/modify/run/restore
    ordering inside the graph.
    """
    def decorator(self, x):
        updates = []
        if hasattr(self, 'kernel'):
            # Stash the real weights in the shadow variables.
            store_old_kernel = self._kernel.assign(self.kernel)
            store_old_bias = self._bias.assign(self.bias)
            updates += [store_old_kernel, store_old_bias]
            with tf.control_dependencies(updates):
                # Run the layer with |W| and zero bias to measure how much
                # input each neuron could still receive.
                new_kernel = k.abs(self.kernel)
                new_bias = k.zeros_like(self.bias)
                assign_new_kernel = self.kernel.assign(new_kernel)
                assign_new_bias = self.bias.assign(new_bias)
                updates += [assign_new_kernel, assign_new_bias]
                with tf.control_dependencies(updates):
                    # Second half of the doubled batch holds the
                    # prospective-spike input (see SpikeLayer).
                    c = call(self, x)[self.batch_size:]
                    cc = k.concatenate([c, c], 0)
                    updates = [self.missing_impulse.assign(cc)]
                    with tf.control_dependencies(updates):
                        # Restore the real weights.
                        updates = [self.kernel.assign(self._kernel),
                                   self.bias.assign(self._bias)]
        elif 'AveragePooling' in self.name:
            # Pooling has no weights to modify; use the plain forward pass.
            c = call(self, x)[self.batch_size:]
            cc = k.concatenate([c, c], 0)
            updates = [self.missing_impulse.assign(cc)]
        else:
            updates = []
        with tf.control_dependencies(updates):
            # Only call layer if there are input spikes. This is to prevent
            # accumulation of bias.
            self.impulse = \
                tf.cond(k.any(k.not_equal(x[:self.batch_size], 0)),
                        lambda: call(self, x),
                        lambda: k.zeros_like(self.mem))
            psp = self.update_neurons()[:self.batch_size]
        # Output: real PSP in the first half, prospective spikes in the
        # second half of the doubled batch.
        return k.concatenate([psp,
                              self.prospective_spikes[self.batch_size:]], 0)
    return decorator
class SpikeFlatten(Flatten):
    """Spiking version of the Flatten layer (stateless pass-through)."""

    def __init__(self, **kwargs):
        # Pull the toolbox config out before forwarding kwargs to Keras.
        self.config = kwargs.pop(str('config'), None)
        self.batch_size = self.config.getint('simulation', 'batch_size')
        Flatten.__init__(self, **kwargs)

    def call(self, x, mask=None):
        flat_psp = k.cast(Flatten.call(self, x), k.floatx())
        flat_prospective = Flatten.call(self, x)
        # First half of the doubled batch carries the PSP, second half the
        # prospective spikes; flatten each part and stitch them back.
        return k.concatenate([flat_psp[:self.batch_size],
                              flat_prospective[self.batch_size:]], 0)

    @staticmethod
    def get_time():
        """This layer keeps no clock; report no simulation time."""
        return None

    def reset(self, sample_idx):
        """Nothing to reset: the layer holds no spiking state."""
        pass

    @property
    def class_name(self):
        """Name of this class."""
        return type(self).__name__
class SpikeDense(Dense, SpikeLayer):
    """Spiking fully-connected (Dense) layer."""

    def build(self, input_shape):
        """Create the dense weights plus the spiking state.

        Parameters
        ----------
        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras
            tensors to reference for weight shape computations.
        """
        Dense.build(self, input_shape)
        self.init_neurons(input_shape)
        # Shadow variables: spike_call parks the real kernel/bias here
        # while it temporarily rewires the layer.
        self._kernel = tf.Variable(lambda: tf.zeros_like(self.kernel))
        self._bias = tf.Variable(lambda: tf.zeros_like(self.bias))

    @spike_call
    def call(self, x, **kwargs):
        return Dense.call(self, x)
class SpikeConv2D(Conv2D, SpikeLayer):
    """Spiking 2D convolution layer."""

    def build(self, input_shape):
        """Create the convolution weights plus the spiking state.

        Parameters
        ----------
        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras
            tensors to reference for weight shape computations.
        """
        Conv2D.build(self, input_shape)
        self.init_neurons(input_shape)
        # Shadow variables: spike_call parks the real kernel/bias here
        # while it temporarily rewires the layer.
        self._kernel = tf.Variable(lambda: tf.zeros_like(self.kernel))
        self._bias = tf.Variable(lambda: tf.zeros_like(self.bias))

    @spike_call
    def call(self, x, mask=None):
        return Conv2D.call(self, x)
class SpikeDepthwiseConv2D(DepthwiseConv2D, SpikeLayer):
    """Spiking depthwise-separable 2D convolution layer."""

    def build(self, input_shape):
        """Create the depthwise weights plus the spiking state.

        Parameters
        ----------
        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras
            tensors to reference for weight shape computations.
        """
        DepthwiseConv2D.build(self, input_shape)
        self.init_neurons(input_shape)
        # Alias so the generic spike_call machinery (which expects a
        # `kernel` attribute) finds the depthwise weights.
        self.kernel = self.depthwise_kernel
        self._kernel = tf.Variable(lambda: tf.zeros_like(self.kernel))
        self._bias = tf.Variable(lambda: tf.zeros_like(self.bias))

    @spike_call
    def call(self, x, mask=None):
        return DepthwiseConv2D.call(self, x)
class SpikeAveragePooling2D(AveragePooling2D, SpikeLayer):
    """Spiking average-pooling layer."""

    def build(self, input_shape):
        """Create the spiking state (the pooling op itself has no weights).

        Parameters
        ----------
        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras
            tensors to reference for weight shape computations.
        """
        AveragePooling2D.build(self, input_shape)
        self.init_neurons(input_shape)

    @spike_call
    def call(self, x, mask=None):
        return AveragePooling2D.call(self, x)
class SpikeMaxPooling2D(MaxPooling2D, SpikeLayer):
    """Spiking max-pooling layer."""

    def build(self, input_shape):
        """Create the spiking state variables.

        Parameters
        ----------
        input_shape: Union[list, tuple, Any]
            Keras tensor (future input to layer) or list/tuple of Keras
            tensors to reference for weight shape computations.
        """
        MaxPooling2D.build(self, input_shape)
        self.init_neurons(input_shape)

    def call(self, x, mask=None):
        """Layer functionality."""
        # Skip integration of the input into the membrane potential and
        # transmit new spikes directly: the pooled psp is nonzero wherever
        # an input spike occurred at any time so far in the simulation.
        pooled = MaxPooling2D.call(self, x)
        if self.spiketrain is not None:
            # A neuron spikes *newly* exactly when the pooled input turned
            # positive while it had not spiked before (hence the XOR).
            freshly_spiked = tf.math.logical_xor(
                k.greater(pooled, 0), k.greater(self.last_spiketimes, 0))
            self.add_update([(self.spiketrain,
                              self.time * k.cast(freshly_spiked,
                                                 k.floatx()))])
        return k.cast(self.get_psp(pooled), k.floatx())
# Registry mapping layer-class names to their spiking implementations.
# NOTE(review): the keys are load-bearing strings -- presumably looked up
# by name when the converted model is rebuilt; keep them in sync with the
# class names. Confirm against the INI target-simulator module.
custom_layers = {'SpikeFlatten': SpikeFlatten,
                 'SpikeDense': SpikeDense,
                 'SpikeConv2D': SpikeConv2D,
                 'SpikeAveragePooling2D': SpikeAveragePooling2D,
                 'SpikeMaxPooling2D': SpikeMaxPooling2D,
                 'SpikeConcatenate': SpikeConcatenate,
                 'SpikeDepthwiseConv2D': SpikeDepthwiseConv2D,
                 'SpikeZeroPadding2D': SpikeZeroPadding2D,
                 'SpikeReshape': SpikeReshape}
|
NeuromorphicProcessorProject/snn_toolbox
|
snntoolbox/simulation/backends/inisim/ttfs_dyn_thresh.py
|
Python
|
mit
| 18,549
|
[
"NEURON"
] |
33ceaddc92bfaca9108776d714b3f771d9323a7857fbf7a9339115a5940804ea
|
""" Test for AccountingDB
"""
import pytest
import DIRAC
DIRAC.initialize() # Initialize configuration
from DIRAC import gLogger
from DIRAC.AccountingSystem.DB.AccountingDB import AccountingDB
gLogger.setLevel("DEBUG")
acDB = AccountingDB()
startTime = 1262200000
middleTime = 1262300000
endTime = 1262400000
keyValues_1 = [
"User_1",
"UserGroup_1",
"Site_1",
"GridCE_1",
"GridMiddleware_1",
"GridResourceBroker_1",
"GridStatus_1",
]
nonKeyValue_1 = [123]
keyValues_2 = [
"User_2",
"UserGroup_2",
"Site_2",
"GridCE_2",
"GridMiddleware_2",
"GridResourceBroker_2",
"GridStatus_2",
]
nonKeyValue_2 = [456]
@pytest.fixture
def inout():
    """Insert two raw accounting records before the test, delete them after."""
    records = [
        (startTime, middleTime, keyValues_1 + nonKeyValue_1),
        (middleTime, endTime, keyValues_2 + nonKeyValue_2),
    ]
    for begin, finish, values in records:
        res = acDB.insertRecordDirectly("dirac-JenkinsSetup_Pilot", begin, finish, values)
        assert res["OK"], res["Message"]
    yield inout
    # Teardown: remove both records in the same order they were inserted.
    for begin, finish, values in records:
        res = acDB.deleteRecord(
            "dirac-JenkinsSetup_Pilot",
            begin,
            finish,
            values,
        )
        assert res["OK"], res["Message"]
# Real tests from here
def test_mix():
    """The DB can list its registered accounting types."""
    result = acDB.getRegisteredTypes()
    assert result["OK"], result["Message"]
def test_retrieveRawRecords(inout):
    """Raw records inserted by the fixture come back verbatim."""
    result = acDB.retrieveRawRecords("dirac-JenkinsSetup_Pilot", startTime, endTime, {}, "")
    assert result["OK"], result["Message"]
    assert len(result["Value"]) == 2
    # Each row is (start, end, *keys, *values) in insertion order.
    expected = (
        tuple([startTime, middleTime] + keyValues_1 + nonKeyValue_1),
        tuple([middleTime, endTime] + keyValues_2 + nonKeyValue_2),
    )
    assert result["Value"] == expected
def test_retrieveBucketedData():
    """Bucketed retrieval works for several select/group/order combinations."""
    # (selectFields, groupFields, orderFields, expected number of rows)
    queries = [
        (["%s, SUM(%s)", ["Site", "Jobs"]],
         ["%s", ["Site"]],
         ["%s", ["Site"]],
         2),
        (["%s, %s, SUM(%s)", ["Site", "GridCE", "Jobs"]],
         ["%s, %s", ["Site", "GridCE"]],
         ["%s", ["Site"]],
         2),
        # Grand total over everything collapses to a single row.
        (["SUM(%s)", ["Jobs"]],
         [],
         [],
         1),
    ]
    for selectFields, groupFields, orderFields, expected in queries:
        result = acDB.retrieveBucketedData(
            "dirac-JenkinsSetup_Pilot",
            startTime,
            endTime,
            selectFields=selectFields,
            condDict={},
            groupFields=groupFields,
            orderFields=orderFields,
        )
        assert result["OK"], result["Message"]
        assert len(result["Value"]) == expected
|
DIRACGrid/DIRAC
|
tests/Integration/AccountingSystem/Test_AccountingDB.py
|
Python
|
gpl-3.0
| 2,975
|
[
"DIRAC"
] |
cb43533904ff2db4eaba009bb69c5d3a097d6601cf2e515cca23dd720771d4e9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.