text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#############################################################################
## Pipeline is now on github: https://github.com/pminguez/geneticaPipeline ##
#############################################################################
import sys
from glob import glob
from subprocess import call
import argparse
import time
import datetime
import os
import re
def countdown(t):
    """Show a live countdown of *t* seconds on stdout before continuing.

    Counts from ``t`` down to 0 inclusive, rewriting the same console line
    each second, so the call blocks for roughly ``t + 1`` seconds.
    """
    # Use a distinct loop variable so the argument is not clobbered
    # (the original reused ``t`` as the loop index).
    for remaining in range(t, -1, -1):
        secs = remaining % 60  # only the seconds part is displayed
        timeformat = '{:02d}'.format(secs)
        sys.stdout.write('\rRunning in ' + timeformat + ' secs')
        sys.stdout.flush()
        time.sleep(1)
# Command-line interface: paths, resource limits and optional pipeline steps.
parser = argparse.ArgumentParser(description="Process Fastq files for getting variants")
parser.add_argument("-u", action="store", dest='user',
                    help="user name to look/store in the correct dir")
parser.add_argument("-I", action="store", dest='input',
                    help="path to input folder")
parser.add_argument("-o", action="store", dest='output',
                    help="path to output results")
# Threads are shared per tool invocation; parallelization is the number of
# samples processed simultaneously by GNU parallel.
parser.add_argument("-T", action="store", dest='threads', type = int, default = 16,
                    help="specify number of threads to use")
parser.add_argument("-J", action="store", dest='parallelization', type = int, default = 5,
                    help="specify number of samples to run in parallel")
# Boolean flags (store_true): default to False when absent.
parser.add_argument("-duplicates", action="store_true",
                    help="set this flag to markduplicates with picardtools")
parser.add_argument("-local", action="store_true",
                    help="set this flag to run the pipeline using local paths")
parser.add_argument("-gvcf", action="store_true",
                    help="set this flag to keep gvcf files")
args = parser.parse_args()
if args.input == None:
print ''
print 'ERROR: An input folder containing fastq files is needed'
print ''
parser.print_help()
exit()
if args.output == None:
print ''
print 'ERROR: An output folder containing fastq files is needed'
print ''
parser.print_help()
exit()
#Importing samples
forward_paths = sorted(glob(args.input + '*_R1.fastq.gz'))
reverse_paths = sorted(glob(args.input + '*_R2.fastq.gz'))
if forward_paths == []:
forward_paths = sorted(glob(args.input + '*_1.fastq.gz'))
reverse_paths = sorted(glob(args.input + '*_2.fastq.gz'))
if forward_paths == []:
print ''
print 'ERROR: No fastq files detected in ' + args.input + '.\nFastq files names should be named: name_R1.fastq.gz and name_R2.fastq.gz or name_1.fastq.gz and name_2.fastq.gz'
print ''
exit()
if len(forward_paths) != len(reverse_paths):
print ''
print 'ERROR: Different number of forward and reverse fastq files detected. PLEASE CHECK.'
print ''
exit()
# Summary banner: echo every effective argument so the user can review the
# run before it starts (countdown gives a short window to Ctrl-C out).
print '---------------------------------------------------------------------------------------------'
print '                                 *****Running FJD Pipeline*****                              '
print '---------------------------------------------------------------------------------------------'
print ''
print 'Number of samples to analyze: ' + str(len(forward_paths))
print ''
print 'ARGUMENTS:'
print ''
print ' -User: ' + str(args.user)
print ' -Input: ' + str(args.input)
print ' -Output: ' + str(args.output)
print ' -Threads: ' + str(args.threads)
print ' -Sample to parallelizate: ' + str(args.parallelization)
print ' -MarkDuplicates: ' + str(args.duplicates)
print ' -Running local: ' + str(args.local)
print ' -Keep gVCF files: ' + str(args.gvcf)
print ''
print '---------------------------------------------------------------------------------------------'
print 'Please review the arguments and number of samples to process...'
print ''
# Blocks ~6 seconds (counts 5..0 inclusive, one sleep per tick).
countdown(5)
print ''
print '---------------------------------------------------------------------------------------------'
# Resolve reference genome, tool and database locations.  -local selects
# hard-coded workstation paths; otherwise server paths (partly derived from
# the -u user name) are used.  Both branches read the output folder from -o.
if args.local:
    genome_ref = "/mnt/genetica/GeneticaPipeDB/genome_data/hg19/ucsc.hg19.fasta"
    picardtools = "/mnt/genetica/GeneticaPipeDB/software/picard-tools-2.1.1/picard.jar"
    gatk = "/mnt/genetica/GeneticaPipeDB/software/GenomeAnalysisTK-3.5/GenomeAnalysisTK.jar"
    hg19_path = "/mnt/genetica/GeneticaPipeDB/genome_data/hg19/"
    annovar = "/mnt/genetica/GeneticaPipeDB/software/annovar/table_annovar.pl"
    annovarDB = "/mnt/genetica/GeneticaPipeDB/software/annovar/humandb"
    output_path = args.output
else:
    # NOTE(review): genome/known-sites live under the per-user tree while the
    # tools live under the shared GeneticaPipeDB_updated tree -- confirm both
    # are kept in sync on the server.
    genome_ref = "/mnt/genetica/" + str(args.user) + "/GeneticaPipeDB/genome_data/hg19/ucsc.hg19.fasta"
    picardtools = "/mnt/genetica3/GeneticaPipeDB_updated/picard/build/libs/picard.jar"
    gatk = "/mnt/genetica3/GeneticaPipeDB_updated/gatk-4.0.5.1/gatk-package-4.0.5.1-local.jar"
    hg19_path = "/mnt/genetica/" + str(args.user) + "/GeneticaPipeDB/genome_data/hg19/"
    annovar = "/mnt/genetica3/GeneticaPipeDB_updated/annovar/table_annovar.pl"
    annovarDB = "/mnt/genetica3/GeneticaPipeDB_updated/annovar/humandb"
    output_path = args.output
print '   Mapping fastq files (BWA)  '
print '----------------------------------------------------------------------------------------------'
#Load genome reference to memory
# ``bwa shm`` keeps the index in shared memory so it is loaded only once
# for all samples; it is unloaded with ``bwa shm -d`` after the loop.
call('/mnt/genetica3/GeneticaPipeDB_updated/bwa/bwa shm ' + genome_ref,shell = True)
#Loop samples for BWA
for i in range(0,len(forward_paths)):
    # Split the forward-read path into folder and sample name (everything
    # before the '_R' suffix).  Relies on forward/reverse lists being
    # sorted identically so index i pairs the mates.
    sample_path = forward_paths[i][:forward_paths[i].rfind('/')+1]
    sample_name = forward_paths[i][forward_paths[i].rfind('/')+1:forward_paths[i].rfind('_R')]
    # -R injects the read-group header GATK requires downstream; output is
    # an uncompressed SAM per sample in the output folder.
    call('/mnt/genetica3/GeneticaPipeDB_updated/bwa/bwa mem -t' + str(args.threads) + ' -R "@RG\\tID:' + sample_name + '\\tLB:library\\tPL:illumina\\tPU:library\\tSM:' + sample_name + '" ' + genome_ref + ' ' + forward_paths[i] + ' ' + reverse_paths[i] + ' > ' + output_path + '/' + sample_name + '_bwa.sam',shell = True)
#Unload genome reference
call('/mnt/genetica3/GeneticaPipeDB_updated/bwa/bwa shm -d',shell = True)
print '----------------------------------------------------------------------------------------------'
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Sorting and creating bam and bai files of samples...'
print '----------------------------------------------------------------------------------------------'
# GNU parallel fans the sort/index out over -J samples; {} is replaced by
# each SAM path, so outputs are named <name>.sam_sorted.bam(.bai).  Each
# sort gets half the thread budget since several run concurrently.
call("find " + output_path + "*.sam | parallel --no-notice -j" + str(args.parallelization) + " '/mnt/genetica3/GeneticaPipeDB_updated/samtools-1.8/samtools sort {} -O BAM -@ " + str(args.threads / 2) + " -o {}_sorted.bam && /mnt/genetica3/GeneticaPipeDB_updated/samtools-1.8/samtools index {}_sorted.bam'", shell = True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] OK! '
print '----------------------------------------------------------------------------------------------'
# Default BQSR input; overwritten with '*_dedupped.bam' if MarkDuplicates runs.
baserecalibrator_input = '*_sorted.bam'
#Remove sam files
# The sorted BAMs supersede the SAMs, so the bulky SAM files can go now.
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing sam files...'
print '----------------------------------------------------------------------------------------------'
for i in glob(output_path + '*.sam'):
    os.remove(i)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing sam files...OK'
print '----------------------------------------------------------------------------------------------'
#MarkDuplicates with picardtools
# Optional step (-duplicates): flags PCR/optical duplicates per sorted BAM.
# Output keeps the parallel naming scheme: <x>_sorted.bam_dedupped.bam.
if args.duplicates:
    print '----------------------------------------------------------------------------------------------'
    print '[FJD_Pipeline] Marking Duplicates... '
    print '----------------------------------------------------------------------------------------------'
    # NOTE(review): TMP_DIR reuses ``sample_path`` left over from the last
    # iteration of the BWA loop above (the input fastq folder) -- confirm
    # that folder is writable and intended as picard scratch space.
    call("find " + output_path + "*_sorted.bam | parallel --no-notice -j" + str(args.parallelization) + " 'java -Xmx9g -jar " + picardtools + " \
        MarkDuplicates \
        I= {} \
        O= {}_dedupped.bam \
        CREATE_INDEX=true \
        VALIDATION_STRINGENCY=SILENT \
        TMP_DIR= " + sample_path + "working_temp \
        M= {}_duplicate_metrics.txt'",shell = True)
    print '----------------------------------------------------------------------------------------------'
    print '[FJD_Pipeline] Marking Duplicates...OK! '
    print '----------------------------------------------------------------------------------------------'
    # From here on BQSR consumes the deduplicated BAMs instead.
    baserecalibrator_input = '*_dedupped.bam'
#Empieza GATK
#Remove intermediary files
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing intermediary files...'
print '----------------------------------------------------------------------------------------------'
intermediary_files = glob(output_path + '*_sorted.bam') + glob(output_path + '*_sorted.bam.bai')
for i in intermediary_files:
os.remove(i)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing intermediary files...OK'
print '----------------------------------------------------------------------------------------------'
#Quality
# --- GATK4 Base Quality Score Recalibration, step 1: build the model -----
# One BaseRecalibrator job per BAM (GNU parallel, {} = BAM path) using the
# three standard hg19 known-sites resources; writes {}_recal_data.table.
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Doing Base Quality Score Recalibration (Step1)...'
print '----------------------------------------------------------------------------------------------'
call("find " + output_path + baserecalibrator_input + " | parallel -j" + str(args.parallelization) + " 'java -Xmx9g -jar " + gatk + " \
    BaseRecalibrator \
    -R " + genome_ref + " \
    -I {} \
    --known-sites " + hg19_path + "1000G_phase1.indels.hg19.sites.vcf \
    --known-sites " + hg19_path + "Mills_and_1000G_gold_standard.indels.hg19.sites.vcf \
    --known-sites " + hg19_path + "dbsnp_138.hg19.vcf \
    -O {}_recal_data.table'",shell=True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Doing Base Quality Score Recalibration...(Step1)OK!'
print '----------------------------------------------------------------------------------------------'
# --- BQSR step 2: apply the recalibration table, producing {}_bqsr.bam ---
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Doing Base Quality Score Recalibration (Step2)...'
print '----------------------------------------------------------------------------------------------'
call("find " + output_path + baserecalibrator_input + " | parallel -j" + str(args.parallelization) + " 'java -Xmx9g -jar " + gatk + " \
    ApplyBQSR \
    -R " + genome_ref + " \
    -I {} \
    -bqsr {}_recal_data.table \
    -O {}_bqsr.bam'",shell=True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Doing Base Quality Score Recalibration (Step2)...OK!'
print '----------------------------------------------------------------------------------------------'
#Crea archivo gVCF desde BAM
# HaplotypeCaller per recalibrated BAM in gVCF mode (-ERC GVCF); -OVI also
# writes the .idx index for each {}.g.vcf.
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Calling the variants...'
print '----------------------------------------------------------------------------------------------'
call("find " + output_path + "*_bqsr.bam | parallel -j" + str(args.parallelization) + " 'java -Xmx9g -jar " + gatk + " \
    HaplotypeCaller \
    -R " + genome_ref + " \
    -I {} \
    -ERC GVCF \
    -OVI \
    -O {}.g.vcf'",shell=True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Calling the variants...OK!'
print '----------------------------------------------------------------------------------------------'
#Crea VCF desde gVCF
# Joint genotyping of each gVCF individually ("single mode"); run with
# -j1 because GenotypeGVCFs gets the full 28 GB heap to itself.
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Genotyping in single mode...'
print '----------------------------------------------------------------------------------------------'
call("find " + output_path + "*.g.vcf | parallel -j1 'java -Xmx28g -jar " + gatk + " \
    GenotypeGVCFs \
    -R " + genome_ref + " \
    -V {} \
    -O {}_singleGT_raw.vcf'",shell = True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Genotyping in single mode...OK!'
print '----------------------------------------------------------------------------------------------'
#Remove gVCF files
if args.gvcf == None:
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing gVCF files...'
print '----------------------------------------------------------------------------------------------'
intermediary_files3 = glob(output_path + '*.g.vcf') + glob(output_path + '*.g.vcf.idx')
for i in intermediary_files3:
os.remove(i)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing gVCF files...OK'
print '----------------------------------------------------------------------------------------------'
#Empieza annovar
# Annotate every per-sample raw VCF with table_annovar: 19 protocols paired
# one-to-one with the 19 --operation codes (g=gene, r=region, f=filter).
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Annotating Variants...'
print '----------------------------------------------------------------------------------------------'
variants = sorted(glob(args.output + '*singleGT_raw.vcf'))
for vcffile in variants:
    # Recompute folder/sample name from the VCF path; ANNOVAR writes
    # <sample>.hg19_multianno.txt/.vcf next to it.
    sample_path = vcffile[:vcffile.rfind('/')+1]
    sample_name = vcffile[vcffile.rfind('/')+1:vcffile.rfind('singleGT_raw.vcf')]
    output = sample_path + sample_name
    call(annovar + ' ' + vcffile + ' ' + annovarDB + ' -buildver hg19 \
        -out ' + output + ' \
        --remove \
        --otherinfo \
        --protocol refGene,cytoBand,genomicSuperDups,esp6500siv2_all,1000g2015aug_eur,exac03,gnomad_exome,gnomad_genome,hrcr1,kaviar_20150923,popfreq_max_20150413,avsnp147,intervar_20170202,spidex,dbscsnv11,dbnsfp33a,revel,gwava,clinvar_20170130\
        --operation g,r,r,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f,f \
        --nastring . \
        --vcfinput \
        --thread ' + str(args.threads),shell = True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Annotating Variants...OK!'
print '----------------------------------------------------------------------------------------------'
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Formating and Filtering Variants...'
print '----------------------------------------------------------------------------------------------'
# Post-process the multianno table with project scripts.  NOTE(review):
# sample_path/sample_name here are whatever the LAST loop iteration left
# behind, so only the final sample appears to be formatted -- confirm
# whether these calls were meant to live inside the loop above.
if args.local:
    call('python /mnt/genetica/' + str(args.user) + '/GeneticaPipeDB/pipeline/annotation_scripts/vcf_processing_step1_v2.py ' + sample_path + '/' + sample_name + '.hg19_multianno.txt' ,shell = True)
    call('Rscript /mnt/genetica/' + str(args.user) + '/GeneticaPipeDB/pipeline/annotation_scripts/vcf_processing_step2.R ' + sample_path + '/' + sample_name + '_annotated_formatted.txt',shell = True)
else:
    call('python /mnt/genetica/ionut/GeneticaPipeDB/pipeline/annotation_scripts/vcf_processing_step1_v2.py ' + sample_path + '/' + sample_name + '.hg19_multianno.txt' ,shell = True)
    call('Rscript /mnt/genetica/ionut/GeneticaPipeDB/pipeline/annotation_scripts/vcf_processing_step2_server.R ' + sample_path + '/' + sample_name + '_annotated_formatted.txt',shell = True)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Formating and Filtering Variants...OK!'
print '----------------------------------------------------------------------------------------------'
#Remove intermediary files
# Final sweep of per-sample scratch files before renaming the results.
# NOTE(review): the '*_.avinput' and '*__annotated_formatted.txt' patterns
# contain odd underscore runs -- confirm they match the names the
# annotation scripts actually emit.
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing intermediary files...'
print '----------------------------------------------------------------------------------------------'
intermediary_files4 = glob(output_path + '*_recal_data.table') + glob(output_path + '*_.avinput') + glob(output_path +'*__annotated_formatted.txt') + glob(output_path + '*_dedupped.bam') + glob(output_path + '*_dedupped.bai')
for i in intermediary_files4:
    os.remove(i)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Removing intermediary files...OK'
print '----------------------------------------------------------------------------------------------'
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Renaming files ...'
print '----------------------------------------------------------------------------------------------'
today = str(datetime.date.today())
test_date = re.sub('-', '', today)
#sample_path = glob('/mnt/genetica/ionut/AnalisisPipeline/Muestras_Prueba/fastq/*')
#sample_path = glob('/mnt/genetica/ionut/AnalisisPipeline/Muestras_Prueba/fastq/slice1/results/*')
sample_path = glob(output_path + '*')
while len(sample_path) > 0:
for sample in sample_path:
name = '_' + test_date + '_v5'
if sample.endswith("_bqsr.bam"):
new_name1 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam', name + '.bam', sample)
os.rename(sample, new_name1)
elif sample.endswith("_bqsr.bai"):
new_name2 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bai', name + '.bai', sample)
os.rename(sample, new_name2)
elif sample.endswith(".bam.g.vcf"):
new_name3 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf', name + '.g.vcf', sample)
os.rename(sample, new_name3)
elif sample.endswith(".g.vcf.idx"):
new_name4 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf.idx', name + '.g.vcf.idx', sample)
os.rename(sample, new_name4)
elif sample.endswith("_duplicate_metrics.txt"):
new_name5 = re.sub('.fastq.g_bwa.sam_sorted.bam_duplicate_metrics.txt', name + '_duplicate_metrics.txt', sample)
os.rename(sample, new_name5)
elif sample.endswith("_singleGT_raw.vcf"):
new_name6 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf_singleGT_raw.vcf', name + '_singleGT_raw.vcf', sample)
os.rename(sample, new_name6)
elif sample.endswith("_singleGT_raw.vcf.idx"):
new_name7 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf_singleGT_raw.vcf.idx', name + '_singleGT_raw.vcf.idx', sample)
os.rename(sample, new_name7)
elif sample.endswith("_multianno.txt"):
new_name8 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf_.hg19_multianno.txt', name + '_multianno.txt', sample)
os.rename(sample, new_name8)
elif sample.endswith("_multianno.vcf"):
new_name9 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf_.hg19_multianno.vcf', name + '_multianno.vcf', sample)
os.rename(sample, new_name9)
elif sample.endswith("_raw_variants.txt"):
new_name10 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf__annotated_formatted.txt_raw_variants.txt', name + '_raw_variants.txt', sample)
os.rename(sample, new_name10)
elif sample.endswith("_prefiltered.txt"):
new_name11 = re.sub('.fastq.g_bwa.sam_sorted.bam_dedupped.bam_bqsr.bam.g.vcf__annotated_formatted.txt_prefiltered.txt', name + '_prefiltered.txt', sample)
os.rename(sample, new_name11)
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] Renaming files...OK'
print '----------------------------------------------------------------------------------------------'
print '----------------------------------------------------------------------------------------------'
print '[FJD_Pipeline] #VARIANTS READY FOR ANALYSIS#'
print '----------------------------------------------------------------------------------------------'
| pminguez/geneticaPipeline | pipeline_v5.py | Python | gpl-3.0 | 19,771 | [
"BWA"
] | 9249829fbc9cc87dff283002f64b4e44dc4a1ec5019c047de998f6cc1bae98cf |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from __future__ import print_function
import traceback
import os
import sys
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..managedwindow import ManagedWindow
from gramps.gen.errors import UnavailableError, WindowActiveError
from gramps.gen.plug import PluginRegister, PTYPE_STR, load_addon_file
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
from ..utils import open_file_with_default_application
from ..pluginmanager import GuiPluginManager
from . import tool
from ._guioptions import add_gui_options
from ..dialog import InfoDialog
from ..editors import EditPerson
from gramps.gen.utils.file import get_unicode_path_from_file_chooser
from gramps.gen.const import URL_WIKISTRING, USER_HOME, WIKI_EXTRAPLUGINS_RAWDATA
from gramps.gen.config import config
def display_message(message):
    """
    Default message callback: write *message* on its own line to stdout.
    """
    sys.stdout.write("%s\n" % (message,))
#-------------------------------------------------------------------------
#
# PluginStatus: overview of all plugins
#
#-------------------------------------------------------------------------
class PluginStatus(ManagedWindow):
    """Displays a dialog showing the status of loaded plugins"""
    # Pango markup strings rendered in the "Status" column of the plugin
    # lists: red "Hidden" vs bold blue "Visible".
    HIDDEN = '<span color="red">%s</span>' % _('Hidden')
    AVAILABLE = '<span weight="bold" color="blue">%s</span>'\
                                % _('Visible')
    def __init__(self, dbstate, uistate, track=[]):
        """Build the three-tab Plugin Manager dialog and show it.

        :param dbstate: database state, kept for plugin (re)registration.
        :param uistate: UI state providing the parent window.
        :param track: managed-window tracking list passed to ManagedWindow.
            NOTE(review): mutable default argument -- harmless here only if
            ManagedWindow never mutates it; confirm.
        """
        self.dbstate = dbstate
        self.__uistate = uistate
        self.title = _("Plugin Manager")
        ManagedWindow.__init__(self, uistate, track,
                                  self.__class__)

        self.__pmgr = GuiPluginManager.get_instance()
        self.__preg = PluginRegister.get_instance()
        self.set_window(Gtk.Dialog("", uistate.window,
                                   Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                   (Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)),
                        None, self.title)
        self.window.set_size_request(750, 400)
        self.window.connect('response', self.close)

        notebook = Gtk.Notebook()

        #first page with all registered plugins
        vbox_reg = Gtk.VBox()
        scrolled_window_reg = Gtk.ScrolledWindow()
        self.list_reg = Gtk.TreeView()
        # model: plugintype, hidden, pluginname, plugindescr, pluginid
        self.model_reg = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING,
                                       GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.selection_reg = self.list_reg.get_selection()
        self.list_reg.set_model(self.model_reg)
        self.list_reg.set_rules_hint(True)
        self.list_reg.connect('button-press-event', self.button_press_reg)
        # Columns: Type / Status (markup) / Name / Description; all sortable.
        col0_reg = Gtk.TreeViewColumn(_('Type'), Gtk.CellRendererText(), text=0)
        col0_reg.set_sort_column_id(0)
        col0_reg.set_resizable(True)
        self.list_reg.append_column(col0_reg)
        col = Gtk.TreeViewColumn(_('Status'), Gtk.CellRendererText(), markup=1)
        col.set_sort_column_id(1)
        self.list_reg.append_column(col)
        col2_reg = Gtk.TreeViewColumn(_('Name'), Gtk.CellRendererText(), text=2)
        col2_reg.set_sort_column_id(2)
        col2_reg.set_resizable(True)
        self.list_reg.append_column(col2_reg)
        col = Gtk.TreeViewColumn(_('Description'), Gtk.CellRendererText(), text=3)
        col.set_sort_column_id(3)
        col.set_resizable(True)
        self.list_reg.append_column(col)
        self.list_reg.set_search_column(2)

        scrolled_window_reg.add(self.list_reg)
        vbox_reg.pack_start(scrolled_window_reg, True, True, 0)

        # Button row: Info / Hide-Unhide (+ Edit and Load in debug builds).
        hbutbox = Gtk.HButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__info_btn = Gtk.Button(_("Info"))
        hbutbox.add(self.__info_btn)
        self.__info_btn.connect('clicked', self.__info, self.list_reg, 4) # id_col
        self.__hide_btn = Gtk.Button(_("Hide/Unhide"))
        hbutbox.add(self.__hide_btn)
        self.__hide_btn.connect('clicked', self.__hide,
                                self.list_reg, 4, 1) # list, id_col, hide_col
        if __debug__:
            self.__edit_btn = Gtk.Button(_("Edit"))
            hbutbox.add(self.__edit_btn)
            self.__edit_btn.connect('clicked', self.__edit, self.list_reg, 4) # id_col
            self.__load_btn = Gtk.Button(_("Load"))
            hbutbox.add(self.__load_btn)
            self.__load_btn.connect('clicked', self.__load, self.list_reg, 4) # id_col
        vbox_reg.pack_start(hbutbox, False, False, 0)

        notebook.append_page(vbox_reg,
                             tab_label=Gtk.Label(label=_('Registered Plugins')))

        #second page with loaded plugins
        vbox_loaded = Gtk.VBox()
        scrolled_window = Gtk.ScrolledWindow()
        self.list = Gtk.TreeView()
        # model: loaded-markup, file, message, plugin object, id, status-markup
        self.model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING,
                                   GObject.TYPE_STRING, object,
                                   GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.selection = self.list.get_selection()
        self.list.set_model(self.model)
        self.list.set_rules_hint(True)
        self.list.connect('button-press-event', self.button_press)
        self.list.connect('cursor-changed', self.cursor_changed)
        col = Gtk.TreeViewColumn(_('Loaded'), Gtk.CellRendererText(),
                                 markup=0)
        col.set_sort_column_id(0)
        col.set_resizable(True)
        self.list.append_column(col)
        col1 = Gtk.TreeViewColumn(_('File'), Gtk.CellRendererText(),
                                  text=1)
        col1.set_sort_column_id(1)
        col1.set_resizable(True)
        self.list.append_column(col1)
        col = Gtk.TreeViewColumn(_('Status'), Gtk.CellRendererText(),
                                 markup=5)
        col.set_sort_column_id(5)
        self.list.append_column(col)
        col2 = Gtk.TreeViewColumn(_('Message'), Gtk.CellRendererText(), text=2)
        col2.set_sort_column_id(2)
        col2.set_resizable(True)
        self.list.append_column(col2)
        self.list.set_search_column(1)

        scrolled_window.add(self.list)
        vbox_loaded.pack_start(scrolled_window, True, True, 0)
        hbutbox = Gtk.HButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__info_btn = Gtk.Button(_("Info"))
        hbutbox.add(self.__info_btn)
        self.__info_btn.connect('clicked', self.__info, self.list, 4) # id_col
        self.__hide_btn = Gtk.Button(_("Hide/Unhide"))
        hbutbox.add(self.__hide_btn)
        self.__hide_btn.connect('clicked', self.__hide,
                                self.list, 4, 5) # list, id_col, hide_col
        if __debug__:
            self.__edit_btn = Gtk.Button(_("Edit"))
            hbutbox.add(self.__edit_btn)
            self.__edit_btn.connect('clicked', self.__edit, self.list, 4) # id_col
            # Load is insensitive on this tab: the listed plugins are
            # already loaded.
            self.__load_btn = Gtk.Button(_("Load"))
            self.__load_btn.set_sensitive(False)
            hbutbox.add(self.__load_btn)
            self.__load_btn.connect('clicked', self.__load, self.list, 4) # id_col
        vbox_loaded.pack_start(hbutbox, False, False, 5)
        notebook.append_page(vbox_loaded,
                             tab_label=Gtk.Label(label=_('Loaded Plugins')))

        #third page with method to install plugin
        install_page = Gtk.VBox()
        scrolled_window = Gtk.ScrolledWindow()
        self.addon_list = Gtk.TreeView()
        # model: help_name, name, ptype, image, desc, use, rating, contact, download, url
        self.addon_model = Gtk.ListStore(GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING,
                                         GObject.TYPE_STRING)
        self.addon_list.set_model(self.addon_model)
        self.addon_list.set_rules_hint(True)
        #self.addon_list.connect('button-press-event', self.button_press)
        col = Gtk.TreeViewColumn(_('Addon Name'), Gtk.CellRendererText(),
                                 text=1)
        col.set_sort_column_id(1)
        self.addon_list.append_column(col)
        col = Gtk.TreeViewColumn(_('Type'), Gtk.CellRendererText(),
                                 text=2)
        col.set_sort_column_id(2)
        self.addon_list.append_column(col)
        col = Gtk.TreeViewColumn(_('Description'), Gtk.CellRendererText(),
                                 text=4)
        col.set_sort_column_id(4)
        self.addon_list.append_column(col)
        self.addon_list.connect('cursor-changed', self.button_press_addon)

        # Row with the path entry and a file-chooser button.
        install_row = Gtk.HBox()
        install_row.pack_start(Gtk.Label(label=_("Path to Addon:")), False, True, 0)
        self.install_addon_path = Gtk.Entry()
        button = Gtk.Button()
        img = Gtk.Image()
        img.set_from_stock(Gtk.STOCK_OPEN, Gtk.IconSize.BUTTON)
        button.add(img)
        button.connect('clicked', self.__select_file)
        install_row.pack_start(self.install_addon_path, True, True, 0)
        install_row.pack_start(button, False, False, 0)

        scrolled_window.add(self.addon_list)
        install_page.pack_start(scrolled_window, True, True, 0)
        #add some spce under the scrollbar
        install_page.pack_start(Gtk.Label(label=''), False, False, 0)
        #path to addon path line
        install_page.pack_start(install_row, False, False, 0)

        hbutbox = Gtk.HButtonBox()
        hbutbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.__add_btn = Gtk.Button(_("Install Addon"))
        hbutbox.add(self.__add_btn)
        self.__add_btn.connect('clicked', self.__get_addon_top)
        self.__add_all_btn = Gtk.Button(_("Install All Addons"))
        hbutbox.add(self.__add_all_btn)
        self.__add_all_btn.connect('clicked', self.__get_all_addons)
        self.__refresh_btn = Gtk.Button(_("Refresh Addon List"))
        hbutbox.add(self.__refresh_btn)
        self.__refresh_btn.connect('clicked', self.__refresh_addon_list)
        install_page.pack_start(hbutbox, False, True, 5)
        # NOTE(review): the install tab is built but deliberately not added
        # to the notebook (append_page commented out) -- confirm intended.
        # notebook.append_page(install_page,
        #                      tab_label=Gtk.Label(label=_('Install Addons')))

        #add the notebook to the window
        self.window.get_content_area().pack_start(notebook, True, True, 0)

        if __debug__:
            # Only show the "Reload" button when in debug mode
            # (without -O on the command line)
            self.__reload_btn = Gtk.Button(_("Reload"))
            self.window.action_area.add(self.__reload_btn)
            self.__reload_btn.connect('clicked', self.__reload)

        #obtain hidden plugins from the pluginmanager
        self.hidden = self.__pmgr.get_hidden_plugin_ids()

        self.window.show_all()
        self.__populate_lists()
        self.list_reg.columns_autosize()
    def __refresh_addon_list(self, obj):
        """
        Reloads the addons from the wiki into the list.

        Downloads the raw wiki table of third-party addons, parses it row
        by row, resolves each download link to a concrete archive URL and
        fills both the tree model and the 'plugin.addonplugins' config list.
        """
        # Python 2/3 split for urlopen.
        # NOTE(review): under Python 3 urlopen yields *bytes*, while the
        # parsing below compares against str literals ('|-', 'URL=') --
        # confirm this path was only exercised under Python 2.
        if sys.version_info[0] < 3:
            from urllib2 import urlopen
        else:
            from urllib.request import urlopen
        from ..utils import ProgressMeter
        URL = "%s%s" % (URL_WIKISTRING, WIKI_EXTRAPLUGINS_RAWDATA)
        try:
            fp = urlopen(URL)
        except:
            # Best-effort: network failure just leaves the list unchanged.
            print("Error: cannot open %s" % URL)
            return
        pm = ProgressMeter(_("Refreshing Addon List"))
        pm.set_pass(header=_("Reading gramps-project.org..."))
        state = "read"
        rows = []
        row = []
        lines = fp.readlines()
        pm.set_pass(total=len(lines), header=_("Reading gramps-project.org..."))
        # Tiny state machine over MediaWiki table markup: '|-'/'|}' start a
        # new row, '|' lines are cells of the current row.
        for line in lines:
            pm.step()
            if line.startswith("|-") or line.startswith("|}"):
                if row != []:
                    rows.append(row)
                state = "row"
                row = []
            elif state == "row":
                if line.startswith("|"):
                    row.append(line[1:].strip())
            else:
                state = "read"
        fp.close()
        rows.sort(key=lambda row: (row[1], row[0]))
        self.addon_model.clear()
        # clear the config list:
        config.get('plugin.addonplugins')[:] = []
        pm.set_pass(total=len(rows), header=_("Checking addon..."))
        for row in rows:
            pm.step()
            try:
                # from wiki:
                help_name, ptype, image, desc, use, rating, contact, download = row
            except:
                # Malformed row (wrong column count): skip it.
                continue
            help_url = _("Unknown Help URL")
            # Wiki link syntax: [[page|text]] internal, [url text] external.
            if help_name.startswith("[[") and help_name.endswith("]]"):
                name = help_name[2:-2]
                if "|" in name:
                    help_url, name = name.split("|", 1)
            elif help_name.startswith("[") and help_name.endswith("]"):
                name = help_name[1:-1]
                if " " in name:
                    help_url, name = name.split(" ", 1)
            else:
                name = help_name
            url = _("Unknown URL")
            if download.startswith("[[") and download.endswith("]]"):
                # Not directly possible to get the URL:
                url = download[2:-2]
                if "|" in url:
                    url, text = url.split("|", 1)
                # need to get a page that says where it is:
                fp = urlopen("%s%s%s" % (URL_WIKISTRING, url,
                                         "&action=edit&externaledit=true&mode=file"))
                for line in fp:
                    if line.startswith("URL="):
                        junk, url = line.split("=", 1)
                        break
                fp.close()
            elif download.startswith("[") and download.endswith("]"):
                url = download[1:-1]
                if " " in url:
                    url, text = url.split(" ", 1)
            # Only rows pointing at a supported archive type are accepted.
            if (url.endswith(".zip") or
                url.endswith(".ZIP") or
                url.endswith(".tar.gz") or
                url.endswith(".tgz")):
                # Then this is ok:
                self.addon_model.append(row=[help_name, name, ptype, image, desc, use,
                                             rating, contact, download, url])
                config.get('plugin.addonplugins').append([help_name, name, ptype, image, desc, use,
                                                          rating, contact, download, url])
        pm.close()
        config.save()
def __get_all_addons(self, obj):
"""
Get all addons from the wiki and install them.
"""
from ..utils import ProgressMeter
pm = ProgressMeter(_("Install all Addons"), _("Installing..."), message_area=True)
pm.set_pass(total=len(self.addon_model))
for row in self.addon_model:
pm.step()
(help_name, name, ptype, image, desc, use, rating, contact,
download, url) = row
load_addon_file(url, callback=pm.append_message)
self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate)
pm.message_area_ok.set_sensitive(True)
self.__rebuild_load_list()
self.__rebuild_reg_list()
def __get_addon_top(self, obj):
"""
Toplevel method to get an addon.
"""
from ..utils import ProgressMeter
pm = ProgressMeter(_("Installing Addon"), message_area=True)
pm.set_pass(total=2, header=_("Reading gramps-project.org..."))
pm.step()
self.__get_addon(obj, callback=pm.append_message)
pm.step()
pm.message_area_ok.set_sensitive(True)
def __get_addon(self, obj, callback=display_message):
"""
Get an addon from the wiki or file system and install it.
"""
path = self.install_addon_path.get_text()
load_addon_file(path, callback)
self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate)
self.__rebuild_load_list()
self.__rebuild_reg_list()
def __select_file(self, obj):
"""
Select a file from the file system.
"""
fcd = Gtk.FileChooserDialog(_("Load Addon"),
buttons=(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
name = self.install_addon_path.get_text()
dir = os.path.dirname(name)
if not os.path.isdir(dir):
dir = USER_HOME
name = ''
elif not os.path.isfile(name):
name = ''
fcd.set_current_folder(dir)
if name:
fcd.set_filename(name)
status = fcd.run()
if status == Gtk.ResponseType.OK:
path = get_unicode_path_from_file_chooser(fcd.get_filename())
if path:
self.install_addon_path.set_text(path)
fcd.destroy()
def __populate_lists(self):
""" Build the lists of plugins """
self.__populate_load_list()
self.__populate_reg_list()
self.__populate_addon_list()
def __populate_addon_list(self):
"""
Build the list of addons from the config setting.
"""
self.addon_model.clear()
for row in config.get('plugin.addonplugins'):
try:
help_name, name, ptype, image, desc, use, rating, contact, download, url = row
except:
continue
self.addon_model.append(row=[help_name, name, ptype, image, desc, use,
rating, contact, download, url])
def __populate_load_list(self):
""" Build list of loaded plugins"""
fail_list = self.__pmgr.get_fail_list()
for i in fail_list:
# i = (filename, (exception-type, exception, traceback), pdata)
err = i[1][0]
pdata = i[2]
hidden = pdata.id in self.hidden
if hidden:
hiddenstr = self.HIDDEN
else:
hiddenstr = self.AVAILABLE
if err == UnavailableError:
self.model.append(row=[
'<span color="blue">%s</span>' % _('Unavailable'),
i[0], str(i[1][1]), None, pdata.id, hiddenstr])
else:
self.model.append(row=[
'<span weight="bold" color="red">%s</span>' % _('Fail'),
i[0], str(i[1][1]), i[1], pdata.id, hiddenstr])
success_list = sorted(self.__pmgr.get_success_list(),
key=lambda x: (x[0], x[2]._get_name()))
for i in success_list:
# i = (filename, module, pdata)
pdata = i[2]
modname = i[1].__name__
hidden = pdata.id in self.hidden
if hidden:
hiddenstr = self.HIDDEN
else:
hiddenstr = self.AVAILABLE
self.model.append(row=[
'<span weight="bold" color="#267726">%s</span>' % _("OK"),
i[0], pdata.description, None, pdata.id, hiddenstr])
def __populate_reg_list(self):
""" Build list of registered plugins"""
for (type, typestr) in PTYPE_STR.items():
registered_plugins = []
for pdata in self.__preg.type_plugins(type):
# model: plugintype, hidden, pluginname, plugindescr, pluginid
hidden = pdata.id in self.hidden
if hidden:
hiddenstr = self.HIDDEN
else:
hiddenstr = self.AVAILABLE
registered_plugins.append([typestr, hiddenstr, pdata.name,
pdata.description, pdata.id])
for row in sorted(registered_plugins):
self.model_reg.append(row)
    def __rebuild_load_list(self):
        """Clear and repopulate the loaded-plugin list view."""
        self.model.clear()
        self.__populate_load_list()
    def __rebuild_reg_list(self):
        """Clear and repopulate the registered-plugin list view."""
        self.model_reg.clear()
        self.__populate_reg_list()
    def cursor_changed(self, obj):
        """
        Selection-changed callback: update the Load button's sensitivity
        from column 3 of the selected row.
        """
        # NOTE(review): the whole body is gated on __debug__, so the button
        # state never updates when Python runs with -O -- confirm this is
        # intentional before relying on it.
        if __debug__:
            selection = obj.get_selection()
            if selection:
                model, node = selection.get_selected()
                if node:
                    data = model.get_value(node, 3)
                    self.__load_btn.set_sensitive(data is not None)
def button_press(self, obj, event):
""" Callback function from the user clicking on a line """
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
model, node = self.selection.get_selected()
data = model.get_value(node, 3)
name = model.get_value(node, 1)
if data:
PluginTrace(self.uistate, [], data, name)
def button_press_reg(self, obj, event):
""" Callback function from the user clicking on a line in reg plugin
"""
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
self.__info(obj, self.list_reg, 4)
def button_press_addon(self, obj):
""" Callback function from the user clicking on a line in reg plugin
"""
selection = self.addon_list.get_selection()
if selection:
model, node = selection.get_selected()
if node:
url = model.get_value(node, 9)
self.install_addon_path.set_text(url)
    def build_menu_names(self, obj):
        """Return the (menu, submenu) names used by ManagedWindow."""
        return (self.title, "")
    def __reload(self, obj):
        """ Callback function from the "Reload" button """
        # reload every plugin, then rebuild both list views to match
        self.__pmgr.reload_plugins()
        self.__rebuild_load_list()
        self.__rebuild_reg_list()
def __info(self, obj, list_obj, id_col):
""" Callback function from the "Info" button
"""
selection = list_obj.get_selection()
model, node = selection.get_selected()
if not node:
return
id = model.get_value(node, id_col)
pdata = self.__preg.get_plugin(id)
typestr = pdata.ptype
auth = ' - '.join(pdata.authors)
email = ' - '.join(pdata.authors_email)
if len(auth) > 60:
auth = auth[:60] + '...'
if len(email) > 60:
email = email[:60] + '...'
if pdata:
infotxt = """%(plugnam)s: %(name)s [%(typestr)s]
%(plugdes)s: %(descr)s
%(plugver)s: %(version)s
%(plugaut)s: %(authors)s
%(plugmel)s: %(email)s
%(plugfil)s: %(fname)s
%(plugpat)s: %(fpath)s
""" % {
'name': pdata.name,
'typestr': typestr,
'descr': pdata.description,
'version': pdata.version,
'authors': auth,
'email': email,
'fname': pdata.fname,
'fpath': pdata.fpath,
'plugnam': _("Plugin name"),
'plugdes': _("Description"),
'plugver': _("Version"),
'plugaut': _("Authors"),
'plugmel': _("Email"),
'plugfil': _("Filename"),
'plugpat': _("Location"),
}
InfoDialog(_('Detailed Info'), infotxt, parent=self.window)
def __hide(self, obj, list_obj, id_col, hide_col):
""" Callback function from the "Hide" button
"""
selection = list_obj.get_selection()
model, node = selection.get_selected()
if not node:
return
id = model.get_value(node, id_col)
if id in self.hidden:
#unhide
self.hidden.remove(id)
model.set_value(node, hide_col, self.AVAILABLE)
self.__pmgr.unhide_plugin(id)
else:
#hide
self.hidden.add(id)
model.set_value(node, hide_col, self.HIDDEN)
self.__pmgr.hide_plugin(id)
def __load(self, obj, list_obj, id_col):
""" Callback function from the "Load" button
"""
selection = list_obj.get_selection()
model, node = selection.get_selected()
if not node:
return
idv = model.get_value(node, id_col)
pdata = self.__preg.get_plugin(idv)
self.__pmgr.load_plugin(pdata)
self.__rebuild_load_list()
def __edit(self, obj, list_obj, id_col):
""" Callback function from the "Load" button
"""
selection = list_obj.get_selection()
model, node = selection.get_selected()
if not node:
return
id = model.get_value(node, id_col)
pdata = self.__preg.get_plugin(id)
if pdata.fpath and pdata.fname:
open_file_with_default_application(
os.path.join(pdata.fpath, pdata.fname)
)
#-------------------------------------------------------------------------
#
# Details for an individual plugin that failed
#
#-------------------------------------------------------------------------
class PluginTrace(ManagedWindow):
    """Displays a dialog showing the status of loaded plugins"""
    def __init__(self, uistate, track, data, name):
        """
        :param data: (exc_type, exc_value, traceback) triple for the failure
        :param name: name of the failing plugin, shown in the window title
        """
        self.name = name
        title = "%s: %s" % (_("Plugin Error"), name)
        ManagedWindow.__init__(self, uistate, track, self)
        self.set_window(Gtk.Dialog("", uistate.window,
                                   Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                   (Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE)),
                        None, title)
        self.window.set_size_request(600, 400)
        self.window.connect('response', self.close)
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.text = Gtk.TextView()
        scrolled_window.add(self.text)
        # render the fully formatted traceback into the text view
        self.text.get_buffer().set_text(
            "".join(traceback.format_exception(data[0],data[1],data[2])))
        self.window.get_content_area().add(scrolled_window)
        self.window.show_all()
    def build_menu_names(self, obj):
        """Return the (menu, submenu) names used by ManagedWindow."""
        return (self.name, None)
#-------------------------------------------------------------------------
#
# Classes for tools
#
#-------------------------------------------------------------------------
class LinkTag(Gtk.TextTag):
    """Text tag styled like a hyperlink (blue foreground, underlined)."""
    def __init__(self, link, buffer):
        # NOTE(review): calls GObject.GObject.__init__ directly (not via
        # super) with the link string as the tag name -- presumably so
        # equal links map to one named tag; confirm before changing.
        GObject.GObject.__init__(self, name=link)
        tag_table = buffer.get_tag_table()
        self.set_property('foreground', "#0000ff")
        self.set_property('underline', Pango.Underline.SINGLE)
        try:
            tag_table.add(self)
        except ValueError:
            pass # already in table
class ToolManagedWindowBase(ManagedWindow):
    """
    Copied from src/ReportBase/_BareReportDialog.py BareReportDialog
    """
    # extra padding (pixels) around the header label
    border_pad = 6
    HELP_TOPIC = None
    def __init__(self, dbstate, uistate, option_class, name, callback=None):
        """
        Build the tool dialog: title, header, user-option pages in a
        notebook and a results text view with clickable person links.
        """
        self.name = name
        ManagedWindow.__init__(self, uistate, [], self)
        self.extra_menu = None
        self.widgets = []
        self.frame_names = []
        self.frames = {}
        self.format_menu = None
        self.style_button = None
        window = Gtk.Dialog('Tool')
        self.set_window(window, None, self.get_title())
        #self.window.connect('response', self.close)
        self.cancel = self.window.add_button(Gtk.STOCK_CLOSE,
                                             Gtk.ResponseType.CANCEL)
        self.cancel.connect('clicked', self.close)
        self.ok = self.window.add_button(Gtk.STOCK_EXECUTE, Gtk.ResponseType.OK)
        self.ok.connect('clicked', self.on_ok_clicked)
        self.window.set_default_size(600, -1)
        # Set up and run the dialog. These calls are not in top down
        # order when looking at the dialog box as there is some
        # interaction between the various frames.
        self.setup_title()
        self.setup_header()
        #self.tbl = Gtk.Table(4, 4, False)
        #self.tbl.set_col_spacings(12)
        #self.tbl.set_row_spacings(6)
        #self.tbl.set_border_width(6)
        #self.col = 0
        #self.window.vbox.add(self.tbl)
        # Build the list of widgets that are used to extend the Options
        # frame and to create other frames
        self.add_user_options()
        self.notebook = Gtk.Notebook()
        self.notebook.set_border_width(6)
        self.window.get_content_area().add(self.notebook)
        self.results_text = Gtk.TextView()
        self.results_text.connect('button-press-event',
                                  self.on_button_press)
        self.results_text.connect('motion-notify-event',
                                  self.on_motion)
        # (LinkTag, person_handle) pairs for clickable results text
        self.tags = []
        self.link_cursor = Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR)
        self.standard_cursor = Gdk.Cursor.new(Gdk.CursorType.XTERM)
        self.setup_other_frames()
        self.set_current_frame(self.initial_frame())
        self.show()
    #------------------------------------------------------------------------
    #
    # Callback functions from the dialog
    #
    #------------------------------------------------------------------------
    def on_cancel(self, *obj):
        pass # cancel just closes
    def on_ok_clicked(self, obj):
        """
        The user is satisfied with the dialog choices. Parse all options
        and run the tool.
        """
        # Save options
        self.options.parse_user_options()
        self.options.handler.save_options()
        self.pre_run()
        self.run() # activate results tab
        self.post_run()
    def initial_frame(self):
        """Name of the frame to show first, or None for the first page."""
        return None
    def on_motion(self, view, event):
        """Show a link cursor while the pointer is over a tagged link."""
        buffer_location = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT,
                                                       int(event.x),
                                                       int(event.y))
        iter = view.get_iter_at_location(*buffer_location)
        for (tag, person_handle) in self.tags:
            if iter.has_tag(tag):
                _window = view.get_window(Gtk.TextWindowType.TEXT)
                _window.set_cursor(self.link_cursor)
                return False # handle event further, if necessary
        view.get_window(Gtk.TextWindowType.TEXT).set_cursor(self.standard_cursor)
        return False # handle event further, if necessary
    def on_button_press(self, view, event):
        """Activate (single click) or edit (double click) the person
        whose link tag is under the pointer."""
        buffer_location = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT,
                                                       int(event.x),
                                                       int(event.y))
        iter = view.get_iter_at_location(*buffer_location)
        for (tag, person_handle) in self.tags:
            if iter.has_tag(tag):
                person = self.db.get_person_from_handle(person_handle)
                if event.button == 1:
                    if event.type == Gdk.EventType._2BUTTON_PRESS:
                        try:
                            EditPerson(self.dbstate, self.uistate, [], person)
                        except WindowActiveError:
                            pass
                    else:
                        self.uistate.set_active(person_handle, 'Person')
                    return True # handled event
        return False # did not handle event
    def results_write_link(self, text, person, person_handle):
        """Append *text* to the results view tagged as a clickable link
        to *person_handle*."""
        self.results_write(" ")
        buffer = self.results_text.get_buffer()
        iter = buffer.get_end_iter()
        offset = buffer.get_char_count()
        self.results_write(text)
        start = buffer.get_iter_at_offset(offset)
        end = buffer.get_end_iter()
        self.tags.append((LinkTag(person_handle, buffer), person_handle))
        buffer.apply_tag(self.tags[-1][0], start, end)
    def results_write(self, text):
        """Append *text* to the results view, keeping it scrolled to end."""
        buffer = self.results_text.get_buffer()
        mark = buffer.create_mark("end", buffer.get_end_iter())
        self.results_text.scroll_to_mark(mark, 0.0, True, 0, 0)
        buffer.insert_at_cursor(text)
        buffer.delete_mark_by_name("end")
    def write_to_page(self, page, text):
        """Append *text* to the given text view's buffer."""
        buffer = page.get_buffer()
        mark = buffer.create_mark("end", buffer.get_end_iter())
        self.results_text.scroll_to_mark(mark, 0.0, True, 0, 0)
        buffer.insert_at_cursor(text)
        buffer.delete_mark_by_name("end")
    def clear(self, text):
        # Remove all tags and clear text
        buffer = text.get_buffer()
        tag_table = buffer.get_tag_table()
        start = buffer.get_start_iter()
        end = buffer.get_end_iter()
        for (tag, handle) in self.tags:
            buffer.remove_tag(tag, start, end)
            tag_table.remove(tag)
        self.tags = []
        buffer.set_text("")
    def results_clear(self):
        # Remove all tags and clear text
        buffer = self.results_text.get_buffer()
        tag_table = buffer.get_tag_table()
        start = buffer.get_start_iter()
        end = buffer.get_end_iter()
        for (tag, handle) in self.tags:
            buffer.remove_tag(tag, start, end)
            tag_table.remove(tag)
        self.tags = []
        buffer.set_text("")
    def pre_run(self):
        """Create the progress meter shown while the tool runs."""
        from ..utils import ProgressMeter
        self.progress = ProgressMeter(self.get_title())
    def run(self):
        raise NotImplementedError("tool needs to define a run() method")
    def post_run(self):
        """Tear down the progress meter created by pre_run()."""
        self.progress.close()
    #------------------------------------------------------------------------
    #
    # Functions related to setting up the dialog window.
    #
    #------------------------------------------------------------------------
    def get_title(self):
        """The window title for this dialog"""
        return "Tool" # self.title
    def get_header(self, name):
        """The header line to put at the top of the contents of the
        dialog box. By default this will just be the name of the
        selected person. Most subclasses will customize this to give
        some indication of what the report will be, i.e. 'Descendant
        Report for %s'."""
        return self.get_title()
    def setup_title(self):
        """Set up the title bar of the dialog. This function relies
        on the get_title() customization function for what the title
        should be."""
        self.window.set_title(self.get_title())
    def setup_header(self):
        """Set up the header line bar of the dialog. This function
        relies on the get_header() customization function for what the
        header line should read. If no customization function is
        supplied by the subclass, the default is to use the full name
        of the currently selected person."""
        title = self.get_header(self.get_title())
        label = Gtk.Label(label='<span size="larger" weight="bold">%s</span>' % title)
        label.set_use_markup(True)
        self.window.get_content_area().pack_start(label, False, False,
                                                  self.border_pad)
    def add_frame_option(self, frame_name, label_text, widget):
        """Similar to add_option this method takes a frame_name, a
        text string and a Gtk Widget. When the interface is built,
        all widgets with the same frame_name are grouped into a
        GtkFrame. This allows the subclass to create its own sections,
        filling them with its own widgets. The subclass is reponsible for
        all managing of the widgets, including extracting the final value
        before the report executes. This task should only be called in
        the add_user_options task."""
        if frame_name in self.frames:
            self.frames[frame_name].append((label_text, widget))
        else:
            self.frames[frame_name] = [(label_text, widget)]
            self.frame_names.append(frame_name)
    def set_current_frame(self, name):
        """Switch the notebook to the page holding the named frame
        (or to page 0 when name is None)."""
        if name is None:
            self.notebook.set_current_page(0)
        else:
            for frame_name in self.frame_names:
                if name == frame_name:
                    if len(self.frames[frame_name]) > 0:
                        fname, child = self.frames[frame_name][0]
                        page = self.notebook.page_num(child)
                        self.notebook.set_current_page(page)
                    return
    def add_results_frame(self, frame_name="Results"):
        """Create (or clear) the scrolled results page; return the
        results text view."""
        if frame_name not in self.frames:
            window = Gtk.ScrolledWindow()
            window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
            window.add(self.results_text)
            window.set_shadow_type(Gtk.ShadowType.IN)
            self.frames[frame_name] = [[frame_name, window]]
            self.frame_names.append(frame_name)
            l = Gtk.Label(label="<b>%s</b>" % _(frame_name))
            l.set_use_markup(True)
            self.notebook.append_page(window, l)
            self.notebook.show_all()
        else:
            self.results_clear()
        return self.results_text
    def add_page(self, frame_name="Help"):
        """Add a word-wrapped text page and return its text view."""
        if frame_name not in self.frames:
            text = Gtk.TextView()
            text.set_wrap_mode(Gtk.WrapMode.WORD)
            window = Gtk.ScrolledWindow()
            window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
            window.add(text)
            window.set_shadow_type(Gtk.ShadowType.IN)
            self.frames[frame_name] = [[frame_name, window]]
            self.frame_names.append(frame_name)
            l = Gtk.Label(label="<b>%s</b>" % _(frame_name))
            l.set_use_markup(True)
            self.notebook.append_page(window, l)
            self.notebook.show_all()
        else:
            # FIXME: get text
            #
            # NOTE(review): '.something' is a placeholder attribute; this
            # branch raises AttributeError if it is ever reached.
            text = self.frames[frame_name][0][1].something
        return text
    def setup_other_frames(self):
        """Similar to add_option this method takes a frame_name, a
        text string and a Gtk Widget. When the interface is built,
        all widgets with the same frame_name are grouped into a
        GtkFrame. This allows the subclass to create its own sections,
        filling them with its own widgets. The subclass is reponsible for
        all managing of the widgets, including extracting the final value
        before the report executes. This task should only be called in
        the add_user_options task."""
        for key in self.frame_names:
            flist = self.frames[key]
            table = Gtk.Table(3, len(flist))
            table.set_col_spacings(12)
            table.set_row_spacings(6)
            table.set_border_width(6)
            l = Gtk.Label(label="<b>%s</b>" % key)
            l.set_use_markup(True)
            self.notebook.append_page(table, l)
            row = 0
            for (text, widget) in flist:
                if text:
                    text_widget = Gtk.Label(label='%s:' % text)
                    text_widget.set_alignment(0.0, 0.5)
                    table.attach(text_widget, 1, 2, row, row+1,
                                 Gtk.AttachOptions.SHRINK|Gtk.AttachOptions.FILL, Gtk.AttachOptions.SHRINK)
                    table.attach(widget, 2, 3, row, row+1,
                                 yoptions=Gtk.AttachOptions.SHRINK)
                else:
                    table.attach(widget, 2, 3, row, row+1,
                                 yoptions=Gtk.AttachOptions.SHRINK)
                row += 1
        self.notebook.show_all()
    #------------------------------------------------------------------------
    #
    # Functions related to extending the options
    #
    #------------------------------------------------------------------------
    def add_user_options(self):
        """Called to allow subclasses add widgets to the dialog form.
        It is called immediately before the window is displayed. All
        calls to add_option or add_frame_option should be called in
        this task."""
        add_gui_options(self)
    def build_menu_names(self, obj):
        return (_('Main window'), self.get_title())
class ToolManagedWindowBatch(tool.BatchTool, ToolManagedWindowBase):
    """Managed tool window for batch tools; the UI is only built when the
    batch-tool prompt did not abort (self.fail is False)."""
    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        # This constructor will ask a question, set self.fail:
        self.dbstate = dbstate
        self.uistate = uistate
        tool.BatchTool.__init__(self, dbstate, options_class, name)
        if not self.fail:
            ToolManagedWindowBase.__init__(self, dbstate, uistate,
                                           options_class, name, callback)
class ToolManagedWindow(tool.Tool, ToolManagedWindowBase):
    """Managed tool window for ordinary (non-batch) tools."""
    def __init__(self, dbstate, uistate, options_class, name, callback=None):
        self.dbstate = dbstate
        self.uistate = uistate
        tool.Tool.__init__(self, dbstate, options_class, name)
        ToolManagedWindowBase.__init__(self, dbstate, uistate, options_class,
                                       name, callback)
| Forage/Gramps | gramps/gui/plug/_windows.py | Python | gpl-2.0 | 43,144 | [
"Brian"
] | dc1b4dd77bf262eaa6ec9f5e941896a6e47bc72d03beecc0c8250071c5483a3a |
#!/usr/bin/python
# Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import audiotools
from io import BytesIO
import math
import os
from hashlib import md5
from audiotools.decoders import (Sine_Mono,
Sine_Stereo,
Sine_Simple,
SameSample)
# these are test stream generators using stream formulas
# taken from the FLAC reference encoder
# but converted to PCMReaders for more general use
class FrameListReader:
    """
    PCMReader-like object serving a fixed list of samples as a single
    FrameList; used to build short deterministic test streams.
    """
    def __init__(self, samples, sample_rate, channels, bits_per_sample,
                 channel_mask=None):
        import audiotools.pcm
        self.framelist = audiotools.pcm.from_list(samples,
                                                  channels,
                                                  bits_per_sample,
                                                  True)
        # private copy so reset() can rebuild the framelist later
        self.samples = samples[:]
        self.sample_rate = sample_rate
        self.channels = channels
        if channel_mask is None:
            # derive a default mask from the channel count
            self.channel_mask = \
                int(audiotools.ChannelMask.from_channels(channels))
        else:
            self.channel_mask = channel_mask
        self.bits_per_sample = bits_per_sample
        self.read = self.read_opened
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def read_opened(self, pcm_frames):
        # hand out up to pcm_frames frames, keep the remainder
        (framelist, self.framelist) = self.framelist.split(pcm_frames)
        return framelist
    def reset(self):
        """Rewind to the initial sample list and reopen the stream."""
        self.framelist = audiotools.pcm.from_list(self.samples,
                                                  self.channels,
                                                  self.bits_per_sample,
                                                  True)
        self.read = self.read_opened
    def read_closed(self, pcm_frames):
        # reading after close() is an error
        raise ValueError()
    def close(self):
        self.read = self.read_closed
class MD5Reader(audiotools.PCMReader):
    """Pass-through PCMReader that feeds every byte read into an MD5 sum."""
    def __init__(self, pcmreader):
        audiotools.PCMReader.__init__(
            self,
            sample_rate=pcmreader.sample_rate,
            channels=pcmreader.channels,
            channel_mask=pcmreader.channel_mask,
            bits_per_sample=pcmreader.bits_per_sample)
        self.pcmreader = pcmreader
        self.md5 = md5()
    def reset(self):
        """Rewind the wrapped reader (when supported) and restart the sum."""
        if hasattr(self.pcmreader, "reset"):
            self.pcmreader.reset()
        self.md5 = md5()
    def __repr__(self):
        return (f"MD5Reader({self.sample_rate!r},"
                f"{self.channels!r},{self.bits_per_sample!r})")
    def read(self, pcm_frames):
        frames = self.pcmreader.read(pcm_frames)
        self.md5.update(frames.to_bytes(False, True))
        return frames
    def close(self):
        self.pcmreader.close()
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
class ShortStream(MD5Reader):
    """MD5Reader over a short fixed list of samples."""
    def __init__(self, samples, sample_rate, channels, bits_per_sample):
        MD5Reader.__init__(
            self,
            FrameListReader(samples,
                            sample_rate,
                            channels,
                            bits_per_sample))
class Generate01(ShortStream):
    """Single full-scale negative 16-bit sample, mono."""
    def __init__(self, sample_rate):
        ShortStream.__init__(self, [-32768],
                             sample_rate, 1, 16)
class Generate02(ShortStream):
    """One stereo frame at full-scale negative/positive, 16-bit."""
    def __init__(self, sample_rate):
        ShortStream.__init__(self, [-32768, 32767],
                             sample_rate, 2, 16)
class Generate03(ShortStream):
    """Five small 16-bit samples, mono."""
    def __init__(self, sample_rate):
        ShortStream.__init__(self, [-25, 0, 25, 50, 100],
                             sample_rate, 1, 16)
class Generate04(ShortStream):
    """Five interleaved stereo frames of small 16-bit samples."""
    def __init__(self, sample_rate):
        ShortStream.__init__(self, [-25, 500, 0, 400, 25, 300, 50, 200,
                                    100, 100],
                             sample_rate, 2, 16)
class Silence8_Mono(SameSample):
    """8-bit mono silence stream that keeps an MD5 sum of the data read."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=1,
                            channel_mask=0x4, bits_per_sample=8)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def read(self, pcm_frames):
        frames = SameSample.read(self, pcm_frames)
        self.md5.update(frames.to_bytes(False, True))
        return frames
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
    def reset(self):
        SameSample.reset(self)
        self.md5 = md5()
    def __repr__(self):
        return f"Silence8_Mono({self.pcm_frames!r},{self.sample_rate!r})"
class Silence16_Mono(Silence8_Mono):
    """16-bit mono variant of Silence8_Mono."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=1,
                            channel_mask=0x4, bits_per_sample=16)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def __repr__(self):
        return f"Silence16_Mono({self.pcm_frames!r},{self.sample_rate!r})"
class Silence24_Mono(Silence8_Mono):
    """24-bit mono variant of Silence8_Mono."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=1,
                            channel_mask=0x4, bits_per_sample=24)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def __repr__(self):
        return f"Silence24_Mono({self.pcm_frames!r},{self.sample_rate!r})"
class Silence8_Stereo(Silence8_Mono):
    """8-bit stereo variant of Silence8_Mono."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=2,
                            channel_mask=0x3, bits_per_sample=8)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def __repr__(self):
        return f"Silence8_Stereo({self.pcm_frames!r},{self.sample_rate!r})"
class Silence16_Stereo(Silence8_Mono):
    """16-bit stereo variant of Silence8_Mono."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=2,
                            channel_mask=0x3, bits_per_sample=16)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def __repr__(self):
        return f"Silence16_Stereo({self.pcm_frames!r},{self.sample_rate!r})"
class Silence24_Stereo(Silence8_Mono):
    """24-bit stereo variant of Silence8_Mono."""
    def __init__(self, pcm_frames, sample_rate):
        SameSample.__init__(self, sample=0, total_pcm_frames=pcm_frames,
                            sample_rate=sample_rate, channels=2,
                            channel_mask=0x3, bits_per_sample=24)
        self.pcm_frames = pcm_frames
        self.md5 = md5()
    def __repr__(self):
        return f"Silence24_Stereo({self.pcm_frames!r},{self.sample_rate!r})"
class Sine8_Mono(Sine_Mono):
    """8-bit mono two-tone sine stream with MD5 tracking of data read."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2):
        Sine_Mono.__init__(self, 8, pcm_frames, sample_rate, f1, a1, f2, a2)
        self.pcm_frames = pcm_frames
        # keep the tone parameters around for __repr__
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.md5 = md5()
    def read(self, pcm_frames):
        frames = Sine_Mono.read(self, pcm_frames)
        self.md5.update(frames.to_bytes(False, True))
        return frames
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
    def reset(self):
        Sine_Mono.reset(self)
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine8_Mono({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r})")
class Sine8_Stereo(Sine_Stereo):
    """8-bit stereo two-tone sine stream with MD5 tracking of data read."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2, fmult):
        Sine_Stereo.__init__(self, 8, pcm_frames,
                             sample_rate, f1, a1, f2, a2, fmult)
        self.pcm_frames = pcm_frames
        # keep the tone parameters around for __repr__
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.fmult = fmult
        self.md5 = md5()
    def read(self, pcm_frames):
        frames = Sine_Stereo.read(self, pcm_frames)
        self.md5.update(frames.to_bytes(False, True))
        return frames
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
    def reset(self):
        Sine_Stereo.reset(self)
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine8_Stereo({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r},"
                f"{self.fmult!r})")
class Sine16_Mono(Sine8_Mono):
    """16-bit variant of Sine8_Mono."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2):
        Sine_Mono.__init__(self, 16, pcm_frames, sample_rate, f1, a1, f2, a2)
        self.pcm_frames = pcm_frames
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine16_Mono({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r})")
class Sine16_Stereo(Sine8_Stereo):
    """16-bit variant of Sine8_Stereo."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2, fmult):
        Sine_Stereo.__init__(self, 16, pcm_frames, sample_rate,
                             f1, a1, f2, a2, fmult)
        self.pcm_frames = pcm_frames
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.fmult = fmult
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine16_Stereo({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r},"
                f"{self.fmult!r})")
class Sine24_Mono(Sine8_Mono):
    """24-bit variant of Sine8_Mono."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2):
        Sine_Mono.__init__(self, 24, pcm_frames, sample_rate, f1, a1, f2, a2)
        self.pcm_frames = pcm_frames
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine24_Mono({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r})")
class Sine24_Stereo(Sine8_Stereo):
    """24-bit variant of Sine8_Stereo."""
    def __init__(self, pcm_frames, sample_rate, f1, a1, f2, a2, fmult):
        Sine_Stereo.__init__(self, 24, pcm_frames, sample_rate,
                             f1, a1, f2, a2, fmult)
        self.pcm_frames = pcm_frames
        self.f1, self.a1, self.f2, self.a2 = f1, a1, f2, a2
        self.fmult = fmult
        self.md5 = md5()
    def __repr__(self):
        return (f"Sine24_Stereo({self.pcm_frames!r},{self.sample_rate!r},"
                f"{self.f1!r},{self.a1!r},{self.f2!r},{self.a2!r},"
                f"{self.fmult!r})")
class Simple_Sine(audiotools.PCMReader):
    """Multi-channel reader where each channel is an independent simple
    sine; each entry of *values is a (max_value, count) pair per channel."""
    def __init__(self, pcm_frames, sample_rate, channel_mask,
                 bits_per_sample, *values):
        audiotools.PCMReader.__init__(
            self,
            sample_rate=sample_rate,
            channels=len(values),
            channel_mask=channel_mask,
            bits_per_sample=bits_per_sample)
        self.pcm_frames = pcm_frames
        self.total_frames = pcm_frames
        self.i = 0
        self.channel_max_values = [pair[0] for pair in values]
        self.channel_counts = [pair[1] for pair in values]
        # one Sine_Simple generator per channel
        self.streams = [
            Sine_Simple(pcm_frames, bits_per_sample, sample_rate,
                        peak, count)
            for (peak, count) in zip(self.channel_max_values,
                                     self.channel_counts)]
        self.md5 = md5()
    def read(self, pcm_frames):
        channel_data = [stream.read(pcm_frames) for stream in self.streams]
        framelist = audiotools.pcm.from_channels(channel_data)
        self.md5.update(framelist.to_bytes(False, True))
        return framelist
    def reset(self):
        for stream in self.streams:
            stream.reset()
        self.md5 = md5()
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
    def close(self):
        for stream in self.streams:
            stream.close()
    def __repr__(self):
        pairs = list(zip(self.channel_max_values, self.channel_counts))
        return "Simple_Sine({!r}, {!r}, {!r}, {!r}, *{!r})".format(
            self.pcm_frames, self.sample_rate, self.channel_mask,
            self.bits_per_sample, pairs)
class WastedBPS16:
    """Stereo 16-bit reader producing counter patterns whose low 2/3 bits
    are always zero ("wasted" bits), with an MD5 sum of the data read."""
    def __init__(self, pcm_frames):
        self.total_frames = pcm_frames
        self.pcm_frames = pcm_frames
        self.i = 0
        self.sample_rate = 44100
        self.channels = 2
        self.channel_mask = 0x3
        self.bits_per_sample = 16
        self.signed = True
        self.sample_frame = audiotools.pcm.empty_framelist(2, 16)
        self.md5 = md5()
        self.read = self.read_opened
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def read_opened(self, pcm_frames):
        interleaved = []
        for _ in range(min(pcm_frames, self.pcm_frames)):
            # shifted counters: the low 2 / 3 bits stay zero
            interleaved.append((self.i % 2000) << 2)
            interleaved.append((self.i % 1000) << 3)
            self.i += 1
        framelist = audiotools.pcm.from_list(interleaved,
                                             self.channels,
                                             self.bits_per_sample,
                                             self.signed)
        self.pcm_frames -= framelist.frames
        self.md5.update(framelist.to_bytes(False, True))
        return framelist
    def read_closed(self, pcm_frames):
        raise ValueError()
    def reset(self):
        self.read = self.read_opened
        self.i = 0
        self.pcm_frames = self.total_frames
        self.md5 = md5()
    def digest(self):
        return self.md5.digest()
    def hexdigest(self):
        return self.md5.hexdigest()
    def close(self):
        self.read = self.read_closed
    def __repr__(self):
        return f"WastedBPS({self.pcm_frames!r})"
class Raw(audiotools.PCMReader):
    """PCMReader serving a pre-rendered two-tone signal with added
    low-level random dither from an in-memory buffer."""
    def __init__(self, pcm_frames, channels, bits_per_sample):
        audiotools.PCMReader.__init__(
            self,
            sample_rate=44100,
            channels=channels,
            channel_mask=0,
            bits_per_sample=bits_per_sample)
        self.file = BytesIO()
        # largest positive sample value for the given bit depth
        full_scale = (1 << (bits_per_sample - 1)) - 1
        # two sine components: (frequency in Hz, amplitude as fraction of full scale)
        f1 = 441.0
        a1 = 0.61
        f2 = 661.5
        a2 = 0.37
        # per-sample phase increments for each component
        delta1 = 2.0 * math.pi / (self.sample_rate / f1)
        delta2 = 2.0 * math.pi / (self.sample_rate / f2)
        theta1 = theta2 = 0.0
        channel = []
        for i in range(pcm_frames):
            # mixed sines scaled to full scale, rounded, plus a random
            # offset in [-8, 7] taken from one byte of os.urandom
            channel.append(int(((a1 * math.sin(theta1) + a2 *
                                 math.sin(theta2)) * full_scale) + 0.5) +
                           ((ord(os.urandom(1)) >> 4) - 8))
            theta1 += delta1
            theta2 += delta2
        # the same rendered channel is duplicated across all channels
        self.file.write(
            audiotools.FrameList.from_channels(
                [channel] * channels).string(bits_per_sample))
        self.file.seek(0, 0)
# Short full-scale-deflection patterns (+1 = maximum sample value,
# -1 = minimum sample value) used by the fsd* readers below to build
# square-wave-like test signals.
PATTERN01 = [1, -1]
PATTERN02 = [1, 1, -1]
PATTERN03 = [1, -1, -1]
PATTERN04 = [1, -1, 1, -1]
PATTERN05 = [1, -1, -1, 1]
PATTERN06 = [1, -1, 1, 1, -1]
PATTERN07 = [1, -1, -1, 1, -1]
def fsd8(pattern, reps):
    """Full-scale-deflection reader: map *pattern* entries (+1/-1) onto the
    extreme 8-bit sample values and repeat, as 1 channel at 44100Hz."""
    # FIXME - not quite accurate
    level = {1: 127, -1: -128}
    samples = [level[step] for step in pattern] * reps
    return FrameListReader(samples, 44100, 1, 8)
def fsd16(pattern, reps):
    """Full-scale-deflection reader: map *pattern* entries (+1/-1) onto the
    extreme 16-bit sample values and repeat, as 1 channel at 44100Hz."""
    level = {1: 32767, -1: -32768}
    samples = [level[step] for step in pattern] * reps
    return FrameListReader(samples, 44100, 1, 16)
def fsd24(pattern, reps):
    """Full-scale-deflection reader: map *pattern* entries (+1/-1) onto the
    extreme 24-bit sample values and repeat, as 1 channel at 44100Hz."""
    level = {1: 8388607, -1: -8388608}
    samples = [level[step] for step in pattern] * reps
    return FrameListReader(samples, 44100, 1, 24)
| tuffy/python-audio-tools | test/test_streams.py | Python | gpl-2.0 | 18,602 | [
"Brian"
] | 9d6b1c593643ff28a5a909b30eeecb33aae638739e6475ebe20bab31f650aa5e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
# Module maturity/support metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
# ansible-doc YAML block; the options documented here must match main()'s
# argument_spec (the required 'outputfile' option was previously missing).
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command based on condition on devices running Lenovo CNOS
description:
    - This module allows you to modify the running configuration of a switch. It provides a way to
      execute a single CNOS command on a network device by evaluating the current running configuration
      and executing the command only if the specific settings have not been already configured.
      The CNOS command is passed as an argument of the method.
      This module functions the same as the cnos_command module.
      The only exception is that the following inventory variable can be specified
      ["condition = <flag string>"]
      When this inventory variable is specified as the variable of a task, the command is executed for
      the network element that matches the flag string. Usually, commands are executed across a group
      of network devices. When there is a requirement to skip the execution of the command on one or
      more devices, it is recommended to use this module.
      This module uses SSH to manage network device configuration.
      For more information about this module from Lenovo and customizing it usage for your
      use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
    clicommand:
        description:
            - This specifies the CLI command as an attribute to this method. The command is passed using
              double quotes. The variables can be placed directly on to the CLI commands or can be invoked
              from the vars directory.
        required: true
        default: Null
    condition:
        description:
            - If you specify condition=false in the inventory file against any device, the command execution
              is skipped for that device.
        required: true
        default: Null
    flag:
        description:
            - If a task needs to be executed, you have to set the flag the same as it is specified in the
              inventory for that device.
        required: true
        default: Null
    outputfile:
        description:
            - This specifies the file path to which the output of each command execution is appended.
        required: true
        default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_command. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_conditional_command_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: leaf_switch2
command: "spanning-tree mode enable"
enablePassword: "anil"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Command Applied]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Execute a single CLI command on a Lenovo CNOS switch when its
    condition flag matches.

    Reads connection and command parameters from the Ansible module
    arguments. When ``condition`` does not equal ``flag`` the command is
    skipped (reported as unchanged); otherwise the command is sent over an
    interactive SSH shell and the device output is appended to
    ``outputfile``. Exits via ``module.exit_json``/``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            clicommand=dict(required=True),
            outputfile=dict(required=True),
            condition=dict(required=True),
            flag=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True), ), supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    condition = module.params['condition']
    flag = module.params['flag']
    cliCommand = module.params['clicommand']
    outputfile = module.params['outputfile']
    deviceType = module.params['deviceType']
    hostIP = module.params['host']
    output = ""
    if (condition != flag):
        # Nothing was executed on the device, so report changed=False
        # (previously this incorrectly reported changed=True).
        module.exit_json(changed=False, msg="Command Skipped for this value")
        return
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Wait for the prompt, then enter enable mode
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0 so the device does not paginate output
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send the CLI command
    output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)
    # Append the collected device output; 'with' guarantees the file is closed
    with open(outputfile, "a") as report:
        report.write(output)
    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if(errorMsg is None):
        module.exit_json(changed=True, msg="CLI Command executed and results saved in file ")
    else:
        module.fail_json(msg=errorMsg)
# Entry point when Ansible executes this module as a script.
if __name__ == '__main__':
    main()
| t0mk/ansible | lib/ansible/modules/network/lenovo/cnos_conditional_command.py | Python | gpl-3.0 | 7,146 | [
"VisIt"
] | f18eb6146b05bc321a156b4c968f6838f2e065cc4c79c41208f0811a0f8d63bd |
#!/usr/bin/python
#
# (C) 2013, Markus Wildi
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
"""Config provides all required constants with default values. ToDo: This module must be rewritten in future.
"""
__author__ = 'wildi.markus@bluewin.ch'
import ConfigParser
import os
import string
# thanks http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
# thanks http://stackoverflow.com/questions/635483/what-is-the-best-way-to-implement-nested-dictionaries-in-python
class AutoVivification(dict):
    """dict subclass that creates nested instances of itself on access to
    missing keys (Perl-style autovivification)."""
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        child = type(self)()
        self[key] = child
        return child
class DefaultConfiguration(object):
    """Default configuration for rts2saf"""
    def __init__(self, debug=False, logger=None):
        """Populate self.dcf, a {(section, option): default} table mirroring
        the INI file layout read later by Configuration.readConfiguration."""
        self.debug=debug
        self.logger=logger
        self.ccd=None
        self.foc=None
        self.sexFields=list()
        self.config = ConfigParser.RawConfigParser()
        # keep option names case-sensitive (ConfigParser lowercases by default)
        self.config.optionxform = str
        self.dcf=dict()
        self.dcf[('basic', 'BASE_DIRECTORY')]= '/tmp/rts2saf_focus'
        self.dcf[('basic', 'TEMP_DIRECTORY')]= '/tmp/'
        self.dcf[('basic', 'FILE_GLOB')]= '*fits'
        self.dcf[('filter wheels', 'inuse')]= '[ FILTA ]'
        self.dcf[('filter wheels', 'EMPTY_SLOT_NAMES')]= [ 'empty8', 'open' ]
        # this is really ugly
        # but ConfigParser does not allow something else
        # ToDo define more!
        self.dcf[('filter wheel', 'fltw1')]= '[ FILTA, U, nof]'
        self.dcf[('filter wheel', 'fltw2')]= '[ FILTB, Y ]'
        self.dcf[('filter wheel', 'fltw3')]= '[ FILTC, nof ]'
        self.dcf[('filter wheel', 'fltw4')]= '[ FILTD, nof ]'
        #
        # relative lower acquisition limit [tick]
        # relative upper acquisition limit [tick]
        # stepsize [tick]
        # exposure factor
        self.dcf[('filter properties', 'flt1')]= '[ U, -1000, 1100, 100, 11.1]'
        self.dcf[('filter properties', 'flt2')]= '[ nof1,-1200, 1300, 200, 1.]'
        self.dcf[('filter properties', 'flt3')]= '[ nof2,-1200, 1300, 200, 1.]'
        self.dcf[('filter properties', 'flt4')]= '[ C, -1400, 1500, 300, 1.]'
        self.dcf[('filter properties', 'flt5')]= '[ D, -1400, 1500, 300, 1.]'
        self.dcf[('filter properties', 'flt6')]= '[ D, -1400, 1500, 300, 1.]'
        self.dcf[('filter properties', 'flt7')]= '[ D, -1400, 1500, 300, 1.]'
        self.dcf[('filter properties', 'flt8')]= '[ D, -1400, 1500, 300, 1.]'
        self.dcf[('filter properties', 'flt9')]= '[ D, -1400, 1500, 300, 1.]'
        self.dcf[('focuser properties', 'FOCUSER_NAME')]= 'F0'
        self.dcf[('focuser properties', 'FOCUSER_RESOLUTION')]= 20
        self.dcf[('focuser properties', 'FOCUSER_ABSOLUTE_LOWER_LIMIT')]= 0
        self.dcf[('focuser properties', 'FOCUSER_ABSOLUTE_UPPER_LIMIT')]= 20
        self.dcf[('focuser properties', 'FOCUSER_LOWER_LIMIT')]= 0
        self.dcf[('focuser properties', 'FOCUSER_UPPER_LIMIT')]= 20
        self.dcf[('focuser properties', 'FOCUSER_STEP_SIZE')]= 2
        self.dcf[('focuser properties', 'FOCUSER_SPEED')]= 100.
        self.dcf[('focuser properties', 'FOCUSER_NO_FTW_RANGE')]= '[ -100, 100, 20 ]'
        self.dcf[('focuser properties', 'FOCUSER_TEMPERATURE_COMPENSATION')]= False
        # not yet in use:
        self.dcf[('acceptance circle', 'CENTER_OFFSET_X')]= 0.
        self.dcf[('acceptance circle', 'CENTER_OFFSET_Y')]= 0.
        #
        self.dcf[('acceptance circle', 'RADIUS')]= 2000.
        #
        #
        self.dcf[('analysis', 'MINIMUM_OBJECTS')]= 5
        self.dcf[('analysis', 'MINIMUM_FOCUSER_POSITIONS')]= 5
        # if non empty only FOC_POS within this interval will be analyzed
        self.dcf[('analysis', 'FOCUSER_INTERVAL')]= list()
        self.dcf[('SExtractor', 'SEXPATH')]= 'sextractor'
        self.dcf[('SExtractor', 'SEXCFG')]= '/usr/local/etc/rts2/rts2saf/sex/rts2saf-sex.cfg'
        self.dcf[('SExtractor', 'FIELDS')]= ['NUMBER', 'EXT_NUMBER','X_IMAGE','Y_IMAGE','MAG_BEST','FLAGS','CLASS_STAR','FWHM_IMAGE','A_IMAGE','B_IMAGE']
        # ToDo, currently put into default sex.fg
        # from sextractor config file
        # ASSOC_PARAMS  3,4  # columns of xpos,ypos[,mag] # rts2af do not use mag
        # ASSOC_RADIUS  10.0  # cross-matching radius (pixels)
        # ASSOC_TYPE  NEAREST  # ASSOCiation method: FIRST, NEAREST, MEAN,
        self.dcf[('SExtractor', 'OBJECT_SEPARATION')]= 10.
        self.dcf[('SExtractor', 'ELLIPTICITY')]= .1
        self.dcf[('SExtractor', 'ELLIPTICITY_REFERENCE')]= .3
        self.dcf[('SExtractor', 'DETECT_THRESH')]=1.7
        self.dcf[('SExtractor', 'ANALYSIS_THRESH')]=1.7
        self.dcf[('SExtractor', 'DEBLEND_MINCONT')]= 0.1
        self.dcf[('SExtractor', 'SATUR_LEVEL')]= 65535
        self.dcf[('SExtractor', 'STARNNW_NAME')]= '/usr/local/etc/rts2/rts2saf/rts2saf-sex.nnw'
        # mapping as found in dummy CCD, used for set
        self.dcf[('ccd binning mapping', '1x1')] = 0
        self.dcf[('ccd binning mapping', '2x2')] = 1
        self.dcf[('ccd binning mapping', '3x3')] = 2
        self.dcf[('ccd binning mapping', '4x4')] = 3
        self.dcf[('ccd', 'CCD_NAME')]= 'CD'
        self.dcf[('ccd', 'CCD_BINNING')]= '1x1'
        self.dcf[('ccd', 'WINDOW')]= '[ -1, -1, -1, -1 ]'
        self.dcf[('ccd', 'PIXELSIZE')]= 9.e-6 # unit meter
        self.dcf[('ccd', 'PIXELSCALE')]= 1.1 # unit arcsec/pixel
        self.dcf[('ccd', 'BASE_EXPOSURE')]= .01
        self.dcf[('mode', 'SET_FOC_DEF')]= False
        self.dcf[('mode', 'WRITE_FILTER_OFFSETS')]= True
        # ToDo, make a real alternative
        # self.dcf[('mode', 'ANALYZE_FWHM')]= True
        self.dcf[('mode', 'ANALYZE_FLUX')]= False
        self.dcf[('mode', 'ANALYZE_ASSOC')]= False
        self.dcf[('mode', 'ANALYZE_ASSOC_FRACTION')]= 0.65
        self.dcf[('mode', 'WITH_MATHPLOTLIB')]= False
        self.dcf[('mode', 'WEIGHTED_MEANS')]= False
        # mapping of fits header elements to canonical
        self.dcf[('fits header mapping', 'AMBIENTTEMPERATURE')]= 'HIERARCH DAVIS.DOME_TMP'
        self.dcf[('fits header mapping', 'DATETIME')]= 'JD'
        self.dcf[('fits header mapping', 'EXPOSURE')]= 'EXPOSURE'
        self.dcf[('fits header mapping', 'CCD_TEMP')]= 'CCD_TEMP'
        self.dcf[('fits header mapping', 'FOC_POS')] = 'FOC_POS'
        self.dcf[('fits header mapping', 'DATE-OBS')]= 'DATE-OBS'
        self.dcf[('fits header mapping', 'BINNING')]= 'BINNING'
        self.dcf[('fits header mapping', 'BINNING_X')]= 'BIN_V' # seen BIN_X
        self.dcf[('fits header mapping', 'BINNING_Y')]= 'BIN_H' # seen BIN_Y
        # These factors are used for fitting
        self.dcf[('fits binning mapping', '1x1')]= 1
        self.dcf[('fits binning mapping', '2x2')]= 2
        self.dcf[('fits binning mapping', '4x4')]= 4
        self.dcf[('fits binning mapping', '8x8')]= 8
        self.dcf[('telescope', 'TEL_RADIUS')] = 0.09 # [meter]
        self.dcf[('telescope', 'TEL_FOCALLENGTH')] = 1.26 # [meter]
        self.dcf[('connection', 'URL')] = 'http://127.0.0.1:8889'
        self.dcf[('connection', 'USERNAME')] = 'rts2saf'
        self.dcf[('connection', 'PASSWORD')] = 'set password in your config file'
        self.dcf[('queue focus run', 'FWHM_LOWER_THRESH')] = 35.
        self.dcf[('analysis', 'FWHM_MIN')] = 1.5
        self.dcf[('analysis', 'FWHM_MAX')] = 12.
        self.dcf[('IMGP analysis', 'FILTERS_TO_EXCLUDE')] = '[ FILTC:grism1]'
        self.dcf[('IMGP analysis', 'SCRIPT_FWHM')] = '/usr/local/bin/rts2saf_fwhm.py'
        self.dcf[('IMGP analysis', 'SCRIPT_ASTROMETRY')] = '/usr/local/bin/rts2-astrometry.net'
        # or rts2-astrometry.net
    def writeDefaultConfiguration(self, cfn='./rts2saf-default.cfg'):
        """Write the default configuration to file, serves as a starting point.

        :param cfn: file name
        :type string:
        :return cfn: file name if success else None
        """
        # copy self.dcf into the RawConfigParser, creating sections on demand
        for (section, identifier), value in sorted(self.dcf.iteritems()):
            if self.config.has_section(section)== False:
                self.config.add_section(section)
            self.config.set(section, identifier, value)
        try:
            with open( cfn, 'w') as configfile:
                configfile.write('# 2013-09-10, Markus Wildi\n')
                configfile.write('# default configuration for rts2saf\n')
                configfile.write('#\n')
                configfile.write('#\n')
                self.config.write(configfile)
        except Exception, e:
            self.logger.error('Configuration.writeDefaultConfiguration: config file: {0} could not be written, error: {1}'.format(cfn,e))
            return None
        return cfn
class Configuration(DefaultConfiguration):
    """Helper class containing the runtime configuration.
    """
    # init from base class
    def readConfiguration(self, fileName=None):
        """Copy the default configuration and overwrite the values with those from configuration file.

        :param fileName: path of the INI file to read
        :return: True if success else False
        """
        # make the values accessible
        self.cfg=AutoVivification()
        # TODO
        filterWheelsInuse=list()
        filterWheelsDefs=dict()
        config = ConfigParser.ConfigParser()
        # keep option names case-sensitive
        config.optionxform = str
        if os.path.exists(fileName):
            try:
                config.readfp(open(fileName))
            except Exception, e:
                self.logger.error('Configuration.readConfiguration: config file: {0} has wrong syntax, error: {1}'.format(fileName,e))
                return False
            # ok, I misuse ConfigParser
            # check additional elements or typo
            # any (section, option) pair not present in self.dcf is rejected
            for sct in config.sections():
                for k,v in config.items(sct):
                    try:
                        self.dcf[(sct, k)]
                    except Exception, e:
                        self.logger.error('Configuration.readConfiguration: config file: {0} has wrong syntax, error: {1}'.format(fileName,e))
                        return False
        else:
            self.logger.error('Configuration.readConfiguration: config file: {0} not found'.format(fileName))
            return False
        self.cfg['CFGFN'] = fileName
        # read the defaults
        for (section, identifier), value in self.dcf.iteritems():
            #
            # ToDO ugly
            if section == 'ccd' :
                self.cfg[identifier]= value
            elif section in 'fits binning mapping' or section in 'ccd binning mapping':
                self.cfg[section][identifier]= value
            else:
                self.cfg[identifier]= value
        # over write the defaults
        ftds=list()
        # if there is no filter wheel defined, a FAKE wheel with one FAKE filter is created
        fakeFtw=True
        for (section, identifier), value in self.dcf.iteritems():
            try:
                value = config.get( section, identifier)
            except Exception, e:
                # exception if section, identifier value are not present in config file
                #self.logger.error('Configuration.readConfiguration: config file: {0} has an error at section:{1}, identifier:{2}, value:{3}'.format(fileName, section, identifier, value))
                continue
            value= string.replace( value, ' ', '')
            items=list()
            # decode the compound configuration expressions first, the rest is copied to
            # after completion
            if section=='SExtractor':
                if identifier in 'FIELDS':
                    value=value.replace("'", '')
                    self.cfg['FIELDS']=value[1:-1].split(',')
                else:
                    self.cfg[identifier]= value
            elif section=='basic':
                if isinstance(self.cfg[identifier], bool):
                    # ToDo, looking for a direct way
                    if value in 'True':
                        self.cfg[identifier]= True
                    else:
                        self.cfg[identifier]= False
                else:
                    self.cfg[identifier]= value
            elif section=='focuser properties':
                if identifier in 'FOCUSER_NO_FTW_RANGE':
                    self.cfg[identifier]=value[1:-1].split(',')
                else:
                    self.cfg[identifier]= value
            #
            elif section=='filter properties':
                self.cfg[identifier]= value
                ftds.append(value)
            #
            elif section=='filter wheel':
                items= value[1:-1].split(',')
                filterWheelsDefs[items[0]]=[ x for x in items[1:] if x is not '']
            #
            elif( section=='filter wheels'):
                fakeFtw=False
                if identifier in 'inuse':
                    filterWheelsInuse=value[1:-1].split(',')
                    self.cfg[identifier]=filterWheelsInuse
                elif identifier in 'EMPTY_SLOT_NAMES':
                    self.cfg[identifier]=value[1:-1].split(',')
            #
            elif( section == 'ccd' and identifier == 'WINDOW'):
                items= value[1:-1].split(',')
                self.cfg[identifier] = [ int(x) for x in items ]
                if len(self.cfg[identifier]) != 4:
                    self.logger.warn( 'Configuration.readConfiguration: wrong ccd window specification {0} {1}, using the whole CCD area'.format(len(self.cfg[identifier]), self.cfg[identifier]))
                    self.cfg[identifier] = [ -1, -1, -1, -1]
            elif( section=='analysis') and identifier == 'FOCUSER_INTERVAL':
                items= value[1:-1].split(',')
                self.cfg[identifier] = [ int(x) for x in items ]
                if len(self.cfg[identifier]) != 2:
                    self.logger.warn( 'Configuration.readConfiguration: wrong focuser interval specification {0} {1}, using all images'.format(len(self.cfg[identifier]), self.cfg[identifier]))
                    self.cfg[identifier] = list()
            elif( section=='IMGP analysis'):
                items= value[1:-1].split(',')
                if identifier in 'FILTERS_TO_EXCLUDE':
                    tDict=dict()
                    for e in value[1:-1].split(','):
                        k,v=e.split(':')
                        tDict[v]=k # that's ok !!
                    self.cfg[identifier]=tDict
                else:
                    self.cfg[identifier]= value
            elif( section=='fits binning mapping'):
                # exception
                self.cfg[section][identifier]= value
            elif( section=='ccd binning mapping'):
                # exception
                self.cfg[section][identifier]= value
            # first bool, then int !
            elif isinstance(self.cfg[identifier], bool):
                # ToDo, looking for a direct way
                if value in 'True':
                    self.cfg[identifier]= True
                else:
                    self.cfg[identifier]= False
            elif( isinstance(self.cfg[identifier], int)):
                try:
                    self.cfg[identifier]= int(value)
                except Exception, e:
                    self.logger.error('Configuration.readConfiguration: no int '+ value+ ' in section ' +  section + ', identifier ' + identifier + ' in file ' + fileName+ ', error: {0}'.format(e))
            elif(isinstance(self.cfg[identifier], float)):
                try:
                    self.cfg[identifier]= float(value)
                except Exception, e:
                    self.logger.error('Configuration.readConfiguration: no float '+ value+ 'in section ' +  section + ', identifier ' + identifier + ' in file ' + fileName + ', error: {0}'.format(e))
            else:
                self.cfg[identifier]= value
        # for convenience
        # ToDo look!
        self.cfg['FAKE'] = fakeFtw
        if self.cfg['FAKE']:
            self.cfg['FILTER DEFINITIONS'] = ['FAKE_FT']
            self.cfg['FILTER WHEEL DEFINITIONS'] = {'FAKE_FTW': [ 'FAKE_FT'] }
            self.cfg['FILTER WHEELS INUSE'] = [ 'FAKE_FTW' ]
        else:
            self.cfg['FILTER DEFINITIONS'] = ftds
            self.cfg['FILTER WHEEL DEFINITIONS'] = filterWheelsDefs
            self.cfg['FILTER WHEELS INUSE'] = filterWheelsInuse
        self.cfg['FITS_BINNING_MAPPING'] = self.cfg['fits binning mapping']
        self.cfg['CCD_BINNING_MAPPING'] = self.cfg['ccd binning mapping']
        return True
    def writeConfiguration(self, cfn='./rts2saf-my-new.cfg'):
        """Dump self.dcf to *cfn* via ConfigParser, printing each entry."""
        for (section, identifier), value in sorted(self.dcf.iteritems()):
            print section, '=>', identifier, '=>', value
            if self.config.has_section(section)== False:
                self.config.add_section(section)
            self.config.set(section, identifier, value)
        with open( cfn, 'w') as configfile:
            configfile.write(' 2013-09-10, Markus Wildi\n')
            configfile.write(' default configuration for rts2saf\n')
            configfile.write('\n')
            configfile.write('\n')
            self.config.write(configfile)
    def checkConfiguration(self, args=None):
        """Check the runtime configuration e.g. if SExtractor is present or if the filter wheel definitions and filters are consistent.

        :return: True if success else False
        """
        # rts2.sextractor excepts the file not found error and uses internal defaults, we check that here
        if not os.path.exists(self.cfg['SEXPATH']):
            self.logger.warn( 'Configuration.checkConfiguration: sextractor path: {0} not valid, returning'.format(self.cfg['SEXPATH']))
            return False
        if not os.path.exists(self.cfg['SEXCFG']):
            self.logger.warn( 'Configuration.checkConfiguration: SExtractor config file: {0} not found, returning'.format(self.cfg['SEXCFG']))
            return False
        if not os.path.exists(self.cfg['STARNNW_NAME']):
            self.logger.warn( 'Configuration.checkConfiguration: SExtractor NNW config file: {0} not found, returning'.format(self.cfg['STARNNW_NAME']))
            return False
        if not self.cfg['FIELDS']:
            self.logger.warn( 'Configuration.checkConfiguration: no sextractor fields defined, returning')
            return False
        # every wheel in use must be defined, and every filter a wheel
        # references must appear in the filter definitions
        ftws = self.cfg['FILTER WHEEL DEFINITIONS'].keys()
        fts=list()
        for x in self.cfg['FILTER DEFINITIONS']:
            ele= x.strip('[]').split(',')
            fts.append(ele[0])
        for ftw in self.cfg['FILTER WHEELS INUSE']:
            if ftw not in ftws:
                self.logger.warn( 'Configuration.checkConfiguration: filter wheel: {} not defined in: {}'.format(ftw, ftws))
                return False
            for ftName in self.cfg['FILTER WHEEL DEFINITIONS'][ftw]:
                if ftName not in fts:
                    self.logger.warn( 'Configuration.checkConfiguration: filter: {} not defined in: {}'.format(ftName, self.cfg['FILTER DEFINITIONS']))
                    return False
        # --associate requires the SExtractor NUMBER field
        try:
            vars(args)['associate']
            if not 'NUMBER' in self.cfg['FIELDS']:
                self.logger.error( 'Configuration.checkConfiguration: with --associate specify SExtractor parameter NUMBER in FIELDS: {}'.format( self.cfg['FIELDS']))
                return False
        except:
            pass
        # --flux adds the flux fields itself, so they must not be preconfigured
        try:
            vars(args)['flux']
            for fld in ['FLUX_MAX' , 'FLUX_APER', 'FLUXERR_APER']:
                if fld in self.cfg['FIELDS']:
                    self.logger.error( 'Configuration.checkConfiguration: with --flux do not specify SExtractor parameter: {} in FIELDS: {}'.format( fld, self.cfg['FIELDS']))
                    return False
        except:
            pass
        return True
    # more to come
| xyficu/rts2 | scripts/rts2saf/rts2saf/config.py | Python | gpl-2.0 | 21,088 | [
"VisIt"
] | 407a09501934f4ee7d333666b21ec4124b810c5c1edb73412890f79f71195f65 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Yambo(AutotoolsPackage):
    """Yambo is a FORTRAN/C code for Many-Body calculations in solid
    state and molecular physics.

    Yambo relies on the Kohn-Sham wavefunctions generated by two DFT
    public codes: abinit, and PWscf. The code was originally developed
    in the Condensed Matter Theoretical Group of the Physics Department
    at the University of Rome "Tor Vergata" by Andrea Marini. Previous
    to its release under the GPL license, yambo was known as SELF.
    """
    homepage = "http://www.yambo-code.org/index.php"
    url = "https://github.com/yambo-code/yambo/archive/4.2.2.tar.gz"
    version('4.2.2', '97f3513bd726141be5e18072118b6fb5')
    version('4.2.1', '99027014192c0f0f4b5d9b48414ad85d')
    version('4.2.0', '0cbb4d7c9790596d163ebe872d95bd30')
    variant('dp', default=False, description='Enable double precision')
    variant(
        'profile',
        values=('time', 'memory'),
        default='',
        description='Activate profiling of specific sections',
        multi=True
    )
    variant(
        'io',
        values=('iotk', 'etsf-io'),
        default='',
        description='Activate support for different io formats (requires network access)',  # noqa
        multi=True
    )
    # MPI + OpenMP parallelism
    variant('mpi', default=True, description='Enable MPI support')
    variant('openmp', default=False, description='Enable OpenMP support')
    depends_on('blas')
    depends_on('lapack')
    # MPI dependencies are forced, until we have proper forwarding of variants
    #
    # Note that yambo is used as an application, and not linked as a library,
    # thus there will be no case where another package pulls-in e.g. netcdf+mpi
    # and wants to depend on yambo~mpi.
    depends_on('mpi', when='+mpi')
    depends_on('netcdf+mpi', when='+mpi')
    depends_on('hdf5+mpi', when='+mpi')
    depends_on('fftw+mpi', when='+mpi')
    depends_on('scalapack', when='+mpi')
    depends_on('netcdf~mpi', when='~mpi')
    depends_on('hdf5~mpi', when='~mpi')
    depends_on('fftw~mpi', when='~mpi')
    depends_on('hdf5+fortran')
    depends_on('netcdf')
    depends_on('netcdf-fortran')
    depends_on('libxc@2.0.3:')
    build_targets = ['all']
    # upstream build system is not parallel-make safe
    parallel = False
    # The configure in the package has the string 'cat config/report'
    # hard-coded, which causes a failure at configure time due to the
    # current working directory in Spack. Fix this by using the absolute
    # path to the file.
    @run_before('configure')
    def filter_configure(self):
        """Rewrite the hard-coded 'config/report' path in configure to an
        absolute path so configure works from Spack's build directory."""
        report_abspath = join_path(self.build_directory, 'config', 'report')
        filter_file('config/report', report_abspath, 'configure')
    def enable_or_disable_time(self, activated):
        """Map the 'profile=time' variant value onto its configure flag."""
        return '--enable-time-profile' if activated else '--disable-time-profile'  # noqa: E501
    def enable_or_disable_memory(self, activated):
        """Map the 'profile=memory' variant value onto its configure flag."""
        return '--enable-memory-profile' if activated else '--disable-memory-profile'  # noqa: E501
    def enable_or_disable_openmp(self, activated):
        """Map the 'openmp' variant onto its configure flag."""
        return '--enable-open-mp' if activated else '--disable-open-mp'
    def configure_args(self):
        """Assemble the ./configure argument list from the active variants
        and the dependency prefixes/libraries."""
        args = [
            # As of version 4.2.1 there are hard-coded paths that make
            # the build process fail if the target prefix is not the
            # configure directory
            '--prefix={0}'.format(self.stage.source_path),
            '--disable-keep-objects',
            '--with-editor=none'
        ]
        spec = self.spec
        # Double precision
        args.extend(self.enable_or_disable('dp'))
        # Application profiling
        args.extend(self.enable_or_disable('profile'))
        # MPI + threading
        args.extend(self.enable_or_disable('mpi'))
        args.extend(self.enable_or_disable('openmp'))
        # LAPACK
        if '+mpi' in spec:
            args.append('--with-scalapack-libs={0}'.format(
                spec['scalapack'].libs +
                spec['lapack'].libs +
                spec['blas'].libs
            ))
        args.extend([
            '--with-blas-libs={0}'.format(spec['blas'].libs),
            '--with-lapack-libs={0}'.format(spec['lapack'].libs)
        ])
        # Netcdf
        args.extend([
            '--enable-netcdf-hdf5',
            '--enable-hdf5-compression',
            '--with-hdf5-libs={0}'.format(spec['hdf5'].libs),
            '--with-netcdf-path={0}'.format(spec['netcdf'].prefix),
            '--with-netcdff-path={0}'.format(spec['netcdf-fortran'].prefix)
        ])
        args.extend(self.enable_or_disable('io'))
        # Other dependencies
        args.append('--with-fft-path={0}'.format(spec['fftw'].prefix))
        args.append('--with-libxc-path={0}'.format(spec['libxc'].prefix))
        return args
    def install(self, spec, prefix):
        """Copy the build products into the installation prefix."""
        # As of version 4.2.1 an 'install' target is advertized,
        # but not present
        install_tree('bin', prefix.bin)
        install_tree('lib', prefix.lib)
        install_tree('include', prefix.include)
        install_tree('driver', prefix.driver)
| krafczyk/spack | var/spack/repos/builtin/packages/yambo/package.py | Python | lgpl-2.1 | 6,314 | [
"ABINIT",
"NetCDF",
"Yambo"
] | 76e6cd9ae59a398c8701e9eda3122d65046aa02e86fc81fefd3e20b845f8b562 |
"""
Acceptance tests for Studio related to course reruns.
"""
from __future__ import absolute_import
import random
from bok_choy.promise import EmptyPromise
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.studio.course_rerun import CourseRerunPage
from common.test.acceptance.pages.studio.index import DashboardPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from .base_studio_test import StudioCourseTest
class CourseRerunTest(StudioCourseTest):
    """
    Feature: Courses can be rerun
    """
    shard = 21
    __test__ = True
    # Names used both to build the course fixture and to locate each level
    # of the outline during the test.
    # NOTE(review): SUBSECITON_NAME is misspelled ("SUBSECITON"); renaming it
    # would require updating every reference, so it is only flagged here.
    SECTION_NAME = 'Rerun Section'
    SUBSECITON_NAME = 'Rerun Subsection'
    UNIT_NAME = 'Rerun Unit'
    COMPONENT_NAME = 'Rerun Component'
    COMPONENT_CONTENT = 'Test Content'
    def setUp(self):
        """
        Login as global staff because that's the only way to rerun a course.
        """
        super(CourseRerunTest, self).setUp(is_staff=True)
        # The Studio dashboard is the entry point for creating a rerun.
        self.dashboard_page = DashboardPage(self.browser)
    def populate_course_fixture(self, course_fixture):
        """
        Create a sample course with one section, one subsection, one unit, and one component.
        """
        # chapter -> sequential -> vertical -> html, named via the *_NAME
        # class constants so the test can find each level in the outline.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', self.SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', self.SUBSECITON_NAME).add_children(
                    XBlockFixtureDesc('vertical', self.UNIT_NAME).add_children(
                        XBlockFixtureDesc('html', self.COMPONENT_NAME, self.COMPONENT_CONTENT)
                    )
                )
            )
        )
def test_course_rerun(self):
"""
Scenario: Courses can be rerun
Given I have a course with a section, subsesction, vertical, and html component with content 'Test Content'
When I visit the course rerun page
And I type 'test_rerun' in the course run field
And I click Create Rerun
And I visit the course listing page
And I wait for all courses to finish processing
And I click on the course with run 'test_rerun'
Then I see a rerun notification on the course outline page
And when I click 'Dismiss' on the notification
Then I do not see a rerun notification
And when I expand the subsection and click on the unit
And I click 'View Live Version'
Then I see one html component with the content 'Test Content'
"""
course_info = (self.course_info['org'], self.course_info['number'], self.course_info['run'])
updated_course_info = course_info[0] + "+" + course_info[1] + "+" + course_info[2]
self.dashboard_page.visit()
self.dashboard_page.scroll_to_course(course_info[1])
self.dashboard_page.create_rerun(updated_course_info)
rerun_page = CourseRerunPage(self.browser, *course_info)
rerun_page.wait_for_page()
course_run = 'test_rerun_' + str(random.randrange(1000000, 9999999))
rerun_page.course_run = course_run
rerun_page.create_rerun()
def finished_processing():
self.dashboard_page.visit()
return not self.dashboard_page.has_processing_courses
EmptyPromise(finished_processing, "Rerun finished processing", try_interval=5, timeout=60).fulfill()
assert course_run in self.dashboard_page.course_runs
self.dashboard_page.click_course_run(course_run)
outline_page = CourseOutlinePage(self.browser, *course_info)
outline_page.wait_for_page()
self.assertTrue(outline_page.has_rerun_notification)
outline_page.dismiss_rerun_notification()
EmptyPromise(lambda: not outline_page.has_rerun_notification, "Rerun notification dismissed").fulfill()
subsection = outline_page.section(self.SECTION_NAME).subsection(self.SUBSECITON_NAME)
subsection.expand_subsection()
unit_page = subsection.unit(self.UNIT_NAME).go_to()
unit_page.view_published_version()
courseware = CoursewarePage(self.browser, self.course_id)
courseware.wait_for_page()
self.assertEqual(courseware.num_xblock_components, 1)
self.assertEqual(courseware.xblock_component_html_content(), self.COMPONENT_CONTENT)
| ESOedX/edx-platform | common/test/acceptance/tests/studio/test_studio_rerun.py | Python | agpl-3.0 | 4,426 | [
"VisIt"
] | fca6853708063376d47b1af6cd02fe23a58acf12dc4a8c9d215fe199ba5d475e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# ChemPy - A chemistry toolkit for Python
#
# Copyright (c) 2010 by Joshua W. Allen (jwallen@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
Contains functions for converting between some of the thermodynamics models
given in the :mod:`chempy.thermo` module. The two primary functions are:
* :func:`convertGAtoWilhoit()` - converts a :class:`ThermoGAModel` to a :class:`WilhoitModel`
* :func:`convertWilhoitToNASA()` - converts a :class:`WilhoitModel` to a :class:`NASAModel`
"""
import math
import numpy
import logging
import cython
from scipy import zeros, linalg, optimize, integrate
import chempy.constants as constants
from chempy.thermo import ThermoGAModel, WilhoitModel, NASAPolynomial, NASAModel
################################################################################
def convertGAtoWilhoit(GAthermo, atoms, rotors, linear, B0=500.0, constantB=False):
    """
    Fit a :class:`WilhoitModel` to the heat capacity data of a
    :class:`ThermoGAModel` object `GAthermo`. The number of `atoms`,
    internal `rotors`, and the linearity `linear` of the molecule determine
    the limiting heat capacities at zero and infinite temperature. `B0` is
    an initial guess of the scaling temperature; set `constantB` to ``True``
    to keep it fixed during the fit. Returns the fitted
    :class:`WilhoitModel` object.
    """
    # Vibrational mode count: 3N - 6 (or 3N - 5 if linear), minus one mode
    # per internal rotor.
    if linear:
        freq = 3 * atoms - 5 - rotors
    else:
        freq = 3 * atoms - 6 - rotors
    wilhoit = WilhoitModel()
    fit = wilhoit.fitToDataForConstantB if constantB else wilhoit.fitToData
    fit(GAthermo.Tdata, GAthermo.Cpdata, linear, freq, rotors, GAthermo.H298, GAthermo.S298, B0)
    return wilhoit
################################################################################
def convertWilhoitToNASA(wilhoit, Tmin, Tmax, Tint, fixedTint=False, weighting=True, continuity=3):
    """
    Convert a :class:`WilhoitModel` object `wilhoit` to a :class:`NASAModel`
    object. You must specify the minimum and maximum temperatures of the fit
    `Tmin` and `Tmax`, as well as the intermediate temperature `Tint` to use
    as the bridge between the two fitted polynomials. The remaining parameters
    can be used to modify the fitting algorithm used:

    * `fixedTint` - ``False`` to allow `Tint` to vary in order to improve the fit, or ``True`` to keep it fixed
    * `weighting` - ``True`` to weight the fit by :math:`T^{-1}` to emphasize good fit at lower temperatures, or ``False`` to not use weighting
    * `continuity` - The number of continuity constraints to enforce at `Tint`:

      - 0: no constraints on continuity of :math:`C_\\mathrm{p}(T)` at `Tint`
      - 1: constrain :math:`C_\\mathrm{p}(T)` to be continous at `Tint`
      - 2: constrain :math:`C_\\mathrm{p}(T)` and :math:`\\frac{d C_\\mathrm{p}}{dT}` to be continuous at `Tint`
      - 3: constrain :math:`C_\\mathrm{p}(T)`, :math:`\\frac{d C_\\mathrm{p}}{dT}`, and :math:`\\frac{d^2 C_\\mathrm{p}}{dT^2}` to be continuous at `Tint`
      - 4: constrain :math:`C_\\mathrm{p}(T)`, :math:`\\frac{d C_\\mathrm{p}}{dT}`, :math:`\\frac{d^2 C_\\mathrm{p}}{dT^2}`, and :math:`\\frac{d^3 C_\\mathrm{p}}{dT^3}` to be continuous at `Tint`
      - 5: constrain :math:`C_\\mathrm{p}(T)`, :math:`\\frac{d C_\\mathrm{p}}{dT}`, :math:`\\frac{d^2 C_\\mathrm{p}}{dT^2}`, :math:`\\frac{d^3 C_\\mathrm{p}}{dT^3}`, and :math:`\\frac{d^4 C_\\mathrm{p}}{dT^4}` to be continuous at `Tint`

    Note that values of `continuity` of 5 or higher effectively constrain all
    the coefficients to be equal and should be equivalent to fitting only one
    polynomial (rather than two).

    Returns the fitted :class:`NASAModel` object containing the two fitted
    :class:`NASAPolynomial` objects.
    """
    # Scale the temperatures to kK (the fit is performed in kiloKelvin)
    Tmin /= 1000.
    Tint /= 1000.
    Tmax /= 1000.

    # Make copy of Wilhoit data so we don't modify the original
    wilhoit_scaled = WilhoitModel(wilhoit.cp0, wilhoit.cpInf, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3, wilhoit.H0, wilhoit.S0, wilhoit.comment, B=wilhoit.B)
    # Rescale Wilhoit parameters: Cp in units of R, B in kK
    wilhoit_scaled.cp0 /= constants.R
    wilhoit_scaled.cpInf /= constants.R
    wilhoit_scaled.B /= 1000.

    # If we are using fixed Tint, do not allow Tint to float
    if fixedTint:
        nasa_low, nasa_high = Wilhoit2NASA(wilhoit_scaled, Tmin, Tmax, Tint, weighting, continuity)
    else:
        nasa_low, nasa_high, Tint = Wilhoit2NASA_TintOpt(wilhoit_scaled, Tmin, Tmax, weighting, continuity)
    iseUnw = TintOpt_objFun(Tint, wilhoit_scaled, Tmin, Tmax, 0, continuity) #the scaled, unweighted ISE (integral of squared error)
    rmsUnw = math.sqrt(iseUnw/(Tmax-Tmin))
    rmsStr = '(Unweighted) RMS error = %.3f*R;'%(rmsUnw)
    if(weighting == 1):
        iseWei= TintOpt_objFun(Tint, wilhoit_scaled, Tmin, Tmax, weighting, continuity) #the scaled, weighted ISE
        rmsWei = math.sqrt(iseWei/math.log(Tmax/Tmin))
        rmsStr = 'Weighted RMS error = %.3f*R;'%(rmsWei)+rmsStr
    else:
        # BUGFIX: rmsWei was previously left undefined when weighting was
        # disabled, raising a NameError in the quality check below.
        # (convertCpToNASA already handles this case the same way.)
        rmsWei = 0.0

    # Print a warning if the rms fit is worse than 0.25*R
    if(rmsUnw > 0.25 or rmsWei > 0.25):
        logging.warning("Poor Wilhoit-to-NASA fit quality: RMS error = %.3f*R" % (rmsWei if weighting == 1 else rmsUnw))

    # Restore to conventional units of K for Tint and units based on K rather
    # than kK in NASA polynomial coefficients
    Tint *= 1000.
    Tmin *= 1000.
    Tmax *= 1000.
    nasa_low.c1 /= 1000.
    nasa_low.c2 /= 1000000.
    nasa_low.c3 /= 1000000000.
    nasa_low.c4 /= 1000000000000.
    nasa_high.c1 /= 1000.
    nasa_high.c2 /= 1000000.
    nasa_high.c3 /= 1000000000.
    nasa_high.c4 /= 1000000000000.

    # Output comment
    comment = 'NASA function fitted to Wilhoit function. ' + rmsStr + wilhoit.comment
    nasa_low.Tmin = Tmin; nasa_low.Tmax = Tint
    nasa_low.comment = 'Low temperature range polynomial'
    nasa_high.Tmin = Tint; nasa_high.Tmax = Tmax
    nasa_high.comment = 'High temperature range polynomial'

    # For the low polynomial, we want the results to match the Wilhoit value at 298.15 K
    # low polynomial enthalpy:
    Hlow = (wilhoit.getEnthalpy(298.15) - nasa_low.getEnthalpy(298.15))/constants.R
    # low polynomial entropy:
    Slow = (wilhoit.getEntropy(298.15) - nasa_low.getEntropy(298.15))/constants.R
    # update last two coefficients
    nasa_low.c5 = Hlow
    nasa_low.c6 = Slow

    # For the high polynomial, we want the results to match the low polynomial value at Tint
    # high polynomial enthalpy:
    Hhigh = (nasa_low.getEnthalpy(Tint) - nasa_high.getEnthalpy(Tint))/constants.R
    # high polynomial entropy:
    Shigh = (nasa_low.getEntropy(Tint) - nasa_high.getEntropy(Tint))/constants.R
    # update last two coefficients
    #polynomial_high.coeffs = (b6,b7,b8,b9,b10,Hhigh,Shigh)
    nasa_high.c5 = Hhigh
    nasa_high.c6 = Shigh

    return NASAModel(Tmin=Tmin, Tmax=Tmax, polynomials=[nasa_low,nasa_high], comment=comment)
def Wilhoit2NASA(wilhoit, tmin, tmax, tint, weighting, contCons):
    """
    Fit two five-coefficient NASA polynomials to a (scaled) Wilhoit model by
    constrained linear least squares.

    input: Wilhoit parameters, Cp0/R, CpInf/R, and B (kK), a0, a1, a2, a3,
           Tmin (minimum temperature (in kiloKelvin),
           Tmax (maximum temperature (in kiloKelvin),
           Tint (intermediate temperature, in kiloKelvin)
           weighting (boolean: should the fit be weighted by 1/T?)
           contCons: a measure of the continuity constraints on the fitted NASA polynomials; possible values are:
               5: constrain Cp, dCp/dT, d2Cp/dT2, d3Cp/dT3, and d4Cp/dT4 to be continuous at tint; note: this effectively constrains all the coefficients to be equal and should be equivalent to fitting only one polynomial (rather than two)
               4: constrain Cp, dCp/dT, d2Cp/dT2, and d3Cp/dT3 to be continuous at tint
               3 (default): constrain Cp, dCp/dT, and d2Cp/dT2 to be continuous at tint
               2: constrain Cp and dCp/dT to be continuous at tint
               1: constrain Cp to be continuous at tint
               0: no constraints on continuity of Cp(T) at tint
           note: 5th (and higher) derivatives of NASA Cp(T) are zero and hence will automatically be continuous at tint by the form of the Cp(T) function
    output: NASA polynomials (nasa_low, nasa_high) with scaled parameters
    """
    # Construct (typically 13*13) symmetric A matrix (in A*x = b); other elements will be zero.
    # Rows/columns 0-4 correspond to the low-T polynomial coefficients, 5-9 to
    # the high-T ones, and 10..10+contCons-1 to the Lagrange multipliers that
    # enforce the continuity constraints at tint.
    A = zeros([10+contCons,10+contCons])
    b = zeros([10+contCons])

    # Upper-left 5x5 block: normal equations for the low-T polynomial over
    # [tmin, tint]. Entries are integrals of T^(i+j), or of T^(i+j-1) extra
    # factor 1/T when the fit is weighted.
    if weighting:
        A[0,0] = 2*math.log(tint/tmin)
        A[0,1] = 2*(tint - tmin)
        A[0,2] = tint*tint - tmin*tmin
        A[0,3] = 2.*(tint*tint*tint - tmin*tmin*tmin)/3
        A[0,4] = (tint*tint*tint*tint - tmin*tmin*tmin*tmin)/2
        A[1,4] = 2.*(tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin)/5
        A[2,4] = (tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin)/3
        A[3,4] = 2.*(tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin)/7
        A[4,4] = (tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/4
    else:
        A[0,0] = 2*(tint - tmin)
        A[0,1] = tint*tint - tmin*tmin
        A[0,2] = 2.*(tint*tint*tint - tmin*tmin*tmin)/3
        A[0,3] = (tint*tint*tint*tint - tmin*tmin*tmin*tmin)/2
        A[0,4] = 2.*(tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin)/5
        A[1,4] = (tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin)/3
        A[2,4] = 2.*(tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin)/7
        A[3,4] = (tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/4
        A[4,4] = 2.*(tint*tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/9
    # Entries sharing the same power of T are reused rather than recomputed.
    A[1,1] = A[0,2]
    A[1,2] = A[0,3]
    A[1,3] = A[0,4]
    A[2,2] = A[0,4]
    A[2,3] = A[1,4]
    A[3,3] = A[2,4]

    # Middle 5x5 block: same structure for the high-T polynomial over [tint, tmax].
    if weighting:
        A[5,5] = 2*math.log(tmax/tint)
        A[5,6] = 2*(tmax - tint)
        A[5,7] = tmax*tmax - tint*tint
        A[5,8] = 2.*(tmax*tmax*tmax - tint*tint*tint)/3
        A[5,9] = (tmax*tmax*tmax*tmax - tint*tint*tint*tint)/2
        A[6,9] = 2.*(tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint)/5
        A[7,9] = (tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint)/3
        A[8,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint)/7
        A[9,9] = (tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint)/4
    else:
        A[5,5] = 2*(tmax - tint)
        A[5,6] = tmax*tmax - tint*tint
        A[5,7] = 2.*(tmax*tmax*tmax - tint*tint*tint)/3
        A[5,8] = (tmax*tmax*tmax*tmax - tint*tint*tint*tint)/2
        A[5,9] = 2.*(tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint)/5
        A[6,9] = (tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint)/3
        A[7,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint)/7
        A[8,9] = (tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint)/4
        A[9,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint*tint)/9
    A[6,6] = A[5,7]
    A[6,7] = A[5,8]
    A[6,8] = A[5,9]
    A[7,7] = A[5,9]
    A[7,8] = A[6,9]
    A[8,8] = A[7,9]

    # Constraint columns: each enforces equality of a Cp derivative of the two
    # polynomials at tint (low-T coefficients positive, high-T negative).
    if(contCons > 0):#set non-zero elements in the 11th column for Cp(T) continuity contraint
        A[0,10] = 1.
        A[1,10] = tint
        A[2,10] = tint*tint
        A[3,10] = A[2,10]*tint
        A[4,10] = A[3,10]*tint
        A[5,10] = -A[0,10]
        A[6,10] = -A[1,10]
        A[7,10] = -A[2,10]
        A[8,10] = -A[3,10]
        A[9,10] = -A[4,10]
        if(contCons > 1): #set non-zero elements in the 12th column for dCp/dT continuity constraint
            A[1,11] = 1.
            A[2,11] = 2*tint
            A[3,11] = 3*A[2,10]
            A[4,11] = 4*A[3,10]
            A[6,11] = -A[1,11]
            A[7,11] = -A[2,11]
            A[8,11] = -A[3,11]
            A[9,11] = -A[4,11]
            if(contCons > 2): #set non-zero elements in the 13th column for d2Cp/dT2 continuity constraint
                A[2,12] = 2.
                A[3,12] = 6*tint
                A[4,12] = 12*A[2,10]
                A[7,12] = -A[2,12]
                A[8,12] = -A[3,12]
                A[9,12] = -A[4,12]
                if(contCons > 3): #set non-zero elements in the 14th column for d3Cp/dT3 continuity constraint
                    A[3,13] = 6
                    A[4,13] = 24*tint
                    A[8,13] = -A[3,13]
                    A[9,13] = -A[4,13]
                    if(contCons > 4): #set non-zero elements in the 15th column for d4Cp/dT4 continuity constraint
                        A[4,14] = 24
                        A[9,14] = -A[4,14]

    # Make the matrix symmetric (only the upper triangle was filled above)
    for i in range(1,10+contCons):
        for j in range(0, i):
            A[i,j] = A[j,i]

    # Construct b vector from moments of the Wilhoit Cp/R function.
    # Wilhoit_integral_Tn is assumed to return the analytic integral of
    # (Cp(Wilhoit)/R)*T^n from 0 to the given temperature — defined elsewhere
    # in this module/package; TODO confirm.
    w0int = Wilhoit_integral_T0(wilhoit, tint)
    w1int = Wilhoit_integral_T1(wilhoit, tint)
    w2int = Wilhoit_integral_T2(wilhoit, tint)
    w3int = Wilhoit_integral_T3(wilhoit, tint)
    w0min = Wilhoit_integral_T0(wilhoit, tmin)
    w1min = Wilhoit_integral_T1(wilhoit, tmin)
    w2min = Wilhoit_integral_T2(wilhoit, tmin)
    w3min = Wilhoit_integral_T3(wilhoit, tmin)
    w0max = Wilhoit_integral_T0(wilhoit, tmax)
    w1max = Wilhoit_integral_T1(wilhoit, tmax)
    w2max = Wilhoit_integral_T2(wilhoit, tmax)
    w3max = Wilhoit_integral_T3(wilhoit, tmax)
    if weighting:
        wM1int = Wilhoit_integral_TM1(wilhoit, tint)
        wM1min = Wilhoit_integral_TM1(wilhoit, tmin)
        wM1max = Wilhoit_integral_TM1(wilhoit, tmax)
    else:
        w4int = Wilhoit_integral_T4(wilhoit, tint)
        w4min = Wilhoit_integral_T4(wilhoit, tmin)
        w4max = Wilhoit_integral_T4(wilhoit, tmax)
    # b[0:5] covers [tmin, tint] for the low-T polynomial, b[5:10] covers
    # [tint, tmax] for the high-T polynomial; weighting shifts each moment
    # down by one power of T.
    if weighting:
        b[0] = 2*(wM1int - wM1min)
        b[1] = 2*(w0int - w0min)
        b[2] = 2*(w1int - w1min)
        b[3] = 2*(w2int - w2min)
        b[4] = 2*(w3int - w3min)
        b[5] = 2*(wM1max - wM1int)
        b[6] = 2*(w0max - w0int)
        b[7] = 2*(w1max - w1int)
        b[8] = 2*(w2max - w2int)
        b[9] = 2*(w3max - w3int)
    else:
        b[0] = 2*(w0int - w0min)
        b[1] = 2*(w1int - w1min)
        b[2] = 2*(w2int - w2min)
        b[3] = 2*(w3int - w3min)
        b[4] = 2*(w4int - w4min)
        b[5] = 2*(w0max - w0int)
        b[6] = 2*(w1max - w1int)
        b[7] = 2*(w2max - w2int)
        b[8] = 2*(w3max - w3int)
        b[9] = 2*(w4max - w4int)

    # solve A*x=b for x (note that factor of 2 in b vector and 10*10 submatrix of A
    # matrix is not required; not including it should give same result, except
    # Lagrange multipliers will differ by a factor of two)
    x = linalg.solve(A,b,overwrite_a=1,overwrite_b=1)

    # The last two coefficients (enthalpy/entropy constants) are fixed later
    # by the caller; initialize them to zero here.
    nasa_low = NASAPolynomial(Tmin=0, Tmax=0, coeffs=[x[0], x[1], x[2], x[3], x[4], 0.0, 0.0], comment='')
    nasa_high = NASAPolynomial(Tmin=0, Tmax=0, coeffs=[x[5], x[6], x[7], x[8], x[9], 0.0, 0.0], comment='')

    return nasa_low, nasa_high
def Wilhoit2NASA_TintOpt(wilhoit, tmin, tmax, weighting, contCons):
    """
    Fit two NASA polynomials to a (scaled) Wilhoit model, allowing the
    intermediate (switch) temperature to float for a better fit.

    input: Wilhoit parameters, Cp0/R, CpInf/R, and B (kK), a0, a1, a2, a3,
           Tmin (minimum temperature, in kiloKelvin),
           Tmax (maximum temperature, in kiloKelvin)
    output: the low- and high-temperature NASA polynomials and the optimized
            intermediate temperature Tint (in kiloKelvin)
    """
    # Vary Tint within [tmin, tmax] to minimize the integral-of-squared-error
    # objective; the bounded scalar minimizer needs no initial guess.
    # cf. http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html and
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fminbound.html#scipy.optimize.fminbound
    best_tint = optimize.fminbound(
        TintOpt_objFun, tmin, tmax,
        args=(wilhoit, tmin, tmax, weighting, contCons))
    # Recompute the polynomial coefficients at the optimal switch temperature.
    # (Alternatively TintOpt_objFun could return them alongside the objective
    # value, avoiding this extra fit.)
    low_poly, high_poly = Wilhoit2NASA(wilhoit, tmin, tmax, best_tint, weighting, contCons)
    return low_poly, high_poly, best_tint
def TintOpt_objFun(tint, wilhoit, tmin, tmax, weighting, contCons):
    """
    Objective function for the Tint optimization: the integral of the squared
    Cp/R fitting error over [tmin, tmax], optionally weighted by 1/T.

    input: Tint (intermediate temperature, in kiloKelvin); Wilhoit parameters,
           Cp0/R, CpInf/R, and B (kK), a0, a1, a2, a3, Tmin and Tmax (in kK)
    output: the quantity Integrate[(Cp(Wilhoit)/R-Cp(NASA)/R)^2, {t, tmin, tmax}]
    """
    if weighting == 1:
        ise = TintOpt_objFun_W(tint, wilhoit, tmin, tmax, contCons)
    else:
        ise = TintOpt_objFun_NW(tint, wilhoit, tmin, tmax, contCons)

    # The objective is an integral of a *squared* error, so a negative value
    # can only arise from accumulated numerical error; clamp it to zero so
    # that taking its square root later is safe.
    if ise < 0:
        if ise < -1E-13:
            logging.error("Greg thought he fixed the numerical problem, but apparently it is still an issue; please e-mail him with the following results:")
            logging.error(tint)
            logging.error(wilhoit)
            logging.error(tmin)
            logging.error(tmax)
            logging.error(weighting)
            logging.error(ise)
        logging.info("Negative ISE of %f reset to zero."%(ise))
        ise = 0

    return ise
def TintOpt_objFun_NW(tint, wilhoit, tmin, tmax, contCons):
    """
    Evaluate the objective function - the integral of the square of the error in the fit.

    input: Tint (intermediate temperature, in kiloKelvin)
           Wilhoit parameters, Cp0/R, CpInf/R, and B (kK), a0, a1, a2, a3,
           Tmin (minimum temperature (in kiloKelvin),
           Tmax (maximum temperature (in kiloKelvin)
    output: the quantity Integrate[(Cp(Wilhoit)/R-Cp(NASA)/R)^2, {t, tmin, tmax}]
    """
    # Fit the two NASA polynomials at this candidate tint (unweighted fit).
    nasa_low, nasa_high = Wilhoit2NASA(wilhoit,tmin,tmax,tint, 0, contCons)
    b1, b2, b3, b4, b5 = nasa_low.c0, nasa_low.c1, nasa_low.c2, nasa_low.c3, nasa_low.c4
    b6, b7, b8, b9, b10 = nasa_high.c0, nasa_high.c1, nasa_high.c2, nasa_high.c3, nasa_high.c4

    # Cache the Wilhoit moment integrals at the switch temperature, since each
    # appears twice in the expansion below.
    q0=Wilhoit_integral_T0(wilhoit, tint)
    q1=Wilhoit_integral_T1(wilhoit, tint)
    q2=Wilhoit_integral_T2(wilhoit, tint)
    q3=Wilhoit_integral_T3(wilhoit, tint)
    q4=Wilhoit_integral_T4(wilhoit, tint)
    # Expansion of Integrate[(CpW - CpN)^2] = Int[CpW^2] + Int[CpN^2]
    # - 2*Int[CpW*CpN], evaluated analytically on [tmin, tint] (low poly,
    # coefficients b1..b5) and [tint, tmax] (high poly, b6..b10).
    result = (Wilhoit_integral2_T0(wilhoit, tmax) - Wilhoit_integral2_T0(wilhoit, tmin) +
                 NASAPolynomial_integral2_T0(nasa_low, tint) - NASAPolynomial_integral2_T0(nasa_low, tmin) +
                 NASAPolynomial_integral2_T0(nasa_high, tmax) - NASAPolynomial_integral2_T0(nasa_high, tint)
                 - 2* (b6*(Wilhoit_integral_T0(wilhoit, tmax)-q0)+b1*(q0-Wilhoit_integral_T0(wilhoit, tmin))
                 +b7*(Wilhoit_integral_T1(wilhoit, tmax) - q1) +b2*(q1 - Wilhoit_integral_T1(wilhoit, tmin))
                 +b8*(Wilhoit_integral_T2(wilhoit, tmax) - q2) +b3*(q2 - Wilhoit_integral_T2(wilhoit, tmin))
                 +b9*(Wilhoit_integral_T3(wilhoit, tmax) - q3) +b4*(q3 - Wilhoit_integral_T3(wilhoit, tmin))
                 +b10*(Wilhoit_integral_T4(wilhoit, tmax) - q4)+b5*(q4 - Wilhoit_integral_T4(wilhoit, tmin))))

    return result
def TintOpt_objFun_W(tint, wilhoit, tmin, tmax, contCons):
    """
    Evaluate the objective function - the integral of the square of the error in the fit.

    If fit is close to perfect, result may be slightly negative due to numerical errors in evaluating this integral.

    input: Tint (intermediate temperature, in kiloKelvin)
           Wilhoit parameters: Cp0/R, CpInf/R, and B (kK), a0, a1, a2, a3,
           Tmin (minimum temperature (in kiloKelvin),
           Tmax (maximum temperature (in kiloKelvin)
    output: the quantity Integrate[1/t*(Cp(Wilhoit)/R-Cp(NASA)/R)^2, {t, tmin, tmax}]
    """
    # Fit the two NASA polynomials at this candidate tint (1/T-weighted fit).
    nasa_low, nasa_high = Wilhoit2NASA(wilhoit,tmin,tmax,tint, 1, contCons)
    b1, b2, b3, b4, b5 = nasa_low.c0, nasa_low.c1, nasa_low.c2, nasa_low.c3, nasa_low.c4
    b6, b7, b8, b9, b10 = nasa_high.c0, nasa_high.c1, nasa_high.c2, nasa_high.c3, nasa_high.c4

    # Cache the Wilhoit moment integrals at the switch temperature; the
    # weighting by 1/T shifts each moment down one power of T relative to
    # the unweighted objective.
    qM1=Wilhoit_integral_TM1(wilhoit, tint)
    q0=Wilhoit_integral_T0(wilhoit, tint)
    q1=Wilhoit_integral_T1(wilhoit, tint)
    q2=Wilhoit_integral_T2(wilhoit, tint)
    q3=Wilhoit_integral_T3(wilhoit, tint)
    # Expansion of Integrate[(CpW - CpN)^2 / T] = Int[CpW^2/T] + Int[CpN^2/T]
    # - 2*Int[CpW*CpN/T], evaluated analytically on the two subintervals.
    result = (Wilhoit_integral2_TM1(wilhoit, tmax) - Wilhoit_integral2_TM1(wilhoit, tmin) +
                 NASAPolynomial_integral2_TM1(nasa_low, tint) - NASAPolynomial_integral2_TM1(nasa_low, tmin) +
                 NASAPolynomial_integral2_TM1(nasa_high, tmax) - NASAPolynomial_integral2_TM1(nasa_high, tint)
                 - 2* (b6*(Wilhoit_integral_TM1(wilhoit, tmax)-qM1)+b1*(qM1 - Wilhoit_integral_TM1(wilhoit, tmin))
                 +b7*(Wilhoit_integral_T0(wilhoit, tmax)-q0)+b2*(q0 - Wilhoit_integral_T0(wilhoit, tmin))
                 +b8*(Wilhoit_integral_T1(wilhoit, tmax)-q1)+b3*(q1 - Wilhoit_integral_T1(wilhoit, tmin))
                 +b9*(Wilhoit_integral_T2(wilhoit, tmax)-q2)+b4*(q2 - Wilhoit_integral_T2(wilhoit, tmin))
                 +b10*(Wilhoit_integral_T3(wilhoit, tmax)-q3)+b5*(q3 - Wilhoit_integral_T3(wilhoit, tmin))))

    return result
####################################################################################################
#below are functions for conversion of general Cp to NASA polynomials
#because they use numerical integration, they are, in general, likely to be slower and less accurate than versions with analytical integrals for the starting Cp form (e.g. Wilhoit polynomials)
#therefore, this should only be used when no analytic alternatives are available
def convertCpToNASA(CpObject, H298, S298, fixed=1, weighting=0, tint=1000.0, Tmin = 298.0, Tmax=6000.0, contCons=3):
    """Convert an arbitrary heat capacity function into a NASA polynomial thermo instance (using numerical integration)

    Takes: CpObject: an object with method "getHeatCapacity(self,T) that will return Cp in J/mol-K with argument T in K
           H298: enthalpy at 298.15 K (in J/mol)
           S298: entropy at 298.15 K (in J/mol-K)
           fixed: 1 (default) to fix tint; 0 to allow it to float to get a better fit
           weighting: 0 (default) to not weight the fit by 1/T; 1 to weight by 1/T to emphasize good fit at lower temperatures
           tint, Tmin, Tmax: intermediate, minimum, and maximum temperatures in Kelvin
           contCons: a measure of the continuity constraints on the fitted NASA polynomials; possible values are:
               5: constrain Cp, dCp/dT, d2Cp/dT2, d3Cp/dT3, and d4Cp/dT4 to be continuous at tint; note: this effectively constrains all the coefficients to be equal and should be equivalent to fitting only one polynomial (rather than two)
               4: constrain Cp, dCp/dT, d2Cp/dT2, and d3Cp/dT3 to be continuous at tint
               3 (default): constrain Cp, dCp/dT, and d2Cp/dT2 to be continuous at tint
               2: constrain Cp and dCp/dT to be continuous at tint
               1: constrain Cp to be continuous at tint
               0: no constraints on continuity of Cp(T) at tint
           note: 5th (and higher) derivatives of NASA Cp(T) are zero and hence will automatically be continuous at tint by the form of the Cp(T) function
    Returns a `NASAModel` instance containing two `NASAPolynomial` polynomials
    """
    # Scale the temperatures to kK (the fit is performed in kiloKelvin)
    Tmin = Tmin/1000
    tint = tint/1000
    Tmax = Tmax/1000

    # If we are using fixed tint, do not allow tint to float
    if(fixed == 1):
        nasa_low, nasa_high = Cp2NASA(CpObject, Tmin, Tmax, tint, weighting, contCons)
    else:
        nasa_low, nasa_high, tint = Cp2NASA_TintOpt(CpObject, Tmin, Tmax, weighting, contCons)
    iseUnw = Cp_TintOpt_objFun(tint, CpObject, Tmin, Tmax, 0, contCons) #the scaled, unweighted ISE (integral of squared error)
    rmsUnw = math.sqrt(iseUnw/(Tmax-Tmin))
    rmsStr = '(Unweighted) RMS error = %.3f*R;'%(rmsUnw)
    if(weighting == 1):
        iseWei= Cp_TintOpt_objFun(tint, CpObject, Tmin, Tmax, weighting, contCons) #the scaled, weighted ISE
        rmsWei = math.sqrt(iseWei/math.log(Tmax/Tmin))
        rmsStr = 'Weighted RMS error = %.3f*R;'%(rmsWei)+rmsStr
    else:
        # keep rmsWei defined so the quality check below is always valid
        rmsWei = 0.0

    # Print a warning if the rms fit is worse than 0.25*R
    if(rmsUnw > 0.25 or rmsWei > 0.25):
        logging.warning("Poor Cp-to-NASA fit quality: RMS error = %.3f*R" % (rmsWei if weighting == 1 else rmsUnw))

    # Restore to conventional units of K for Tint and units based on K rather
    # than kK in NASA polynomial coefficients
    tint=tint*1000.
    Tmin = Tmin*1000
    Tmax = Tmax*1000
    nasa_low.c1 /= 1000.
    nasa_low.c2 /= 1000000.
    nasa_low.c3 /= 1000000000.
    nasa_low.c4 /= 1000000000000.
    nasa_high.c1 /= 1000.
    nasa_high.c2 /= 1000000.
    nasa_high.c3 /= 1000000000.
    nasa_high.c4 /= 1000000000000.

    # output comment
    comment = 'Cp function fitted to NASA function. ' + rmsStr
    nasa_low.Tmin = Tmin; nasa_low.Tmax = tint
    nasa_low.comment = 'Low temperature range polynomial'
    nasa_high.Tmin = tint; nasa_high.Tmax = Tmax
    nasa_high.comment = 'High temperature range polynomial'

    # For the low polynomial, we want the results to match the given values at 298.15 K
    # low polynomial enthalpy:
    Hlow = (H298 - nasa_low.getEnthalpy(298.15))/constants.R
    # low polynomial entropy:
    Slow = (S298 - nasa_low.getEntropy(298.15))/constants.R
    #***consider changing this to use getEnthalpy and getEntropy methods of thermoObject
    # update last two coefficients
    nasa_low.c5 = Hlow
    nasa_low.c6 = Slow

    # For the high polynomial, we want the results to match the low polynomial value at tint
    # high polynomial enthalpy:
    Hhigh = (nasa_low.getEnthalpy(tint) - nasa_high.getEnthalpy(tint))/constants.R
    # high polynomial entropy:
    Shigh = (nasa_low.getEntropy(tint) - nasa_high.getEntropy(tint))/constants.R
    # update last two coefficients
    #polynomial_high.coeffs = (b6,b7,b8,b9,b10,Hhigh,Shigh)
    nasa_high.c5 = Hhigh
    nasa_high.c6 = Shigh

    NASAthermo = NASAModel(Tmin=Tmin, Tmax=Tmax, polynomials=[nasa_low,nasa_high], comment=comment)
    return NASAthermo
def Cp2NASA(CpObject, tmin, tmax, tint, weighting, contCons):
    """
    Fit two five-coefficient NASA polynomials to an arbitrary heat capacity
    function by constrained linear least squares, using numerical quadrature
    for the moment integrals.

    input: CpObject: an object with method "getHeatCapacity(self,T) that will return Cp in J/mol-K with argument T in K
           Tmin (minimum temperature (in kiloKelvin),
           Tmax (maximum temperature (in kiloKelvin),
           Tint (intermediate temperature, in kiloKelvin)
           weighting (boolean: should the fit be weighted by 1/T?)
           contCons: a measure of the continuity constraints on the fitted NASA polynomials; possible values are:
               5: constrain Cp, dCp/dT, d2Cp/dT2, d3Cp/dT3, and d4Cp/dT4 to be continuous at tint; note: this effectively constrains all the coefficients to be equal and should be equivalent to fitting only one polynomial (rather than two)
               4: constrain Cp, dCp/dT, d2Cp/dT2, and d3Cp/dT3 to be continuous at tint
               3 (default): constrain Cp, dCp/dT, and d2Cp/dT2 to be continuous at tint
               2: constrain Cp and dCp/dT to be continuous at tint
               1: constrain Cp to be continuous at tint
               0: no constraints on continuity of Cp(T) at tint
           note: 5th (and higher) derivatives of NASA Cp(T) are zero and hence will automatically be continuous at tint by the form of the Cp(T) function
    output: NASA polynomials (nasa_low, nasa_high) with scaled parameters
    """
    # Construct (typically 13*13) symmetric A matrix (in A*x = b); other elements will be zero.
    # Rows/columns 0-4 correspond to the low-T polynomial coefficients, 5-9 to
    # the high-T ones, and 10..10+contCons-1 to the Lagrange multipliers that
    # enforce the continuity constraints at tint. This matrix is identical in
    # structure to the one built in Wilhoit2NASA above.
    A = zeros([10+contCons,10+contCons])
    b = zeros([10+contCons])

    # Upper-left 5x5 block: normal equations for the low-T polynomial over [tmin, tint].
    if weighting:
        A[0,0] = 2*math.log(tint/tmin)
        A[0,1] = 2*(tint - tmin)
        A[0,2] = tint*tint - tmin*tmin
        A[0,3] = 2.*(tint*tint*tint - tmin*tmin*tmin)/3
        A[0,4] = (tint*tint*tint*tint - tmin*tmin*tmin*tmin)/2
        A[1,4] = 2.*(tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin)/5
        A[2,4] = (tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin)/3
        A[3,4] = 2.*(tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin)/7
        A[4,4] = (tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/4
    else:
        A[0,0] = 2*(tint - tmin)
        A[0,1] = tint*tint - tmin*tmin
        A[0,2] = 2.*(tint*tint*tint - tmin*tmin*tmin)/3
        A[0,3] = (tint*tint*tint*tint - tmin*tmin*tmin*tmin)/2
        A[0,4] = 2.*(tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin)/5
        A[1,4] = (tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin)/3
        A[2,4] = 2.*(tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin)/7
        A[3,4] = (tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/4
        A[4,4] = 2.*(tint*tint*tint*tint*tint*tint*tint*tint*tint - tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin*tmin)/9
    # Entries sharing the same power of T are reused rather than recomputed.
    A[1,1] = A[0,2]
    A[1,2] = A[0,3]
    A[1,3] = A[0,4]
    A[2,2] = A[0,4]
    A[2,3] = A[1,4]
    A[3,3] = A[2,4]

    # Middle 5x5 block: same structure for the high-T polynomial over [tint, tmax].
    if weighting:
        A[5,5] = 2*math.log(tmax/tint)
        A[5,6] = 2*(tmax - tint)
        A[5,7] = tmax*tmax - tint*tint
        A[5,8] = 2.*(tmax*tmax*tmax - tint*tint*tint)/3
        A[5,9] = (tmax*tmax*tmax*tmax - tint*tint*tint*tint)/2
        A[6,9] = 2.*(tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint)/5
        A[7,9] = (tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint)/3
        A[8,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint)/7
        A[9,9] = (tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint)/4
    else:
        A[5,5] = 2*(tmax - tint)
        A[5,6] = tmax*tmax - tint*tint
        A[5,7] = 2.*(tmax*tmax*tmax - tint*tint*tint)/3
        A[5,8] = (tmax*tmax*tmax*tmax - tint*tint*tint*tint)/2
        A[5,9] = 2.*(tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint)/5
        A[6,9] = (tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint)/3
        A[7,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint)/7
        A[8,9] = (tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint)/4
        A[9,9] = 2.*(tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax*tmax - tint*tint*tint*tint*tint*tint*tint*tint*tint)/9
    A[6,6] = A[5,7]
    A[6,7] = A[5,8]
    A[6,8] = A[5,9]
    A[7,7] = A[5,9]
    A[7,8] = A[6,9]
    A[8,8] = A[7,9]

    # Constraint columns: each enforces equality of a Cp derivative of the two
    # polynomials at tint (low-T coefficients positive, high-T negative).
    if(contCons > 0):#set non-zero elements in the 11th column for Cp(T) continuity contraint
        A[0,10] = 1.
        A[1,10] = tint
        A[2,10] = tint*tint
        A[3,10] = A[2,10]*tint
        A[4,10] = A[3,10]*tint
        A[5,10] = -A[0,10]
        A[6,10] = -A[1,10]
        A[7,10] = -A[2,10]
        A[8,10] = -A[3,10]
        A[9,10] = -A[4,10]
        if(contCons > 1): #set non-zero elements in the 12th column for dCp/dT continuity constraint
            A[1,11] = 1.
            A[2,11] = 2*tint
            A[3,11] = 3*A[2,10]
            A[4,11] = 4*A[3,10]
            A[6,11] = -A[1,11]
            A[7,11] = -A[2,11]
            A[8,11] = -A[3,11]
            A[9,11] = -A[4,11]
            if(contCons > 2): #set non-zero elements in the 13th column for d2Cp/dT2 continuity constraint
                A[2,12] = 2.
                A[3,12] = 6*tint
                A[4,12] = 12*A[2,10]
                A[7,12] = -A[2,12]
                A[8,12] = -A[3,12]
                A[9,12] = -A[4,12]
                if(contCons > 3): #set non-zero elements in the 14th column for d3Cp/dT3 continuity constraint
                    A[3,13] = 6
                    A[4,13] = 24*tint
                    A[8,13] = -A[3,13]
                    A[9,13] = -A[4,13]
                    if(contCons > 4): #set non-zero elements in the 15th column for d4Cp/dT4 continuity constraint
                        A[4,14] = 24
                        A[9,14] = -A[4,14]

    # Make the matrix symmetric (only the upper triangle was filled above)
    for i in range(1,10+contCons):
        for j in range(0, i):
            A[i,j] = A[j,i]

    # Construct b vector from moments of Cp/R. Nintegral_Tn is assumed to
    # compute Integrate[(Cp/R)*T^n] over the given interval by numerical
    # quadrature (see module comment above) — defined elsewhere; TODO confirm.
    w0low = Nintegral_T0(CpObject,tmin,tint)
    w1low = Nintegral_T1(CpObject,tmin,tint)
    w2low = Nintegral_T2(CpObject,tmin,tint)
    w3low = Nintegral_T3(CpObject,tmin,tint)
    w0high = Nintegral_T0(CpObject,tint,tmax)
    w1high = Nintegral_T1(CpObject,tint,tmax)
    w2high = Nintegral_T2(CpObject,tint,tmax)
    w3high = Nintegral_T3(CpObject,tint,tmax)
    if weighting:
        wM1low = Nintegral_TM1(CpObject,tmin,tint)
        wM1high = Nintegral_TM1(CpObject,tint,tmax)
    else:
        w4low = Nintegral_T4(CpObject,tmin,tint)
        w4high = Nintegral_T4(CpObject,tint,tmax)
    # b[0:5] covers [tmin, tint] (low-T polynomial), b[5:10] covers
    # [tint, tmax] (high-T polynomial); weighting shifts each moment down
    # by one power of T.
    if weighting:
        b[0] = 2*wM1low
        b[1] = 2*w0low
        b[2] = 2*w1low
        b[3] = 2*w2low
        b[4] = 2*w3low
        b[5] = 2*wM1high
        b[6] = 2*w0high
        b[7] = 2*w1high
        b[8] = 2*w2high
        b[9] = 2*w3high
    else:
        b[0] = 2*w0low
        b[1] = 2*w1low
        b[2] = 2*w2low
        b[3] = 2*w3low
        b[4] = 2*w4low
        b[5] = 2*w0high
        b[6] = 2*w1high
        b[7] = 2*w2high
        b[8] = 2*w3high
        b[9] = 2*w4high

    # solve A*x=b for x (note that factor of 2 in b vector and 10*10 submatrix of A
    # matrix is not required; not including it should give same result, except
    # Lagrange multipliers will differ by a factor of two)
    x = linalg.solve(A,b,overwrite_a=1,overwrite_b=1)

    # The last two coefficients (enthalpy/entropy constants) are fixed later
    # by the caller; initialize them to zero here.
    nasa_low = NASAPolynomial(Tmin=0, Tmax=0, coeffs=[x[0], x[1], x[2], x[3], x[4], 0.0, 0.0], comment='')
    nasa_high = NASAPolynomial(Tmin=0, Tmax=0, coeffs=[x[5], x[6], x[7], x[8], x[9], 0.0, 0.0], comment='')

    return nasa_low, nasa_high
def Cp2NASA_TintOpt(CpObject, tmin, tmax, weighting, contCons):
    """Fit a two-range NASA polynomial to CpObject, optimizing the breakpoint Tint.

    CpObject must provide getHeatCapacity(T) returning Cp in J/mol-K (T in K);
    tmin/tmax are in kiloKelvin.  Returns (nasa_low, nasa_high, tint): the
    low- and high-temperature NASAPolynomial objects and the optimized
    intermediate temperature.
    """
    # Step 1: search (tmin, tmax) for the Tint minimizing the fitting-error
    # objective; fminbound needs no initial guess.
    # cf. http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html and
    # scipy.optimize.fminbound documentation.
    best_tint = optimize.fminbound(Cp_TintOpt_objFun, tmin, tmax,
                                   args=(CpObject, tmin, tmax, weighting, contCons))
    # Step 2: recompute the polynomial coefficients at the optimal Tint.
    # (The objective function already computes these internally; returning
    # them from there would avoid this extra evaluation.)
    low_poly, high_poly = Cp2NASA(CpObject, tmin, tmax, best_tint, weighting, contCons)
    return low_poly, high_poly, best_tint
def Cp_TintOpt_objFun(tint, CpObject, tmin, tmax, weighting, contCons):
    """Objective for the Tint search: Integrate[(Cp/R-Cp(NASA)/R)^2, {t, tmin, tmax}].

    tint/tmin/tmax are in kiloKelvin; CpObject must provide
    getHeatCapacity(T) returning Cp in J/mol-K with T in K.
    Dispatches to the 1/t-weighted or unweighted variant per `weighting`.
    """
    if weighting == 1:
        err = Cp_TintOpt_objFun_W(tint, CpObject, tmin, tmax, contCons)
    else:
        err = Cp_TintOpt_objFun_NW(tint, CpObject, tmin, tmax, contCons)
    # The integral of a *squared* error cannot truly be negative; a slightly
    # negative value can only come from accumulated numerical error.  Clamp
    # to zero so a later square root cannot fail, but log the occurrence.
    if err < 0:
        logging.error("Numerical integral results suggest sum of squared errors is negative; please e-mail Greg with the following results:")
        for item in (tint, CpObject, tmin, tmax, weighting, err):
            logging.error(item)
        err = 0
    return err
def Cp_TintOpt_objFun_NW(tint, CpObject, tmin, tmax, contCons):
    """
    Evaluate the objective function - the integral of the square of the error in the fit.

    input: Tint (intermediate temperature, in kiloKelvin)
        CpObject: an object with method "getHeatCapacity(self,T) that will return Cp in J/mol-K with argument T in K
        Tmin (minimum temperature (in kiloKelvin),
        Tmax (maximum temperature (in kiloKelvin)
    output: the quantity Integrate[(Cp/R-Cp(NASA)/R)^2, {t, tmin, tmax}]
    """
    # Fit the two NASA polynomials at this trial Tint (weighting = 0).
    nasa_low, nasa_high = Cp2NASA(CpObject, tmin, tmax, tint, 0, contCons)
    b1, b2, b3, b4, b5 = nasa_low.c0, nasa_low.c1, nasa_low.c2, nasa_low.c3, nasa_low.c4
    b6, b7, b8, b9, b10 = nasa_high.c0, nasa_high.c1, nasa_high.c2, nasa_high.c3, nasa_high.c4
    # Expand Integrate[(Cp/R - poly)^2] = Int[(Cp/R)^2] + Int[poly^2]
    # - 2*Int[poly*Cp/R]: the polynomial self-term is evaluated analytically
    # (integral2_T0) and the cross terms numerically (Nintegral_*),
    # separately over [tmin, tint] and [tint, tmax].
    result = (Nintegral2_T0(CpObject, tmin, tmax) +
              nasa_low.integral2_T0(tint) - nasa_low.integral2_T0(tmin) + nasa_high.integral2_T0(tmax) - nasa_high.integral2_T0(tint)
              - 2 * (b6*Nintegral_T0(CpObject, tint, tmax) + b1*Nintegral_T0(CpObject, tmin, tint)
                     + b7*Nintegral_T1(CpObject, tint, tmax) + b2*Nintegral_T1(CpObject, tmin, tint)
                     + b8*Nintegral_T2(CpObject, tint, tmax) + b3*Nintegral_T2(CpObject, tmin, tint)
                     + b9*Nintegral_T3(CpObject, tint, tmax) + b4*Nintegral_T3(CpObject, tmin, tint)
                     + b10*Nintegral_T4(CpObject, tint, tmax) + b5*Nintegral_T4(CpObject, tmin, tint)))
    return result
def Cp_TintOpt_objFun_W(tint, CpObject, tmin, tmax, contCons):
    """
    Evaluate the objective function - the integral of the square of the error in the fit.

    If fit is close to perfect, result may be slightly negative due to numerical errors in evaluating this integral.
    input: Tint (intermediate temperature, in kiloKelvin)
        CpObject: an object with method "getHeatCapacity(self,T) that will return Cp in J/mol-K with argument T in K
        Tmin (minimum temperature (in kiloKelvin),
        Tmax (maximum temperature (in kiloKelvin)
    output: the quantity Integrate[1/t*(Cp/R-Cp(NASA)/R)^2, {t, tmin, tmax}]
    """
    # Fit the two NASA polynomials at this trial Tint (weighting = 1).
    nasa_low, nasa_high = Cp2NASA(CpObject, tmin, tmax, tint, 1, contCons)
    b1, b2, b3, b4, b5 = nasa_low.c0, nasa_low.c1, nasa_low.c2, nasa_low.c3, nasa_low.c4
    b6, b7, b8, b9, b10 = nasa_high.c0, nasa_high.c1, nasa_high.c2, nasa_high.c3, nasa_high.c4
    # Same expansion as the unweighted variant, but each integral carries an
    # extra factor of 1/t, shifting every moment down by one power of t
    # (e.g. the b*T^0 cross terms use Nintegral_TM1).
    result = (Nintegral2_TM1(CpObject, tmin, tmax) +
              nasa_low.integral2_TM1(tint) - nasa_low.integral2_TM1(tmin) + nasa_high.integral2_TM1(tmax) - nasa_high.integral2_TM1(tint)
              - 2 * (b6*Nintegral_TM1(CpObject, tint, tmax) + b1*Nintegral_TM1(CpObject, tmin, tint)
                     + b7*Nintegral_T0(CpObject, tint, tmax) + b2*Nintegral_T0(CpObject, tmin, tint)
                     + b8*Nintegral_T1(CpObject, tint, tmax) + b3*Nintegral_T1(CpObject, tmin, tint)
                     + b9*Nintegral_T2(CpObject, tint, tmax) + b4*Nintegral_T2(CpObject, tmin, tint)
                     + b10*Nintegral_T3(CpObject, tint, tmax) + b5*Nintegral_T3(CpObject, tmin, tint)))
    return result
################################################################################
#a faster version of the integral based on H from Yelvington's thesis; it differs from the original (see above) by a constant (dependent on parameters but independent of t)
def Wilhoit_integral_T0(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R, t'] evaluated at t'=t
    # (fast closed form based on H from Yelvington's thesis; differs from a
    # direct integration only by a constant independent of t).
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(y=cython.double, y2=cython.double, logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    y = t/(t+B)  # reduced temperature variable of the Wilhoit form
    y2 = y*y
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    result = cp0*t - (cpInf-cp0)*t*(y2*((3*a0 + a1 + a2 + a3)/6. + (4*a1 + a2 + a3)*y/12. + (5*a2 + a3)*y2/20. + a3*y2*y/5.) + (2 + a0 + a1 + a2 + a3)*( y/2. - 1 + (1/y-1)*logBplust))
    return result
#a faster version of the integral based on S from Yelvington's thesis; it differs from the original by a constant (dependent on parameters but independent of t)
def Wilhoit_integral_TM1(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R*t^-1, t'] evaluated at
    # t'=t (fast closed form based on S from Yelvington's thesis; differs
    # from a direct integration by a t-independent constant).
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(y=cython.double, logt=cython.double, logy=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    y = t/(t+B)  # reduced temperature variable of the Wilhoit form
    if cython.compiled:
        logy = log(y); logt = log(t)  # C-level log when compiled
    else:
        logy = math.log(y); logt = math.log(t)
    result = cpInf*logt-(cpInf-cp0)*(logy+y*(1+y*(a0/2+y*(a1/3 + y*(a2/4 + y*a3/5)))))
    return result
def Wilhoit_integral_T1(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R*t, t'] evaluated at t'=t.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    # Closed-form expansion in powers of B/(B+t); leave the expression as-is,
    # since any algebraic regrouping changes floating-point rounding.
    result = ( (2 + a0 + a1 + a2 + a3)*B*(cp0 - cpInf)*t + (cpInf*t**2)/2. + (a3*B**7*(-cp0 + cpInf))/(5.*(B + t)**5) + ((a2 + 6*a3)*B**6*(cp0 - cpInf))/(4.*(B + t)**4) -
        ((a1 + 5*(a2 + 3*a3))*B**5*(cp0 - cpInf))/(3.*(B + t)**3) + ((a0 + 4*a1 + 10*(a2 + 2*a3))*B**4*(cp0 - cpInf))/(2.*(B + t)**2) -
        ((1 + 3*a0 + 6*a1 + 10*a2 + 15*a3)*B**3*(cp0 - cpInf))/(B + t) - (3 + 3*a0 + 4*a1 + 5*a2 + 6*a3)*B**2*(cp0 - cpInf)*logBplust)
    return result
def Wilhoit_integral_T2(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R*t^2, t'] evaluated at t'=t.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    # Closed-form expansion in powers of B/(B+t); kept verbatim (regrouping
    # would change floating-point rounding).
    result = ( -((3 + 3*a0 + 4*a1 + 5*a2 + 6*a3)*B**2*(cp0 - cpInf)*t) + ((2 + a0 + a1 + a2 + a3)*B*(cp0 - cpInf)*t**2)/2. + (cpInf*t**3)/3. + (a3*B**8*(cp0 - cpInf))/(5.*(B + t)**5) -
        ((a2 + 7*a3)*B**7*(cp0 - cpInf))/(4.*(B + t)**4) + ((a1 + 6*a2 + 21*a3)*B**6*(cp0 - cpInf))/(3.*(B + t)**3) - ((a0 + 5*(a1 + 3*a2 + 7*a3))*B**5*(cp0 - cpInf))/(2.*(B + t)**2) +
        ((1 + 4*a0 + 10*a1 + 20*a2 + 35*a3)*B**4*(cp0 - cpInf))/(B + t) + (4 + 6*a0 + 10*a1 + 15*a2 + 21*a3)*B**3*(cp0 - cpInf)*logBplust)
    return result
def Wilhoit_integral_T3(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R*t^3, t'] evaluated at t'=t.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    # Closed-form expansion in powers of B/(B+t); kept verbatim (regrouping
    # would change floating-point rounding).
    result = ( (4 + 6*a0 + 10*a1 + 15*a2 + 21*a3)*B**3*(cp0 - cpInf)*t + ((3 + 3*a0 + 4*a1 + 5*a2 + 6*a3)*B**2*(-cp0 + cpInf)*t**2)/2. + ((2 + a0 + a1 + a2 + a3)*B*(cp0 - cpInf)*t**3)/3. +
        (cpInf*t**4)/4. + (a3*B**9*(-cp0 + cpInf))/(5.*(B + t)**5) + ((a2 + 8*a3)*B**8*(cp0 - cpInf))/(4.*(B + t)**4) - ((a1 + 7*(a2 + 4*a3))*B**7*(cp0 - cpInf))/(3.*(B + t)**3) +
        ((a0 + 6*a1 + 21*a2 + 56*a3)*B**6*(cp0 - cpInf))/(2.*(B + t)**2) - ((1 + 5*a0 + 15*a1 + 35*a2 + 70*a3)*B**5*(cp0 - cpInf))/(B + t) -
        (5 + 10*a0 + 20*a1 + 35*a2 + 56*a3)*B**4*(cp0 - cpInf)*logBplust)
    return result
def Wilhoit_integral_T4(wilhoit, t):
    # Analytic antiderivative Integrate[Cp(Wilhoit)/R*t^4, t'] evaluated at t'=t.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    # Closed-form expansion in powers of B/(B+t); kept verbatim (regrouping
    # would change floating-point rounding).
    result = ( -((5 + 10*a0 + 20*a1 + 35*a2 + 56*a3)*B**4*(cp0 - cpInf)*t) + ((4 + 6*a0 + 10*a1 + 15*a2 + 21*a3)*B**3*(cp0 - cpInf)*t**2)/2. +
        ((3 + 3*a0 + 4*a1 + 5*a2 + 6*a3)*B**2*(-cp0 + cpInf)*t**3)/3. + ((2 + a0 + a1 + a2 + a3)*B*(cp0 - cpInf)*t**4)/4. + (cpInf*t**5)/5. + (a3*B**10*(cp0 - cpInf))/(5.*(B + t)**5) -
        ((a2 + 9*a3)*B**9*(cp0 - cpInf))/(4.*(B + t)**4) + ((a1 + 8*a2 + 36*a3)*B**8*(cp0 - cpInf))/(3.*(B + t)**3) - ((a0 + 7*(a1 + 4*(a2 + 3*a3)))*B**7*(cp0 - cpInf))/(2.*(B + t)**2) +
        ((1 + 6*a0 + 21*a1 + 56*a2 + 126*a3)*B**6*(cp0 - cpInf))/(B + t) + (6 + 15*a0 + 35*a1 + 70*a2 + 126*a3)*B**5*(cp0 - cpInf)*logBplust)
    return result
def Wilhoit_integral2_T0(wilhoit, t):
    # Analytic antiderivative Integrate[(Cp(Wilhoit)/R)^2, t'] evaluated at
    # t'=t; used for the polynomial-fit error integrals.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t)  # C-level log when compiled with Cython
    else:
        logBplust = math.log(B + t)
    # Machine-generated closed form (expansion in powers of B/(B+t)); kept
    # verbatim, since any regrouping changes floating-point rounding.
    result = (cpInf**2*t - (a3**2*B**12*(cp0 - cpInf)**2)/(11.*(B + t)**11) + (a3*(a2 + 5*a3)*B**11*(cp0 - cpInf)**2)/(5.*(B + t)**10) -
        ((a2**2 + 18*a2*a3 + a3*(2*a1 + 45*a3))*B**10*(cp0 - cpInf)**2)/(9.*(B + t)**9) + ((4*a2**2 + 36*a2*a3 + a1*(a2 + 8*a3) + a3*(a0 + 60*a3))*B**9*(cp0 - cpInf)**2)/(4.*(B + t)**8) -
        ((a1**2 + 14*a1*(a2 + 4*a3) + 2*(14*a2**2 + a3 + 84*a2*a3 + 105*a3**2 + a0*(a2 + 7*a3)))*B**8*(cp0 - cpInf)**2)/(7.*(B + t)**7) +
        ((3*a1**2 + a2 + 28*a2**2 + 7*a3 + 126*a2*a3 + 126*a3**2 + 7*a1*(3*a2 + 8*a3) + a0*(a1 + 6*a2 + 21*a3))*B**7*(cp0 - cpInf)**2)/(3.*(B + t)**6) -
        (B**6*(cp0 - cpInf)*(a0**2*(cp0 - cpInf) + 15*a1**2*(cp0 - cpInf) + 10*a0*(a1 + 3*a2 + 7*a3)*(cp0 - cpInf) + 2*a1*(1 + 35*a2 + 70*a3)*(cp0 - cpInf) +
        2*(35*a2**2*(cp0 - cpInf) + 6*a2*(1 + 21*a3)*(cp0 - cpInf) + a3*(5*(4 + 21*a3)*cp0 - 21*(cpInf + 5*a3*cpInf)))))/(5.*(B + t)**5) +
        (B**5*(cp0 - cpInf)*(14*a2*cp0 + 28*a2**2*cp0 + 30*a3*cp0 + 84*a2*a3*cp0 + 60*a3**2*cp0 + 2*a0**2*(cp0 - cpInf) + 10*a1**2*(cp0 - cpInf) +
        a0*(1 + 10*a1 + 20*a2 + 35*a3)*(cp0 - cpInf) + a1*(5 + 35*a2 + 56*a3)*(cp0 - cpInf) - 15*a2*cpInf - 28*a2**2*cpInf - 35*a3*cpInf - 84*a2*a3*cpInf - 60*a3**2*cpInf))/
        (2.*(B + t)**4) - (B**4*(cp0 - cpInf)*((1 + 6*a0**2 + 15*a1**2 + 32*a2 + 28*a2**2 + 50*a3 + 72*a2*a3 + 45*a3**2 + 2*a1*(9 + 21*a2 + 28*a3) + a0*(8 + 20*a1 + 30*a2 + 42*a3))*cp0 -
        (1 + 6*a0**2 + 15*a1**2 + 40*a2 + 28*a2**2 + 70*a3 + 72*a2*a3 + 45*a3**2 + a0*(8 + 20*a1 + 30*a2 + 42*a3) + a1*(20 + 42*a2 + 56*a3))*cpInf))/(3.*(B + t)**3) +
        (B**3*(cp0 - cpInf)*((2 + 2*a0**2 + 3*a1**2 + 9*a2 + 4*a2**2 + 11*a3 + 9*a2*a3 + 5*a3**2 + a0*(5 + 5*a1 + 6*a2 + 7*a3) + a1*(7 + 7*a2 + 8*a3))*cp0 -
        (2 + 2*a0**2 + 3*a1**2 + 15*a2 + 4*a2**2 + 21*a3 + 9*a2*a3 + 5*a3**2 + a0*(6 + 5*a1 + 6*a2 + 7*a3) + a1*(10 + 7*a2 + 8*a3))*cpInf))/(B + t)**2 -
        (B**2*((2 + a0 + a1 + a2 + a3)**2*cp0**2 - 2*(5 + a0**2 + a1**2 + 8*a2 + a2**2 + 9*a3 + 2*a2*a3 + a3**2 + 2*a0*(3 + a1 + a2 + a3) + a1*(7 + 2*a2 + 2*a3))*cp0*cpInf +
        (6 + a0**2 + a1**2 + 12*a2 + a2**2 + 14*a3 + 2*a2*a3 + a3**2 + 2*a1*(5 + a2 + a3) + 2*a0*(4 + a1 + a2 + a3))*cpInf**2))/(B + t) +
        2*(2 + a0 + a1 + a2 + a3)*B*(cp0 - cpInf)*cpInf*logBplust)
    return result
def Wilhoit_integral2_TM1(wilhoit, t):
    # Analytic antiderivative Integrate[(Cp(Wilhoit)/R)^2*t^-1, t'] evaluated
    # at t'=t; used for the 1/t-weighted polynomial-fit error integrals.
    cython.declare(cp0=cython.double, cpInf=cython.double, B=cython.double, a0=cython.double, a1=cython.double, a2=cython.double, a3=cython.double)
    cython.declare(logBplust=cython.double, logt=cython.double, result=cython.double)
    cp0, cpInf, B, a0, a1, a2, a3 = wilhoit.cp0, wilhoit.cpInf, wilhoit.B, wilhoit.a0, wilhoit.a1, wilhoit.a2, wilhoit.a3
    if cython.compiled:
        logBplust = log(B + t); logt = log(t)  # C-level log when compiled
    else:
        logBplust = math.log(B + t); logt = math.log(t)
    # Machine-generated closed form (expansion in powers of B/(B+t)); kept
    # verbatim, since any regrouping changes floating-point rounding.
    result = ( (a3**2*B**11*(cp0 - cpInf)**2)/(11.*(B + t)**11) - (a3*(2*a2 + 9*a3)*B**10*(cp0 - cpInf)**2)/(10.*(B + t)**10) +
        ((a2**2 + 16*a2*a3 + 2*a3*(a1 + 18*a3))*B**9*(cp0 - cpInf)**2)/(9.*(B + t)**9) -
        ((7*a2**2 + 56*a2*a3 + 2*a1*(a2 + 7*a3) + 2*a3*(a0 + 42*a3))*B**8*(cp0 - cpInf)**2)/(8.*(B + t)**8) +
        ((a1**2 + 21*a2**2 + 2*a3 + 112*a2*a3 + 126*a3**2 + 2*a0*(a2 + 6*a3) + 6*a1*(2*a2 + 7*a3))*B**7*(cp0 - cpInf)**2)/(7.*(B + t)**7) -
        ((5*a1**2 + 2*a2 + 30*a1*a2 + 35*a2**2 + 12*a3 + 70*a1*a3 + 140*a2*a3 + 126*a3**2 + 2*a0*(a1 + 5*(a2 + 3*a3)))*B**6*(cp0 - cpInf)**2)/(6.*(B + t)**6) +
        (B**5*(cp0 - cpInf)*(10*a2*cp0 + 35*a2**2*cp0 + 28*a3*cp0 + 112*a2*a3*cp0 + 84*a3**2*cp0 + a0**2*(cp0 - cpInf) + 10*a1**2*(cp0 - cpInf) + 2*a1*(1 + 20*a2 + 35*a3)*(cp0 - cpInf) +
        4*a0*(2*a1 + 5*(a2 + 2*a3))*(cp0 - cpInf) - 10*a2*cpInf - 35*a2**2*cpInf - 30*a3*cpInf - 112*a2*a3*cpInf - 84*a3**2*cpInf))/(5.*(B + t)**5) -
        (B**4*(cp0 - cpInf)*(18*a2*cp0 + 21*a2**2*cp0 + 32*a3*cp0 + 56*a2*a3*cp0 + 36*a3**2*cp0 + 3*a0**2*(cp0 - cpInf) + 10*a1**2*(cp0 - cpInf) +
        2*a0*(1 + 6*a1 + 10*a2 + 15*a3)*(cp0 - cpInf) + 2*a1*(4 + 15*a2 + 21*a3)*(cp0 - cpInf) - 20*a2*cpInf - 21*a2**2*cpInf - 40*a3*cpInf - 56*a2*a3*cpInf - 36*a3**2*cpInf))/
        (4.*(B + t)**4) + (B**3*(cp0 - cpInf)*((1 + 3*a0**2 + 5*a1**2 + 14*a2 + 7*a2**2 + 18*a3 + 16*a2*a3 + 9*a3**2 + 2*a0*(3 + 4*a1 + 5*a2 + 6*a3) + 2*a1*(5 + 6*a2 + 7*a3))*cp0 -
        (1 + 3*a0**2 + 5*a1**2 + 20*a2 + 7*a2**2 + 30*a3 + 16*a2*a3 + 9*a3**2 + 2*a0*(3 + 4*a1 + 5*a2 + 6*a3) + 2*a1*(6 + 6*a2 + 7*a3))*cpInf))/(3.*(B + t)**3) -
        (B**2*((3 + a0**2 + a1**2 + 4*a2 + a2**2 + 4*a3 + 2*a2*a3 + a3**2 + 2*a1*(2 + a2 + a3) + 2*a0*(2 + a1 + a2 + a3))*cp0**2 -
        2*(3 + a0**2 + a1**2 + 7*a2 + a2**2 + 8*a3 + 2*a2*a3 + a3**2 + 2*a1*(3 + a2 + a3) + a0*(5 + 2*a1 + 2*a2 + 2*a3))*cp0*cpInf +
        (3 + a0**2 + a1**2 + 10*a2 + a2**2 + 12*a3 + 2*a2*a3 + a3**2 + 2*a1*(4 + a2 + a3) + 2*a0*(3 + a1 + a2 + a3))*cpInf**2))/(2.*(B + t)**2) +
        (B*(cp0 - cpInf)*(cp0 - (3 + 2*a0 + 2*a1 + 2*a2 + 2*a3)*cpInf))/(B + t) + cp0**2*logt + (-cp0**2 + cpInf**2)*logBplust)
    return result
################################################################################
def NASAPolynomial_integral2_T0(polynomial, T):
    # Analytic antiderivative Integrate[(Cp(NASAPolynomial)/R)^2, t'] evaluated
    # at t'=T, with Cp/R = c0 + c1*T + c2*T^2 + c3*T^3 + c4*T^4.
    cython.declare(c0=cython.double, c1=cython.double, c2=cython.double, c3=cython.double, c4=cython.double)
    cython.declare(T2=cython.double, T4=cython.double, T8=cython.double)
    c0, c1, c2, c3, c4 = polynomial.c0, polynomial.c1, polynomial.c2, polynomial.c3, polynomial.c4
    T2=T*T; T4=T2*T2; T8=T4*T4  # cached powers of T
    # Term for each c_i*c_j product: 2*c_i*c_j*T^(i+j+1)/(i+j+1) off-diagonal,
    # c_i^2*T^(2i+1)/(2i+1) on-diagonal.
    result = (
        c0*c0*T + c0*c1*T2 + 2./3.*c0*c2*T2*T + 0.5*c0*c3*T4 + 0.4*c0*c4*T4*T +
        c1*c1*T2*T/3. + 0.5*c1*c2*T4 + 0.4*c1*c3*T4*T + c1*c4*T4*T2/3. +
        0.2*c2*c2*T4*T + c2*c3*T4*T2/3. + 2./7.*c2*c4*T4*T2*T +
        c3*c3*T4*T2*T/7. + 0.25*c3*c4*T8 +
        c4*c4*T8*T/9.
    )
    return result
def NASAPolynomial_integral2_TM1(polynomial, T):
    # Analytic antiderivative Integrate[(Cp(NASAPolynomial)/R)^2*t^-1, t']
    # evaluated at t'=T; the c0^2 term integrates to log(T).
    cython.declare(c0=cython.double, c1=cython.double, c2=cython.double, c3=cython.double, c4=cython.double)
    cython.declare(T2=cython.double, T4=cython.double, logT=cython.double)
    c0, c1, c2, c3, c4 = polynomial.c0, polynomial.c1, polynomial.c2, polynomial.c3, polynomial.c4
    T2=T*T; T4=T2*T2  # cached powers of T
    if cython.compiled:
        logT = log(T)  # C-level log when compiled with Cython
    else:
        logT = math.log(T)
    result = (
        c0*c0*logT + 2*c0*c1*T + c0*c2*T2 + 2./3.*c0*c3*T2*T + 0.5*c0*c4*T4 +
        0.5*c1*c1*T2 + 2./3.*c1*c2*T2*T + 0.5*c1*c3*T4 + 0.4*c1*c4*T4*T +
        0.25*c2*c2*T4 + 0.4*c2*c3*T4*T + c2*c4*T4*T2/3. +
        c3*c3*T4*T2/6. + 2./7.*c3*c4*T4*T2*T +
        c4*c4*T4*T4/8.
    )
    return result
################################################################################
#the numerical integrals:
def Nintegral_T0(CpObject, tmin, tmax):
    """Numerical integral of Cp/R over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 0, 0)
def Nintegral_TM1(CpObject, tmin, tmax):
    """Numerical integral of Cp/R * t^-1 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, -1, 0)
def Nintegral_T1(CpObject, tmin, tmax):
    """Numerical integral of Cp/R * t over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 1, 0)
def Nintegral_T2(CpObject, tmin, tmax):
    """Numerical integral of Cp/R * t^2 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 2, 0)
def Nintegral_T3(CpObject, tmin, tmax):
    """Numerical integral of Cp/R * t^3 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 3, 0)
def Nintegral_T4(CpObject, tmin, tmax):
    """Numerical integral of Cp/R * t^4 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 4, 0)
def Nintegral2_T0(CpObject, tmin, tmax):
    """Numerical integral of (Cp/R)^2 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, 0, 1)
def Nintegral2_TM1(CpObject, tmin, tmax):
    """Numerical integral of (Cp/R)^2 * t^-1 over [tmin, tmax] (units as in Nintegral)."""
    return Nintegral(CpObject, tmin, tmax, -1, 1)
def Nintegral(CpObject, tmin, tmax, n, squared):
    """Numerically approximate an integral of the heat capacity.

    CpObject: object with method getHeatCapacity(T) returning Cp in J/mol-K
        (T in K).
    tmin, tmax: limits of integration in kiloKelvin.
    n: integer exponent on t (typically -1 to 4).
    squared: 0 -> integrand is Cp/R(t)*t^n; 1 -> integrand is (Cp/R(t))^2*t^n.

    Returns a numerical approximation to Integrate[..., {t, tmin, tmax}],
    in units based on kiloKelvin.
    """
    value, _abserr = integrate.quad(integrand, tmin, tmax, args=(CpObject, n, squared))
    return value
def integrand(t, CpObject, n, squared):
    """Integrand for Nintegral: (Cp/R)(t), optionally squared, times t**n.

    t is in kiloKelvin, hence the factor of 1000 when querying CpObject,
    whose getHeatCapacity expects Kelvin; dividing by the gas constant R
    makes the heat capacity dimensionless.
    """
    result = CpObject.getHeatCapacity(t*1000)/constants.R
    if squared:
        result = result*result
    # t**n handles positive, zero and negative exponents directly, replacing
    # the original repeated multiply/divide loops (mathematically identical;
    # rounding can differ only in the last ulp, negligible inside quad).
    return result * t**n
| jwallen/ChemPy | chempy/ext/thermo_converter.py | Python | mit | 56,584 | [
"ChemPy"
] | 892369da6611fc3d05ad2715d73a71386557a9e0c9a4a8f34e95ef3dc64c0582 |
# Copyright 2008-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "tab" (simple tab separated) file format.
You are expected to use this module via the Bio.SeqIO functions.
The "tab" format is an ad-hoc plain text file format where each sequence is
on one (long) line. Each line contains the identifier/description, followed
by a tab, followed by the sequence. For example, consider the following
short FASTA format file::
>ID123456 possible binding site?
CATCNAGATGACACTACGACTACGACTCAGACTAC
>ID123457 random sequence
ACACTACGACTACGACTCAGACTACAAN
Apart from the descriptions, this can be represented in the simple two column
tab separated format as follows::
ID123456(tab)CATCNAGATGACACTACGACTACGACTCAGACTAC
ID123457(tab)ACACTACGACTACGACTCAGACTACAAN
When reading this file, "ID123456" or "ID123457" will be taken as the record's
.id and .name property. There is no other information to record.
Similarly, when writing to this format, Biopython will ONLY record the record's
.id and .seq (and not the description or any other information) as in the
example above.
"""
from __future__ import print_function
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
__docformat__ = "restructuredtext en"
def TabIterator(handle, alphabet=single_letter_alphabet):
    """Iterates over tab separated lines (as SeqRecord objects).

    Each line of the file should contain one tab only, dividing the line
    into an identifier and the full sequence.

    Arguments:
     - handle - input file
     - alphabet - optional alphabet

    The first field is taken as the record's .id and .name (regardless of
    any spaces within the text) and the second field is the sequence.

    Any blank lines are ignored.

    Raises ValueError if a non-blank line does not contain exactly one tab.
    """
    for line in handle:
        try:
            title, seq = line.split("\t")  # will fail if more than one tab!
        except ValueError:
            # Was a bare "except:", which silently swallowed *every*
            # exception (including KeyboardInterrupt); only the unpacking
            # error from a wrong number of tab-separated fields belongs here.
            if line.strip() == "":
                # It's a blank line, ignore it
                continue
            raise ValueError("Each line should have one tab separating the" +
                             " title and sequence, this line has %i tabs: %s"
                             % (line.count("\t"), repr(line)))
        title = title.strip()
        seq = seq.strip()  # removes the trailing new line
        yield SeqRecord(Seq(seq, alphabet),
                        id=title, name=title,
                        description="")
class TabWriter(SequentialSequenceWriter):
    """Write records in the simple "id(tab)sequence" format.

    Only the record's id and sequence are stored; the description, name
    and any other annotation are discarded.
    """

    def write_record(self, record):
        """Write a single tab line to the file."""
        assert self._header_written
        assert not self._footer_written
        self._record_written = True

        title = self.clean(record.id)
        seq = self._get_seq_string(record)  # Catches sequence being None
        # Neither field may contain a tab or a line break, or the file
        # would no longer be parseable.
        for text in (title, seq):
            for forbidden in ("\t", "\n", "\r"):
                assert forbidden not in text
        self.handle.write("%s\t%s\n" % (title, seq))
if __name__ == "__main__":
    # Minimal self-test exercised only when this module is run directly.
    print("Running quick self test")
    from Bio._py3k import StringIO

    # This example has a trailing blank line which should be ignored
    handle = StringIO("Alpha\tAAAAAAA\nBeta\tCCCCCCC\n\n")
    records = list(TabIterator(handle))
    assert len(records) == 2

    # A line with two tabs must be rejected with ValueError.
    handle = StringIO("Alpha\tAAAAAAA\tExtra\nBeta\tCCCCCCC\n")
    try:
        records = list(TabIterator(handle))
        assert False, "Should have reject this invalid example!"
    except ValueError:
        # Good!
        pass
    print("Done")
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SeqIO/TabIO.py | Python | gpl-2.0 | 4,058 | [
"Biopython"
] | 88f92c62e6bc5a3a7f6332742c7fc3b890eb087ee57c16541d23fe0e55a3d3b8 |
# test_ksolve.py ---
# Originally written by Upinder S. Bhalla. It has been
# modified by Dilawar Singh Wed Aug 5 19:21:19 2015 to turn it into a
# test script. NOT TO BE USED for scientific demos or tutorials. The sole
# purpose of this script is to benchmark the Ksolve solver with various methods.
import sys
import moose
import math
import os
import time
import matplotlib.pyplot as plt
EREST_ACT = -70e-3  # resting membrane potential (V)

# Gate equations have the form:
#
# y(x) = (A + B * x) / (C + exp((x + D) / F))
#
# where x is membrane voltage and y is the rate constant for gate
# closing or opening.
# Each parameter list below holds the alpha coefficients (A_A..A_F)
# followed by the beta coefficients (B_A..B_F).

# Sodium activation gate (m).
Na_m_params = [1e5 * (25e-3 + EREST_ACT),   # 'A_A':
               -1e5,                        # 'A_B':
               -1.0,                        # 'A_C':
               -25e-3 - EREST_ACT,          # 'A_D':
               -10e-3,                      # 'A_F':
               4e3,                         # 'B_A':
               0.0,                         # 'B_B':
               0.0,                         # 'B_C':
               0.0 - EREST_ACT,             # 'B_D':
               18e-3                        # 'B_F':
               ]
# Sodium inactivation gate (h).
Na_h_params = [70.0,                        # 'A_A':
               0.0,                         # 'A_B':
               0.0,                         # 'A_C':
               0.0 - EREST_ACT,             # 'A_D':
               0.02,                        # 'A_F':
               1000.0,                      # 'B_A':
               0.0,                         # 'B_B':
               1.0,                         # 'B_C':
               -30e-3 - EREST_ACT,          # 'B_D':
               -0.01                        # 'B_F':
               ]
# Potassium activation gate (n).
K_n_params = [1e4 * (10e-3 + EREST_ACT),    # 'A_A':
              -1e4,                         # 'A_B':
              -1.0,                         # 'A_C':
              -10e-3 - EREST_ACT,           # 'A_D':
              -10e-3,                       # 'A_F':
              0.125e3,                      # 'B_A':
              0.0,                          # 'B_B':
              0.0,                          # 'B_C':
              0.0 - EREST_ACT,              # 'B_D':
              80e-3                         # 'B_F':
              ]
# Voltage range and resolution for the gate lookup tables.
VMIN = -30e-3 + EREST_ACT
VMAX = 120e-3 + EREST_ACT
VDIVS = 3000
def createSquid():
    """Create a single compartment squid model."""
    moose.Neutral('/n')
    soma = moose.SymCompartment('/n/compt')
    soma.Em = EREST_ACT + 10.613e-3
    soma.initVm = EREST_ACT
    soma.Cm = 7.85e-9 * 0.5
    soma.Rm = 4.2e5 * 5.0
    soma.Ra = 7639.44e3

    # Hodgkin-Huxley sodium channel: m^3 (gateX) and h (gateY) kinetics,
    # with the gate rate tables built from the module-level parameter lists.
    na = moose.HHChannel('/n/compt/Na')
    na.Xpower = 3
    m_gate = moose.HHGate(na.path + '/gateX')
    m_gate.setupAlpha(Na_m_params + [VDIVS, VMIN, VMAX])
    m_gate.useInterpolation = 1
    na.Ypower = 1
    h_gate = moose.HHGate(na.path + '/gateY')
    h_gate.setupAlpha(Na_h_params + [VDIVS, VMIN, VMAX])
    h_gate.useInterpolation = 1
    na.Gbar = 0.942e-3
    na.Ek = 115e-3 + EREST_ACT
    moose.connect(na, 'channel', soma, 'channel', 'OneToOne')

    # Delayed-rectifier potassium channel: n^4 (gateX) kinetics.
    k = moose.HHChannel('/n/compt/K')
    k.Xpower = 4.0
    n_gate = moose.HHGate(k.path + '/gateX')
    n_gate.setupAlpha(K_n_params + [VDIVS, VMIN, VMAX])
    n_gate.useInterpolation = 1
    k.Gbar = 0.2836e-3
    k.Ek = -12e-3 + EREST_ACT
    moose.connect(k, 'channel', soma, 'channel', 'OneToOne')
    return soma
def createSpine(parentCompt, parentObj, index, frac, length, dia, theta):
    """Create spine of specified dimensions and index.

    The spine is a two-compartment structure (thin shaft + wider head)
    attached at fraction `frac` along `parentCompt`, tilted by `theta`
    degrees in the y-z plane.  Returns the head compartment.
    """
    # Passive membrane constants: axial resistivity, specific membrane
    # resistance and capacitance (SI-style units as used throughout).
    RA = 1.0
    RM = 1.0
    CM = 0.01
    shaftDia = dia / 5.0
    sname = 'shaft' + str(index)
    hname = 'head' + str(index)
    # --- shaft: attach to the parent and position it along the dendrite ---
    shaft = moose.SymCompartment(parentObj.path + '/' + sname)
    moose.connect(parentCompt, 'cylinder', shaft, 'proximalOnly', 'Single')
    x = parentCompt.x0 + frac * (parentCompt.x - parentCompt.x0)
    y = parentCompt.y0 + frac * (parentCompt.y - parentCompt.y0)
    z = parentCompt.z0 + frac * (parentCompt.z - parentCompt.z0)
    shaft.x0 = x
    shaft.y0 = y
    shaft.z0 = z
    # End point displaced by `length` at angle `theta` in the y-z plane.
    sy = y + length * math.cos(theta * math.pi / 180.0)
    sz = z + length * math.sin(theta * math.pi / 180.0)
    shaft.x = x
    shaft.y = sy
    shaft.z = sz
    shaft.diameter = dia / 2.0
    shaft.length = length
    # NOTE(review): the electrical properties below use shaftDia (= dia/5)
    # while the geometric diameter above is dia/2 — confirm this mismatch
    # is intended.
    xa = math.pi * shaftDia * shaftDia / 4  # cross-sectional area
    circumference = math.pi * shaftDia
    shaft.Ra = RA * length / xa
    shaft.Rm = RM / (length * circumference)
    shaft.Cm = CM * length * circumference
    shaft.Em = EREST_ACT
    shaft.initVm = EREST_ACT
    # --- head: stacked on the distal end of the shaft, full diameter ---
    head = moose.SymCompartment(parentObj.path + '/' + hname)
    moose.connect(shaft, 'distal', head, 'proximal', 'Single')
    head.x0 = x
    head.y0 = sy
    head.z0 = sz
    hy = sy + length * math.cos(theta * math.pi / 180.0)
    hz = sz + length * math.sin(theta * math.pi / 180.0)
    head.x = x
    head.y = hy
    head.z = hz
    head.diameter = dia
    head.length = length
    xa = math.pi * dia * dia / 4.0
    circumference = math.pi * dia
    head.Ra = RA * length / xa
    head.Rm = RM / (length * circumference)
    head.Cm = CM * length * circumference
    head.Em = EREST_ACT
    head.initVm = EREST_ACT
    #print head.Rm, head.Ra, head.Cm, head.diameter, head.length
    #print shaft.Rm, shaft.Ra, shaft.Cm, shaft.diameter, shaft.length
    return head
def createSpineWithReceptor(compt, cell, index, frac):
    """Attach a spine at fraction `frac` along `compt`, equip its head with
    a glutamate receptor and a calcium pool, and return the receptor's
    synapse handler."""
    FaradayConst = 96485.3415  # s A / mol
    spine_len = 5.0e-6
    spine_dia = 4.0e-6
    head = createSpine(compt, cell, index, frac, spine_len, spine_dia, 0.0)

    # Double-exponential glutamate receptor channel on the spine head.
    receptor = moose.SynChan(head.path + '/gluR')
    receptor.tau1 = 4e-3
    receptor.tau2 = 4e-3
    receptor.Gbar = 1e-6
    receptor.Ek = 10.0e-3
    moose.connect(head, 'channel', receptor, 'channel', 'Single')

    handler = moose.SimpleSynHandler(receptor.path + '/synh')
    moose.connect(handler, 'activationOut', receptor, 'activation')

    # Calcium pool fed by the receptor current.
    ca_pool = moose.CaConc(head.path + '/ca')
    ca_pool.CaBasal = 1e-4  # 0.1 micromolar
    ca_pool.tau = 0.01
    B = 1.0 / (FaradayConst * spine_len * spine_dia * spine_dia * math.pi / 4)
    B = B / 20.0  # scaling factor for Ca buffering
    ca_pool.B = B
    moose.connect(receptor, 'IkOut', ca_pool, 'current', 'Single')
    return handler
def addPlot(objpath, field, plot):
    """Create a Table2 at /graphs/<plot> recording `field` of the element
    at `objpath`, and return the table."""
    assert moose.exists(objpath)
    source = moose.element(objpath)
    table = moose.Table2('/graphs/' + plot)
    moose.connect(table, 'requestOut', source, field)
    return table
def makeElecPlots():
    """Set up the recording tables for the electrical model under /graphs/elec."""
    moose.Neutral('/graphs')
    moose.Neutral('/graphs/elec')
    # (element path, field, table name) triples.  Several extra probes from
    # earlier prototyping runs (Na Gbar/Ik/Ek, head Im/Gk, ...) are
    # intentionally not recorded.
    probes = [
        ('/model/elec/compt', 'getVm', 'elec/dendVm'),
        ('/model/elec/compt/K', 'getGbar', 'elec/KGbar'),
        ('/model/elec/head0', 'getVm', 'elec/head0Vm'),
        ('/model/elec/head2', 'getVm', 'elec/head2Vm'),
        ('/model/elec/head0/ca', 'getCa', 'elec/head0Ca'),
        ('/model/elec/head1/ca', 'getCa', 'elec/head1Ca'),
        ('/model/elec/head2/ca', 'getCa', 'elec/head2Ca'),
        ('/model/elec/head0/gluR', 'getIk', 'elec/head0Ik'),
        ('/model/elec/head1/gluR', 'getIk', 'elec/head1Ik'),
        ('/model/elec/head2/gluR', 'getIk', 'elec/head2Ik'),
        ('/model/elec/head1/gluR', 'getGbar', 'elec/head1Gbar'),
        ('/model/elec/head2/gluR', 'getGbar', 'elec/head2Gbar'),
    ]
    for path, field, table in probes:
        addPlot(path, field, table)
def dumpPlots(outfile):
    """Plot every Table2 under /graphs into a 3-column grid of subplots and
    save the figure to `outfile`.

    Each table's vector is drawn in its own labelled subplot; the figure is
    written with savefig (nothing is shown interactively).
    """
    print('Plotting ..')
    records = {x.name: x for x in moose.wildcardFind('/graphs/##[TYPE=Table2]')}
    total = len(records)
    rows = 1 + total // 3  # always at least enough rows for `total` plots
    for i, (name, table) in enumerate(records.items()):
        # i + 1: report progress as 1/N .. N/N (was off by one, printing 0/N).
        print('\t %d/%d %s' % (i + 1, total, name))
        plt.subplot(rows, 3, i + 1)
        plt.plot(table.vector, label=r'$%s$' % name)
        plt.legend()
    plt.tight_layout()
    plt.savefig(outfile)
    print('[INFO] Saved to file %s' % outfile)
def makeSpinyCompt():
    # Build the prototype cell: one 30x6 micron squid-type compartment
    # carrying five spines, each driven by a shared spike generator.
    comptLength = 30e-6
    comptDia = 6e-6
    numSpines = 5
    compt = createSquid()
    compt.inject = 0
    # Geometry: the dendrite runs along the x axis from the origin.
    compt.x0 = 0
    compt.y0 = 0
    compt.z0 = 0
    compt.x = comptLength
    compt.y = 0
    compt.z = 0
    compt.length = comptLength
    compt.diameter = comptDia
    #kchan = moose.element( '/n/compt/K' )
    #kchan.Gbar = 0.2e-3
    # Spike generator driving every spine's synapse; 47 ms refractory
    # period, threshold below any Vm and level-triggered, so it fires at
    # the refractory-limited rate.
    synInput = moose.SpikeGen('/n/compt/synInput')
    synInput.refractT = 47e-3
    synInput.threshold = -1.0
    synInput.edgeTriggered = 0
    synInput.Vm(0)  # NOTE(review): appears to seed the generator's Vm input — confirm
    cell = moose.element('/n')
    for i in range(numSpines):
        r = createSpineWithReceptor(compt, cell, i, i/float(numSpines))
        r.synapse.num = 1
        syn = moose.element(r.path + '/synapse')
        moose.connect(synInput, 'spikeOut', syn, 'addSpike', 'Single')
        # Weight profile 0.2*i*(4-i): zero at spines 0 and 4, peaked at spine 2.
        syn.weight = 0.2 * i * (4 - i)
        syn.delay = i * 1.0e-3
def createPool( compt, name, concInit ):
    """Create a diffusible moose.Pool named *name* inside *compt*.

    :param compt: mesh/compartment element that will own the pool.
    :param name: pool name, appended to the compartment path.
    :param concInit: initial concentration for the pool.
    :return: the created moose.Pool.
    """
    # NOTE(review): meshEntries is never used afterwards; the lookup may only
    # serve to verify that '<compt>/mesh' exists (moose.element raises
    # otherwise) -- confirm before removing.
    meshEntries = moose.element( compt.path + '/mesh' )
    pool = moose.Pool( compt.path + '/' + name )
    pool.concInit = concInit
    pool.diffConst = 1e-11
    return pool
def createChemModel( neuroCompt, spineCompt, psdCompt ):
    """Build the chemical model: Ca-driven GluR trafficking between spine and
    PSD, and a Ca-activated kinase/phosphatase cycle acting on the K channel
    pools in the dendrite.

    :param neuroCompt: mesh for the dendrite compartment.
    :param spineCompt: mesh for the spine-head compartment.
    :param psdCompt: mesh for the postsynaptic-density compartment.
    """
    # Stuff in spine + psd
    #psdCa = createPool( psdCompt, 'Ca', 0.0001 )
    psdGluR = createPool( psdCompt, 'psdGluR', 1 )
    headCa = createPool( spineCompt, 'Ca', 1e-4 )
    headGluR = createPool( spineCompt, 'headGluR', 2 )
    toPsd = createPool( spineCompt, 'toPsd', 0 )
    toPsdInact = createPool( spineCompt, 'toPsdInact', 1e-3 )
    # Ca + toPsdInact <-> toPsd : Ca activates the GluR-trafficking enzyme.
    turnOnPsd = moose.Reac( spineCompt.path + '/turnOnPsd' )
    moose.connect( turnOnPsd, 'sub', headCa, 'reac', 'OneToOne' )
    moose.connect( turnOnPsd, 'sub', toPsdInact, 'reac', 'OneToOne' )
    moose.connect( turnOnPsd, 'prd', toPsd, 'reac', 'OneToOne' )
    turnOnPsd.Kf = 1e3
    turnOnPsd.Kb = 1
    # toPsd enzymatically moves receptors into the PSD: headGluR -> psdGluR.
    toPsdEnz = moose.Enz( toPsd.path + '/enz' )
    toPsdEnzCplx = moose.Pool( toPsdEnz.path + '/cplx' )
    toPsdEnzCplx.concInit = 0
    moose.connect( toPsdEnz, 'enz', toPsd, 'reac', 'OneToOne' )
    moose.connect( toPsdEnz, 'sub', headGluR, 'reac', 'OneToOne' )
    moose.connect( toPsdEnz, 'prd', psdGluR, 'reac', 'OneToOne' )
    moose.connect( toPsdEnz, 'cplx', toPsdEnzCplx, 'reac', 'OneToOne' )
    toPsdEnz.Km = 1.0e-3
    toPsdEnz.kcat = 10.0
    # Passive return path: psdGluR -> headGluR (irreversible, Kb = 0).
    fromPsd = moose.Reac( psdCompt.path + '/fromPsd' )
    moose.connect( fromPsd, 'sub', psdGluR, 'reac', 'OneToOne' )
    moose.connect( fromPsd, 'prd', headGluR, 'reac', 'OneToOne' )
    fromPsd.Kf = 0.5
    fromPsd.Kb = 0.0
    # Stuff in dendrite
    dendCa = createPool( neuroCompt, 'Ca', 1e-4 )
    # Buffer pool acting as sink/source for the Ca pump reaction.
    bufCa = moose.Pool( neuroCompt.path + '/bufCa' )
    bufCa.concInit = 1e-4
    pumpCa = moose.Reac( neuroCompt.path + '/pumpCa' )
    moose.connect( pumpCa, 'sub', dendCa, 'reac', 'OneToOne' )
    moose.connect( pumpCa, 'prd', bufCa, 'reac', 'OneToOne' )
    pumpCa.Kf = 1
    pumpCa.Kb = 1
    # Ca + inact_kinase <-> Ca.kinase : Ca turns the dendritic kinase on.
    dendKinaseInact = createPool( neuroCompt, 'inact_kinase', 1e-4 )
    dendKinase = createPool( neuroCompt, 'Ca.kinase', 0.0 )
    dendTurnOnKinase = moose.Reac( neuroCompt.path + '/turnOnKinase' )
    moose.connect( dendTurnOnKinase, 'sub', dendCa, 'reac' )
    moose.connect( dendTurnOnKinase, 'sub', dendKinaseInact, 'reac' )
    moose.connect( dendTurnOnKinase, 'prd', dendKinase, 'reac' )
    dendTurnOnKinase.Kf = 50000
    dendTurnOnKinase.Kb = 1
    # Active kinase phosphorylates the K channel pool: kChan -> kChan_p.
    dendKinaseEnz = moose.Enz( dendKinase.path + '/enz' )
    dendKinaseEnzCplx = moose.Pool( dendKinase.path + '/enz/cplx' )
    kChan = createPool( neuroCompt, 'kChan', 1e-3 )
    kChan_p = createPool( neuroCompt, 'kChan_p', 0.0 )
    moose.connect( dendKinaseEnz, 'enz', dendKinase, 'reac', 'OneToOne' )
    moose.connect( dendKinaseEnz, 'sub', kChan, 'reac', 'OneToOne' )
    moose.connect( dendKinaseEnz, 'prd', kChan_p, 'reac', 'OneToOne' )
    moose.connect( dendKinaseEnz, 'cplx', dendKinaseEnzCplx, 'reac', 'OneToOne' )
    dendKinaseEnz.Km = 1e-4
    dendKinaseEnz.kcat = 20
    # Phosphatase reverses it: kChan_p -> kChan (irreversible).
    dendPhosphatase = moose.Reac( neuroCompt.path + '/phosphatase' )
    moose.connect( dendPhosphatase, 'sub', kChan_p, 'reac' )
    moose.connect( dendPhosphatase, 'prd', kChan, 'reac' )
    dendPhosphatase.Kf = 1
    dendPhosphatase.Kb = 0.0
# Just for printf debugging
def printMolVecs( title ):
    """Debug helper: print *title*; the detailed dump below is disabled."""
    print(title)
    # The triple-quoted block below is an inert expression statement kept as
    # disabled Python-2 debug code (print statements); it never executes.
    """
    nCa = moose.vec( '/model/chem/neuroMesh/Ca' )
    sCa = moose.vec( '/model/chem/spineMesh/Ca' )
    sR = moose.vec( '/model/chem/spineMesh/headGluR' )
    pR = moose.vec( '/model/chem/psdMesh/psdGluR' )
    print 'sizes: nCa, sCa, sR, pR = ', len(nCa), len(sCa), len(sR), len(pR)
    #print 'nCa=', nCa.conc, ', sCa=', sCa.conc, ', sR=', sR.n, ', pR=', pR.n
    print 'nCaConcInit=', nCa.concInit, ', sCa=', sCa.concInit
    #print 'sRnInit=', sR.nInit, ', pR=', pR.nInit
    print 'sRconcInit=', sR.concInit, ', pR=', pR.concInit
    #print 'nCaSize=', nCa.volume, ', sCa=', sCa.volume, ', sR=', sR.n, ', pR=', pR.n
    """
def makeChemInCubeMesh():
    """Create the three chemical compartments (dendrite, spine, PSD) as cube
    meshes laid end-to-end along x, then populate them via createChemModel().
    """
    dendSide = 10.8e-6
    spineSide = 6.8e-6
    psdSide = 8.565e-7
    parent = moose.Neutral ('/model/chem' )
    neuroMesh = moose.CubeMesh( '/model/chem/neuroMesh' )
    spineMesh = moose.CubeMesh( '/model/chem/spineMesh' )
    psdMesh = moose.CubeMesh( '/model/chem/psdMesh' )
    # coords is presumably [x0 y0 z0 x1 y1 z1 dx dy dz] (CubeMesh
    # convention -- verify); each mesh is one cube of its own side length,
    # shifted along x so the three compartments do not overlap.
    coords = [dendSide] * 9
    coords[0] = 0
    coords[1] = 0
    coords[2] = 0
    neuroMesh.coords = coords
    neuroMesh.preserveNumEntries = 1
    coords = [spineSide] * 9
    coords[0] = dendSide
    coords[1] = 0
    coords[2] = 0
    coords[3] = spineSide + dendSide
    spineMesh.coords = coords
    spineMesh.preserveNumEntries = 1
    coords = [psdSide] * 9
    coords[0] = dendSide + spineSide
    coords[1] = 0
    coords[2] = 0
    coords[3] = psdSide + spineSide + dendSide
    psdMesh.coords = coords
    psdMesh.preserveNumEntries = 1
    createChemModel( neuroMesh, spineMesh, psdMesh )
    # Sanity-check pool volumes against the cube volumes.
    # NOTE(review): exact float equality -- this only holds if moose computes
    # the volume as the identical product of doubles; switch to
    # math.isclose() if these asserts ever start failing spuriously.
    dendCa = moose.element( '/model/chem/neuroMesh/Ca' )
    assert dendCa.volume == dendSide * dendSide * dendSide
    spineCa = moose.element( '/model/chem/spineMesh/Ca' )
    assert spineCa.volume == spineSide * spineSide * spineSide
    psdGluR = moose.element( '/model/chem/psdMesh/psdGluR' )
    assert psdGluR.volume == psdSide * psdSide * psdSide
    dendKinaseEnzCplx = moose.element( '/model/chem/neuroMesh/Ca.kinase/enz/cplx' )
    assert dendKinaseEnzCplx.volume == dendSide * dendSide * dendSide
def makeSolvers( elecDt ):
    """Attach numerical solvers: one Ksolve/Stoich pair covering all the
    chemistry, and an HSolve for the electrical compartment.

    :param elecDt: integration time step for the electrical (HSolve) solver.
    :return: the moose.Ksolve, so the caller can set its method.
    """
    # Put in the solvers, see how they fare.
    # Here we kludge in a single chem solver for the whole system.
    ksolve = moose.Ksolve( '/model/ksolve' )
    stoich = moose.Stoich( '/model/stoich' )
    stoich.compartment = moose.element( '/model/chem/neuroMesh' )
    stoich.ksolve = ksolve
    stoich.path = '/model/chem/##'
    moose.useClock( 5, '/model/ksolve', 'init' )
    moose.useClock( 6, '/model/ksolve', 'process' )
    # Here is the elec solver
    hsolve = moose.HSolve( '/model/hsolve' )
    moose.useClock( 1, '/model/hsolve', 'process' )
    hsolve.dt = elecDt
    hsolve.target = '/model/elec/compt'
    return ksolve
def makeCubeMultiscale():
    """Assemble the multiscale model: electrical spiny compartment, cube-mesh
    chemistry, a fake-diffusion reaction between spine and dendrite Ca, and
    Adaptor objects coupling the electrical and chemical sides.
    """
    makeSpinyCompt()
    # Move the electrical model built under /n to /model/elec.
    model = moose.Neutral( '/model' )
    elec = moose.element( '/n' )
    elec.name = 'elec'
    moose.move( elec, model )
    synInput = moose.element( '/model/elec/compt/synInput' )
    synInput.refractT = 47e-3
    makeChemInCubeMesh()
    # set up a reaction to fake diffusion between compts.
    headCa = moose.element( '/model/chem/spineMesh/Ca' )
    dendCa = moose.element( '/model/chem/neuroMesh/Ca' )
    diffReac = moose.Reac( '/model/chem/spineMesh/diff' )
    moose.connect( diffReac, 'sub', headCa, 'reac' )
    moose.connect( diffReac, 'prd', dendCa, 'reac' )
    diffReac.Kf = 1
    # Volume ratio balances the forward/backward fluxes at equal conc.
    diffReac.Kb = headCa.volume / dendCa.volume
    # set up adaptors
    headCa = moose.element( '/model/chem/spineMesh/Ca' )
    dendCa = moose.element( '/model/chem/neuroMesh/Ca' )
    adaptCa = moose.Adaptor( '/model/chem/adaptCa' )
    elecCa = moose.element( '/model/elec/head2/ca' )
    # There are 5 spine heads in the electrical model. Average their input.
    for i in range( 5 ):
        path = '/model/elec/head' + str( i ) + '/ca'
        elecCa = moose.element( path )
        moose.connect( elecCa, 'concOut', adaptCa, 'input', 'Single' )
    moose.connect( adaptCa, 'output', headCa, 'setConc' )
    adaptCa.outputOffset = 0.0001    # 100 nM offset in chem.
    adaptCa.scale = 0.05             # 0.06 to 0.003 mM
    adaptGluR = moose.Adaptor( '/model/chem/psdMesh/adaptGluR' )
    chemR = moose.element( '/model/chem/psdMesh/psdGluR' )
    # Here we connect up the chem adaptors to only 3 of the spine
    # heads in the elec model, just to make it interesting.
    elec1R = moose.element( '/model/elec/head1/gluR' )
    elec2R = moose.element( '/model/elec/head2/gluR' )
    elec3R = moose.element( '/model/elec/head3/gluR' )
    moose.connect( adaptGluR, 'requestOut', chemR, 'getN', 'OneToAll' )
    moose.connect( adaptGluR, 'output', elec1R, 'setGbar', 'OneToAll' )
    moose.connect( adaptGluR, 'output', elec2R, 'setGbar', 'OneToAll' )
    moose.connect( adaptGluR, 'output', elec3R, 'setGbar', 'OneToAll' )
    adaptGluR.outputOffset = 1e-9    # pS
    adaptGluR.scale = 1e-8 / 100     # from n to pS
    # Chemical kChan concentration drives the electrical K conductance.
    adaptK = moose.Adaptor( '/model/chem/neuroMesh/adaptK' )
    chemK = moose.element( '/model/chem/neuroMesh/kChan' )
    elecK = moose.element( '/model/elec/compt/K' )
    moose.connect( adaptK, 'requestOut', chemK, 'getConc', 'OneToAll' )
    moose.connect( adaptK, 'output', elecK, 'setGbar', 'OneToAll' )
    adaptK.scale = 0.3               # from mM to Siemens
def makeChemPlots():
    """Create Table2 recorders under /graphs for the chemical model."""
    moose.Neutral( '/graphs' )
    # (object path, field getter, plot name) triples, recorded in this order.
    recordings = (
        ( '/model/chem/psdMesh/psdGluR', 'getN', 'psd0R' ),
        ( '/model/chem/spineMesh/Ca', 'getConc', 'spine0Ca' ),
        ( '/model/chem/neuroMesh/Ca', 'getConc', 'dend0Ca' ),
        ( '/model/chem/neuroMesh/kChan_p', 'getConc', 'kChan_p' ),
        ( '/model/chem/neuroMesh/kChan', 'getConc', 'kChan' ),
        ( '/model/chem/neuroMesh/Ca.kinase', 'getConc', 'dendKinase' ),
        ( '/model/chem/spineMesh/toPsd', 'getConc', 'toPsd0' ),
    )
    for objpath, field, plot in recordings:
        addPlot( objpath, field, plot )
def testCubeMultiscale( method ):
    """Build the multiscale model, wire up clocks and solvers, run briefly
    and save the plots.

    :param method: ODE method name assigned to the Ksolve (e.g. 'gsl').
    """
    # Time steps.  (The original first set elecDt=10e-6, chemDt=1e-4 and two
    # plot-file names and then immediately overwrote them; those dead
    # assignments are removed here -- plotName is computed after the run.)
    elecDt = 50e-6
    chemDt = 5e-3
    plotDt = 5e-4
    makeCubeMultiscale()
    makeChemPlots()
    makeElecPlots()
    # Clocks 0-2: electrical model; 5-6: chemistry; 7-8: plot recorders.
    moose.setClock( 0, elecDt )
    moose.setClock( 1, elecDt )
    moose.setClock( 2, elecDt )
    moose.setClock( 5, chemDt )
    moose.setClock( 6, chemDt )
    moose.setClock( 7, plotDt )
    moose.setClock( 8, plotDt )
    moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
    moose.useClock( 1, '/model/elec/##[ISA=Compartment],/model/elec/##[ISA=SpikeGen]', 'process' )
    moose.useClock( 2, '/model/elec/##[ISA=SynBase],/model/elec/##[ISA=ChanBase],/model/elec/##[ISA=CaConc]','process')
    moose.useClock( 5, '/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
    moose.useClock( 6, '/model/##[ISA=PoolBase],/model/chem/##[ISA=Adaptor]', 'process' )
    moose.useClock( 7, '/graphs/#', 'process' )
    moose.useClock( 8, '/graphs/elec/#', 'process' )
    ksolve = makeSolvers( elecDt )
    ksolve.method = method
    print( '[INFO] Using method %s' % ksolve.method )
    moose.reinit()
    t = time.time()
    moose.start( 1e-6 )
    print( "Total time taken: %s sec "% (time.time()-t) )
    # Name the output after this script and the solver method actually used.
    plotName = '%s_%s.png' % (sys.argv[0], ksolve.method )
    dumpPlots( plotName )
    print( 'All done' )
def main( method ):
    """Entry point: run the multiscale test with the given solver method."""
    testCubeMultiscale( method )
if __name__ == '__main__':
    # Use the solver method from the command line, defaulting to 'gsl'.
    # BUGFIX: the original tested ``len(sys.argv) > 2``, which silently
    # ignored a single method argument (argv has length 2 then); ``> 1``
    # is the correct check for "at least one argument was given".
    if len(sys.argv) > 1:
        main( sys.argv[1] )
    else:
        main( 'gsl' )
| BhallaLab/moose | moose-examples/kinetics/test_ksolve.py | Python | gpl-3.0 | 20,058 | [
"MOOSE"
] | f173693c60df6083fdc7e154f5b7621b3957f3f9456571a52448c38458fc1b8c |
#!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# most of it copied from AWX's scan_packages module
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support markers consumed by the Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc.
# FIX: corrected the typo "queres" -> "queries" in the strategy description.
DOCUMENTATION = '''
module: package_facts
short_description: package information as facts
description:
  - Return information about installed packages as facts
options:
  manager:
    description:
      - The package manager used by the system so we can query the package information.
      - Since 2.8 this is a list and can support multiple package managers per system.
      - The 'portage' and 'pkg' options were added in version 2.8.
    default: ['auto']
    choices: ['auto', 'rpm', 'apt', 'portage', 'pkg']
    required: False
    type: list
  strategy:
    description:
      - This option controls how the module queries the package managers on the system.
        C(first) means it will return only information for the first supported package manager available.
        C(all) will return information for all supported and available package managers on the system.
    choices: ['first', 'all']
    default: 'first'
    version_added: "2.8"
version_added: "2.5"
requirements:
    - For 'portage' support it requires the C(qlist) utility, which is part of 'app-portage/portage-utils'.
    - For Debian-based systems C(python-apt) package must be installed on targeted hosts.
author:
  - Matthew Jones (@matburt)
  - Brian Coca (@bcoca)
  - Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: Gather the rpm package facts
package_facts:
manager: auto
- name: Print the rpm package facts
debug:
var: ansible_facts.packages
'''
RETURN = '''
ansible_facts:
description: facts to add to ansible_facts
returned: always
type: complex
contains:
packages:
description: list of dicts with package information
returned: when operating system level package manager is specified or auto detected manager
type: dict
sample_rpm:
{
"packages": {
"kernel": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.26.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.16.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.10.2.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "514.21.1.el7",
"source": "rpm",
"version": "3.10.0"
},
{
"arch": "x86_64",
"epoch": null,
"name": "kernel",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
"kernel-tools-libs": [
{
"arch": "x86_64",
"epoch": null,
"name": "kernel-tools-libs",
"release": "693.2.2.el7",
"source": "rpm",
"version": "3.10.0"
}
],
}
}
sample_deb:
{
"packages": {
"libbz2-1.0": [
{
"version": "1.0.6-5",
"source": "apt",
"arch": "amd64",
"name": "libbz2-1.0"
}
],
"patch": [
{
"version": "2.7.1-4ubuntu1",
"source": "apt",
"arch": "amd64",
"name": "patch"
}
],
}
}
'''
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.facts.packages import LibMgr, CLIMgr, get_all_pkg_managers
class RPM(LibMgr):
    """Query installed rpm packages through the ``rpm`` python bindings."""

    LIB = 'rpm'

    def list_installed(self):
        """Iterate over all installed packages from the rpm database."""
        return self._lib.TransactionSet().dbMatch()

    def get_package_details(self, package):
        """Extract the standard NEVRA fields from one rpm header."""
        return {
            'name': package[self._lib.RPMTAG_NAME],
            'version': package[self._lib.RPMTAG_VERSION],
            'release': package[self._lib.RPMTAG_RELEASE],
            'epoch': package[self._lib.RPMTAG_EPOCH],
            'arch': package[self._lib.RPMTAG_ARCH],
        }

    def is_available(self):
        ''' we expect the python bindings installed, but this gives warning if they are missing and we have rpm cli'''
        bindings_present = super(RPM, self).is_available()
        if not bindings_present:
            # The rpm binary exists but the python bindings do not: tell the
            # user what is missing instead of failing silently.
            if get_bin_path('rpm'):
                self.warnings.append('Found "rpm" but %s' % (missing_required_lib('rpm')))
        return bindings_present
class APT(LibMgr):
    """Query installed deb packages through the ``python-apt`` bindings."""

    LIB = 'apt'

    def __init__(self):
        self._cache = None
        super(APT, self).__init__()

    @property
    def pkg_cache(self):
        # Build the apt cache lazily and memoize it; creating it is expensive.
        if self._cache is None:
            self._cache = self._lib.Cache()
        return self._cache

    def is_available(self):
        ''' we expect the python bindings installed, but if there is apt/apt-get give warning about missing bindings'''
        bindings_present = super(APT, self).is_available()
        if not bindings_present:
            # An apt frontend exists but the bindings do not: warn once about
            # the first frontend found so the user knows what to install.
            for exe in ('apt', 'apt-get', 'aptitude'):
                if get_bin_path(exe):
                    self.warnings.append('Found "%s" but %s' % (exe, missing_required_lib('apt')))
                    break
        return bindings_present

    def list_installed(self):
        # Bind the cache once; going through the property per item is slow.
        cache = self.pkg_cache
        return [name for name in cache.keys() if cache[name].is_installed]

    def get_package_details(self, package):
        installed = self.pkg_cache[package].installed
        return {
            'name': package,
            'version': installed.version,
            'arch': installed.architecture,
            'category': installed.section,
            'origin': installed.origins[0].origin,
        }
class PKG(CLIMgr):
    """Query installed FreeBSD packages via the ``pkg`` CLI."""

    CLI = 'pkg'
    # Field order matches the %n/%v/%R/%t/%a/%q/%o/%p/%V query below.
    atoms = ['name', 'version', 'origin', 'installed', 'automatic', 'arch', 'category', 'prefix', 'vital']

    def list_installed(self):
        """Return one tab-separated line per installed package."""
        rc, out, err = module.run_command([self._cli, 'query', "%%%s" % '\t%'.join(['n', 'v', 'R', 't', 'a', 'q', 'o', 'p', 'V'])])
        if rc != 0 or err:
            raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
        return out.splitlines()

    def get_package_details(self, package):
        """Parse one ``pkg query`` output line into a package dict."""
        pkg = dict(zip(self.atoms, package.split('\t')))

        if 'arch' in pkg:
            try:
                # 'freebsd:11:x86:64' -> keep the architecture component.
                pkg['arch'] = pkg['arch'].split(':')[2]
            except IndexError:
                pass

        if 'automatic' in pkg:
            # BUGFIX: the field is the string '0' or '1'; bool('0') is True,
            # so convert through int first.
            pkg['automatic'] = bool(int(pkg['automatic']))

        if 'category' in pkg:
            pkg['category'] = pkg['category'].split('/', 1)[0]

        if 'version' in pkg:
            # Split off the port epoch (after ',') and revision (after '_').
            if ',' in pkg['version']:
                pkg['version'], pkg['port_epoch'] = pkg['version'].split(',', 1)
            else:
                pkg['port_epoch'] = 0

            if '_' in pkg['version']:
                pkg['version'], pkg['revision'] = pkg['version'].split('_', 1)
            else:
                pkg['revision'] = '0'

        if 'vital' in pkg:
            # BUGFIX: same string-to-bool pitfall as 'automatic' above.
            pkg['vital'] = bool(int(pkg['vital']))

        return pkg
class PORTAGE(CLIMgr):
    """Query installed Gentoo packages via ``qlist`` piped through ``qatom``."""

    CLI = 'qlist'
    atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']

    def list_installed(self):
        # qlist lists installed atoms; qatom splits each into its components.
        cmd = ' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom'])
        rc, out, err = module.run_command(cmd, use_unsafe_shell=True)
        if rc != 0:
            raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
        return out.splitlines()

    def get_package_details(self, package):
        # Pair each whitespace-separated qatom field with its atom name.
        return {atom: value for atom, value in zip(self.atoms, package.split())}
def main():
    """Module entry point: detect the requested/available package managers,
    gather installed-package data from them and return it as facts.
    """
    # get supported pkg managers
    PKG_MANAGERS = get_all_pkg_managers()
    PKG_MANAGER_NAMES = [x.lower() for x in PKG_MANAGERS.keys()]

    # start work
    # ``module`` is global because the manager classes above use it for
    # run_command()/warn().
    global module
    module = AnsibleModule(argument_spec=dict(manager={'type': 'list', 'default': ['auto']},
                                              strategy={'choices': ['first', 'all'], 'default': 'first'}),
                           supports_check_mode=True)
    packages = {}
    results = {'ansible_facts': {}}
    managers = [x.lower() for x in module.params['manager']]
    strategy = module.params['strategy']

    if 'auto' in managers:
        # keep order from user, we do dedupe below
        managers.extend(PKG_MANAGER_NAMES)
        managers.remove('auto')

    unsupported = set(managers).difference(PKG_MANAGER_NAMES)
    if unsupported:
        if 'auto' in module.params['manager']:
            msg = 'Could not auto detect a usable package manager, check warnings for details.'
        else:
            msg = 'Unsupported package managers requested: %s' % (', '.join(unsupported))
        module.fail_json(msg=msg)

    found = 0
    seen = set()
    for pkgmgr in managers:

        # With strategy 'first', stop after the first usable manager.
        if found and strategy == 'first':
            break

        # dedupe as per above
        if pkgmgr in seen:
            continue
        seen.add(pkgmgr)
        try:
            try:
                # manager throws exception on init (calls self.test) if not usable.
                manager = PKG_MANAGERS[pkgmgr]()
                if manager.is_available():
                    found += 1
                    packages.update(manager.get_packages())

            except Exception as e:
                # Only warn when the user explicitly asked for this manager.
                if pkgmgr in module.params['manager']:
                    module.warn('Requested package manager %s was not usable by this module: %s' % (pkgmgr, to_text(e)))
                continue

            # Surface any warnings the manager accumulated while probing.
            for warning in getattr(manager, 'warnings', []):
                module.warn(warning)

        except Exception as e:
            if pkgmgr in module.params['manager']:
                module.warn('Failed to retrieve packages with %s: %s' % (pkgmgr, to_text(e)))

    if found == 0:
        msg = ('Could not detect a supported package manager from the following list: %s, '
               'or the required Python library is not installed. Check warnings for details.' % managers)
        module.fail_json(msg=msg)

    # Set the facts, this will override the facts in ansible_facts that might exist from previous runs
    # when using operating system level or distribution package managers
    results['ansible_facts']['packages'] = packages

    module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| amenonsen/ansible | lib/ansible/modules/packaging/os/package_facts.py | Python | gpl-3.0 | 11,588 | [
"Brian"
] | 28d6f429eb1ec7d853c27258552f25eda1ff3735d2820848680fcfbd858f015e |
from scipy.signal import gaussian
from scipy.signal import tukey
from scipy.signal import hann
import numpy as np
class vz:
    """Virtual-Z marker pulse: rotates the frame phase by ``phi`` radians.

    ``channel`` and ``length`` are accepted only for signature compatibility
    with the other pulse constructors; they are not stored.
    """

    def __init__(self, channel, length, phi):
        self.phi = phi

    def is_vz(self):
        """Duck-type tag checked via hasattr() by the sequence renderer."""
        return True
class vf:
    """Virtual-frequency marker pulse: sets the frame detuning to ``freq``.

    ``channel`` and ``length`` are accepted only for signature compatibility
    with the other pulse constructors; they are not stored.
    """

    def __init__(self, channel, length, freq):
        self.freq = freq

    def is_vf(self):
        """Duck-type tag checked via hasattr() by the sequence renderer."""
        return True
class offset:
    """DC-offset marker pulse: sets the channel offset to ``offset``.

    ``channel`` and ``length`` are accepted only for signature compatibility
    with the other pulse constructors; they are not stored.
    """

    def __init__(self, channel, length, offset):
        self.offset = offset

    def is_offset(self):
        """Duck-type tag checked via hasattr() by the sequence renderer."""
        return True
class pulses:
    """Builder for multi-channel AWG pulse sequences.

    ``channels`` maps a channel name to a driver object that must provide
    ``get_clock()`` (samples/s) and ``get_nop()`` (record length); uploading
    via :meth:`set_seq` additionally requires ``set_waveform()``,
    ``freeze()``/``unfreeze()`` and ``get_physical_devices()``.

    A sequence step ("pulse") is a dict mapping every channel name to either
    a complex sample array or one of the virtual markers ``vz``/``vf``/
    ``offset``.
    """

    def __init__(self, channels=None):
        # BUGFIX: the original signature used the mutable default
        # ``channels={}``, which is created once and shared by every instance
        # constructed without an argument; use the None-sentinel idiom.
        self.channels = {} if channels is None else channels
        self.settings = {}
        self.initial_delay = 1e-6
        self.final_delay = 1e-6
        # Silence padding prepended/appended to every sequence in set_seq().
        self.global_pre = [self.p(None, self.initial_delay, None)]
        self.global_post = [self.p(None, self.final_delay, None)]

    def gauss_hd(self, channel, length, amp_x, sigma, alpha=0.):
        """Gaussian pulse with a DRAG-style derivative quadrature component.

        :param channel: channel name (used for its sample clock).
        :param length: pulse duration, s.
        :param amp_x: in-phase amplitude.
        :param sigma: gaussian width, s.
        :param alpha: weight of the derivative (quadrature) component.
        """
        clock = self.channels[channel].get_clock()
        nsamples = int(round(length * clock))
        gauss = gaussian(nsamples, sigma * clock)
        gauss -= gauss[0]           # start exactly at zero amplitude
        gauss /= np.max(gauss)      # normalize the peak to 1
        gauss_der = np.gradient(gauss) * clock
        return amp_x * (gauss + 1j * gauss_der * alpha)

    def envelope(self, channel, length, function, impulse):
        """Multiply *impulse* samplewise by ``function(t)``.

        NOTE(review): the time axis spans the channel's whole record
        (``get_nop()`` points), not just ``length``; with the default
        scalar-returning function this is harmless, but an array-returning
        envelope must match the impulse length -- confirm intended usage.
        """
        nop = self.channels[channel].get_nop()
        t = np.linspace(0, nop / self.channels[channel].get_clock(), nop, endpoint=False)
        return np.asarray(impulse * function(t))

    def rect_cos(self, channel, length, amp, length_tail, function_for_envelope=lambda x: 1, alpha=0.):
        """Rectangular pulse with Hann-shaped rising/falling tails.

        :param length: total duration including both tails, s.
        :param length_tail: duration of each cosine tail, s.
        :param function_for_envelope: optional extra envelope applied via
            :meth:`envelope`.
        :param alpha: weight of the derivative quadrature component.
        """
        clock = self.channels[channel].get_clock()
        plateau_duration = length - length_tail * 2
        tail_samples = int(length_tail * clock)
        # Split a full Hann window into its rising and falling halves.
        window = hann(2 * tail_samples)
        rising = window[:tail_samples]
        falling = window[tail_samples:]
        plateau = np.ones(int(round(plateau_duration * clock)))
        shape = rising.tolist()
        shape.extend(plateau.tolist())
        shape.extend(falling.tolist())
        impulse = np.asarray(shape)
        impulse -= impulse[0]
        impulse = self.envelope(channel, length, function_for_envelope, impulse)
        impulse_der = np.gradient(impulse) * clock
        return amp * (impulse + 1j * impulse_der * alpha)

    def sin(self, channel, length, amplitude, frequency):
        """Sine wave of given amplitude/frequency sampled on the channel clock."""
        t = np.arange(0, length, 1 / self.channels[channel].get_clock())
        return amplitude * np.sin(2 * np.pi * frequency * t)

    def rect(self, channel, length, amplitude):
        """Constant complex-amplitude pulse of the given duration."""
        nsamples = int(round(length * self.channels[channel].get_clock()))
        # BUGFIX: ``np.complex`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``complex`` yields the same complex128 dtype.
        return amplitude * np.ones(nsamples, dtype=complex)

    def pause(self, channel, length):
        """Zero-amplitude pulse (silence) of the given duration."""
        return self.rect(channel, length, 0)

    def p(self, channel, length, pulse_type=None, *params):
        """One sequence step: silence on every channel except *channel* (if
        given), which carries ``pulse_type(channel, length, *params)``.
        """
        step = {name: self.pause(name, length) for name in self.channels}
        if channel:
            step[channel] = pulse_type(channel, length, *params)
        return step

    def ps(self, channel, length, pulse_type=None, *params):
        """Alias of :meth:`p`.

        The original contained a verbatim copy of the same body; kept as a
        delegating wrapper for backward compatibility.
        """
        return self.p(channel, length, pulse_type, *params)

    def pmulti(self, length, *params):
        """One sequence step with pulses on several channels at once.

        Each extra argument is a tuple ``(channel, pulse_type, *args)``;
        channels not mentioned get silence.
        """
        step = {name: self.pause(name, length) for name in self.channels}
        for pulse in params:
            channel = pulse[0]
            step[channel] = pulse[1](channel, length, *pulse[2:])
        return step

    def parallel(self, *pulse_sequences):
        """Merge several pulse sequences so that they play in parallel.

        Virtual (zero-length) entries are passed through one at a time;
        physical entries from different sequences are summed into a single
        step whose duration equals the longest contributing pulse (shorter
        pulses are right-aligned).
        """
        current_pulse = [0] * len(pulse_sequences)
        merged_sequence = []
        depleted = [False] * len(pulse_sequences)
        while not np.all(depleted):
            # Determine which sequences currently point at a physical
            # (nonzero-sample) pulse, and the longest such pulse in seconds.
            physical_sequence = [False] * len(pulse_sequences)
            max_physical_sequence_length = 0
            for sequence_id, sequence in enumerate(pulse_sequences):
                if len(sequence) <= current_pulse[sequence_id]:
                    depleted[sequence_id] = True  # no pulses left in this one
                    continue
                for channel_name, channel_pulse in sequence[current_pulse[sequence_id]].items():
                    if hasattr(channel_pulse, 'shape'):
                        if channel_pulse.shape[0] > 0:
                            physical_sequence[sequence_id] = True
                            duration = channel_pulse.shape[0] / self.channels[channel_name].get_clock()
                            if max_physical_sequence_length < duration:
                                max_physical_sequence_length = duration
            # If there are virtual gates pending, emit them first, one at a
            # time, so their channel state changes keep their ordering.
            if not np.all(np.logical_or(physical_sequence, depleted)):
                sequence_id = np.arange(len(pulse_sequences))[np.logical_not(np.logical_or(physical_sequence, depleted))][0]
                merged_sequence.append(pulse_sequences[sequence_id][current_pulse[sequence_id]])
                current_pulse[sequence_id] += 1
            else:
                # Sum the physical pulses, right-aligned in the merged step.
                pulse = self.pmulti(max_physical_sequence_length)
                for sequence_id, sequence in enumerate(pulse_sequences):
                    if depleted[sequence_id]:
                        continue
                    for channel_name, channel_pulse in sequence[current_pulse[sequence_id]].items():
                        pulse[channel_name][-len(channel_pulse):] += channel_pulse
                    current_pulse[sequence_id] += 1
                merged_sequence.append(pulse)
        return merged_sequence

    def awg(self, channel, length, waveform):
        """Pass a pre-computed waveform through unchanged (raw-AWG pulse type)."""
        return waveform

    def set_seq(self, seq, force=True):
        """Render *seq* (with global pre/post padding) and upload one waveform
        per channel.

        Virtual entries (``vz``/``vf``/``offset``) emit no samples; they
        update the phase, frequency offset and DC offset applied to all
        subsequent pulses on their channel.

        :param seq: list of sequence steps as produced by p()/pmulti()/etc.
        :param force: unused; kept for interface compatibility.
        :raises ValueError: if the rendered sequence exceeds the channel
            record length (``get_nop()`` samples).
        """
        pulse_seq_padded = self.global_pre + seq + self.global_post
        try:
            # Freeze the channels so hardware is reprogrammed only once.
            for channel, channel_device in self.channels.items():
                channel_device.freeze()
            virtual_phase = {k: 0 for k in self.channels.keys()}
            df = {k: 0 for k in self.channels.keys()}
            offsets = {k: 0 for k in self.channels.keys()}
            pulse_shape = {k: [] for k in self.channels.keys()}
            for channel, channel_device in self.channels.items():
                for pulse in pulse_seq_padded:
                    if hasattr(pulse[channel], 'is_vz'):
                        virtual_phase[channel] += pulse[channel].phi
                        continue
                    if hasattr(pulse[channel], 'is_vf'):
                        df[channel] = pulse[channel].freq
                        continue
                    if hasattr(pulse[channel], 'is_offset'):
                        offsets[channel] = pulse[channel].offset
                        continue
                    # Apply accumulated virtual phase plus the current
                    # frequency offset, then the DC offset.
                    clock = self.channels[channel].get_clock()
                    phase_ramp = virtual_phase[channel] + \
                        2 * np.pi * df[channel] / clock * np.arange(len(pulse[channel]))
                    pulse_shape[channel].extend(pulse[channel] * np.exp(1j * phase_ramp) + offsets[channel])
                    # Advance the rotating-frame phase past this pulse.
                    virtual_phase[channel] += 2 * np.pi * df[channel] / clock * len(pulse[channel])
                pulse_shape[channel] = np.asarray(pulse_shape[channel])
                if len(pulse_shape[channel]) > channel_device.get_nop():
                    # (The original also truncated the waveform here, but the
                    # result was discarded by the raise; dead code dropped.)
                    raise ValueError('pulse sequence too long')
                # Right-align the rendered sequence in a zero-padded record.
                tmp = np.zeros(channel_device.get_nop(), dtype=pulse_shape[channel].dtype)
                tmp[-len(pulse_shape[channel]):] = pulse_shape[channel]
                pulse_shape[channel] = tmp
                channel_device.set_waveform(pulse_shape[channel])
        finally:
            for channel, channel_device in self.channels.items():
                channel_device.unfreeze()
        self.last_seq = seq

        # Re-run every distinct physical device backing the channels.
        devices = []
        for channel in self.channels.values():
            devices.extend(channel.get_physical_devices())
        for device in list(set(devices)):
            device.run()
| ooovector/qtlab_replacement | pulses.py | Python | gpl-3.0 | 10,208 | [
"Gaussian"
] | c0820abc2efa9e00ec777e8d7e6371f6fcb49a3433d81e1c0c7e54214b11b485 |
#===============================================================================
#
# IO.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2013-2016 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
from ANNarchy.core import Global
import os
try:
import cPickle as pickle # Python2
except:
import pickle # Python3
import numpy as np
def load_parameters(filename, global_only=True, verbose=False, net_id=0):
"""
Loads the global parameters of a network (flag ``population`` for neurons, ``projection`` for synapses) from a JSON file.
It is advised to generate the JSON file first with ``save_parameters()`` and later edit it manually.
A strong restriction is that population/projection names cannot change between saving and loading.
By default, they take names such as ``pop0`` or ``proj2``, we advise setting explicitly a name in their constructor for readability.
If you add a parameter name to the JSON file but it does not exist in te neuron/synapse, it will be silently skipped.
Enable ``verbose=True`` to see which parameters are effectively changed.
If you set ``global_only`` to True, you will be able to set values for non-global parameters (e.g. synapse-specific), but a single value will be loaded for all.
The JSON file cannot contain arrays.
If you want to save/load the value of variables after a simulation, please refer to ``save()`` or ``load()``.
:param filename: path to the JSON file.
:param global_only: True if only global parameters (flags ``population`` and ``projection``) should be loaded, the other values are ignored. (default: True)
:param verbose: True if the old and new values of the parameters should be printed (default: False).
:param net_id: ID of the network (default: 0, the global network).
:return: a dictionary of additional parameters not related to populations or projections (keyword ``network`` in the JSON file).
"""
import json
with open(filename, 'r') as rfile:
desc = json.load(rfile)
if verbose:
Global._print('Loading parameters from file', filename)
Global._print('-'*40)
# Populations
try:
populations = desc['populations']
except:
populations = {}
if verbose:
Global._print('load_parameters(): no population parameters.')
for name, parameters in populations.items():
# Get the population
for pop in Global._network[net_id]['populations']:
if pop.name == name:
population = pop
break
else:
Global._warning('The population', name, 'defined in the file', filename, 'does not exist in the current network.')
if verbose:
Global._print('Population', name)
# Set the parameters
for name, val in parameters.items():
# Check that the variable indeed exists
if not name in population.parameters:
Global._print(' ', name, 'is not a global parameter of', population.name, ', skipping.')
continue
if global_only and not name in population.neuron_type.description['global']:
Global._print(' ', name, 'is not a global parameter of', population.name, ', skipping.')
continue
if verbose:
Global._print(' ', name, ':', population.get(name), '->', val)
population.set({name: float(val)})
# Projections
try:
projections = desc['projections']
except:
projections = {}
if verbose:
Global._print('load_parameters(): no projection parameters.')
for name, parameters in projections.items():
# Get the projection
for proj in Global._network[net_id]['projections']:
if proj.name == name:
projection = proj
break
else:
Global._warning('The projection', name, 'defined in the file', filename, 'does not exist in the current network.')
if verbose:
Global._print('Projection', name)
# Set the parameters
for name, val in parameters.items():
# Check that the variable indeed exists
if not name in projection.parameters:
Global._print(' ', name, 'is not a global parameter of', population.name, ', skipping.')
continue
if global_only and not name in projection.synapse_type.description['global']:
Global._print(' ', name, 'is not a global parameter of', population.name, ', skipping.')
continue
if verbose:
Global._print(' ', name, ':', projection.get(name), '->', val)
projection.set({name: float(val)})
# Constants
try:
constants = desc['constants']
except:
constants = {}
if verbose:
Global._print('load_parameters(): no constants.')
for name, value in constants.items():
if name in Global.list_constants(): # modify it
Global.get_constant(name).value = value
else: # create it
_ = Global.Constant(name, value)
# Global user-defined parameters
try:
network_parameters = {}
for name, val in desc['network'].items():
network_parameters[name] = float(val)
except:
network_parameters = {}
return network_parameters
def save_parameters(filename, net_id=0):
    """
    Saves the global parameters of a network (flag ``population`` for neurons, ``projection`` for synapses) to a JSON file.
    :param filename: path to the JSON file.
    :param net_id: ID of the network (default: 0, the global network).
    """
    import json

    # Network to export
    network = Global._network[net_id]

    # Assemble the JSON-serializable description:
    # one entry per population/projection mapping each global parameter to
    # its initial value, plus all user-defined constants.
    description = {
        'populations': {
            pop.name: {param: pop.init[param]
                       for param in pop.neuron_type.description['global']}
            for pop in network['populations']
        },
        'projections': {
            proj.name: {param: proj.init[param]
                        for param in proj.synapse_type.description['global']}
            for proj in network['projections']
        },
        'network': {},
        'constants': {
            constant.name: constant.value
            for constant in Global._objects['constants']
        },
    }

    # Write the description to disk, pretty-printed.
    with open(filename, 'w') as wfile:
        json.dump(description, wfile, indent=4)
# Backwards compatibility with XML
def load_parameter(in_file):
    """Deprecated alias: forwards to the legacy XML loader.

    Use :func:`load_parameters` with JSON files instead.
    """
    Global._warning('load_parameter() is deprecated. Use load_parameters() and JSON files instead.')
    return _load_parameters_from_xml(in_file)
def _load_parameters_from_xml(in_file):
    """
    Load parameter set from xml file.
    If the location of the xml file differs from the base directory, you need to provide relative or absolute path.
    :param in_file: either single or collection of strings.
    :return: dict mapping parameter names to int/float/str values.
    """
    try:
        from lxml import etree
    except:
        Global._print('lxml is not installed. Unable to load in xml format.')
        return
    par = {}
    damaged_pars = [] # for printout
    # Accept a single filename or any collection of filenames.
    files = []
    if isinstance(in_file,str):
        files.append(in_file)
    else:
        files = in_file
    for file in files:
        try:
            doc = etree.parse(file)
        except IOError:
            Global._print('Error: file \'', file, '\' not found.')
            continue
        matches = doc.findall('parameter')
        for parameter in matches:
            childs = parameter.getchildren()
            #TODO: allways correct ???
            # Each <parameter> is expected to hold exactly two tags:
            # <name> and <value>.
            if len(childs) != 2:
                Global._print('Error: to much tags in parameter')
            name=None
            value=None
            for child in childs:
                if child.tag == 'name':
                    name = child.text
                elif child.tag == 'value':
                    value = child.text
                    if value is None:
                        # Empty <value> tag: record as damaged, default to 0.
                        Global._print('Error: no value defined for',name)
                        damaged_pars.append(name)
                        value = 0
                    else:
                        # Coerce: try int first, then float, else keep the
                        # raw string.
                        try:
                            value = int(value)
                        except ValueError:
                            try:
                                value = float(value)
                            except ValueError:
                                value = value
                else:
                    Global._print('Error: unexpected xml-tag', child.tag)
            # Post-checks: missing tags or duplicate parameter names.
            if name is None:
                Global._print('Error: no name in parameter set.')
            elif value is None:
                Global._print('Error: no value in parameter set.')
                damaged_pars.append(name)
            elif name in par.keys():
                Global._print("Error: parameter",name,"already exists.")
                damaged_pars.append(name)
            else:
                par[name] = value
    return par
def _save_data(filename, data):
    """
    Internal routine to save data in a file.

    The storage format is chosen from the file extension:
    '.mat' (Matlab), '.gz' (gzipped pickle), '.npz' (compressed NumPy),
    anything else (plain pickle).

    :param filename: target path; missing parent directories are created.
    :param data: picklable dictionary to store.
    """
    # Check if the repertory exist
    (path, fname) = os.path.split(filename)
    if not path == '':
        if not os.path.isdir(path):
            Global._print('Creating folder', path)
            # makedirs (rather than mkdir) also creates missing intermediate
            # directories, so nested output paths do not raise.
            os.makedirs(path)
    extension = os.path.splitext(fname)[1]
    if extension == '.mat':
        Global._print("Saving network in Matlab format...")
        try:
            import scipy.io as sio
            sio.savemat(filename, data)
        except Exception as e:
            Global._error('Error while saving in Matlab format.')
            Global._print(e)
            return
    elif extension == '.gz':
        Global._print("Saving network in gunzipped binary format...")
        try:
            import gzip
        except ImportError:
            # Narrowed from a bare except: only a missing module is expected.
            Global._error('gzip is not installed.')
            return
        with gzip.open(filename, mode = 'wb') as w_file:
            try:
                pickle.dump(data, w_file, protocol=pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                Global._print('Error while saving in gzipped binary format.')
                Global._print(e)
                return
    elif extension == '.npz':
        Global._print("Saving network in Numpy format...")
        np.savez_compressed(filename, allow_pickle=True, **data )
    else:
        Global._print("Saving network in text format...")
        # save in Pythons pickle format
        with open(filename, mode = 'wb') as w_file:
            try:
                pickle.dump(data, w_file, protocol=pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                Global._print('Error while saving in text format.')
                Global._print(e)
                return
    return
def save(filename, populations=True, projections=True, net_id=0):#, pure_data=True):
    """
    Save the current network state (parameters and variables) to a file.
    * If the extension is '.npz', the data will be saved and compressed using `np.savez_compressed` (recommended).
    * If the extension is '.mat', the data will be saved as a Matlab 7.2 file. Scipy must be installed.
    * If the extension ends with '.gz', the data will be pickled into a binary file and compressed using gzip.
    * Otherwise, the data will be pickled into a simple binary text file using cPickle.
    **Warning:** The '.mat' data will not be loadable by ANNarchy, it is only for external analysis purpose.
    Example:
    ```python
    save('results/init.npz')
    save('results/init.data')
    save('results/init.txt.gz')
    save('1000_trials.mat')
    ```
    :param filename: filename, may contain relative or absolute path.
    :param populations: if True, population data will be saved (by default True)
    :param projections: if True, projection data will be saved (by default True)
    :param net_id: ID of the network (default: 0, the global network).
    """
    # Build the description dict, then dispatch on the file extension.
    data = _net_description(populations, projections, net_id)
    _save_data(filename, data)
def _load_data(filename):
"""
Internally loads data contained in a given file.
:param filename: path to the file.
:return: A dictionary with the connectivity and synaptic variables if the file ``filename`` is available otherwise None is returned.
"""
(_, fname) = os.path.split(filename)
extension = os.path.splitext(fname)[1]
if extension == '.mat':
Global._error('Unable to load Matlab format.')
return None
elif extension == '.gz':
try:
import gzip
except:
Global._error('gzip is not installed.')
return None
try:
with gzip.open(filename, mode = 'rb') as r_file:
desc = pickle.load(r_file)
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
elif extension == '.npz':
try:
data = np.load(filename, allow_pickle=True)
desc = {}
for attribute in data.files:
# We need to distinguish two cases: 1) full network save
# or 2) single pop/proj. The first case leads to a dictionary
# of several objects. The latter to a dictionary containing all
# values.
if data[attribute].dtype == np.dtype('O'):
# attribute is a collection of multiple objects
desc[attribute] = data[attribute].item(0)
else:
# attribute is a scalar/array
desc[attribute] = data[attribute]
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
else:
try:
with open(filename, mode = 'rb') as r_file:
desc = pickle.load(r_file)
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
def _load_connectivity_data(filename):
"""
Internally loads data contained in a given file.
:param filename: path to the file.
:return: A dictionary with the connectivity and synaptic variables if the file ``filename`` is available otherwise None is returned.
"""
(_, fname) = os.path.split(filename)
extension = os.path.splitext(fname)[1]
if extension == '.mat':
Global._error('Unable to load Matlab format.')
return None
elif extension == '.gz':
try:
import gzip
except:
Global._error('gzip is not installed.')
return None
try:
with gzip.open(filename, mode = 'rb') as r_file:
desc = pickle.load(r_file)
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
elif extension == '.npz':
try:
data = np.load(filename, allow_pickle=True)
desc = {}
for attribute in data.files:
# We need to distinguish two cases: 1) full network save
# or 2) single pop/proj. The first case leads to a dictionary
# of several objects. The latter to a dictionary containing all
# values.
desc[attribute] = data[attribute]
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
else:
try:
with open(filename, mode = 'rb') as r_file:
desc = pickle.load(r_file)
return desc
except Exception as e:
Global._print('Unable to read the file ' + filename)
Global._print(e)
return None
def load(filename, populations=True, projections=True, net_id=0):
    """
    Loads a saved state of the network.
    **Warning:** Matlab data can not be loaded.
    Example:
    ```python
    load('results/network.npz')
    ```
    :param filename: the filename with relative or absolute path.
    :param populations: if True, population data will be loaded (by default True)
    :param projections: if True, projection data will be loaded (by default True)
    :param net_id: ID of the network (default: 0, the global network).
    """
    desc = _load_data(filename)
    if desc is None:
        # File missing or unreadable; _load_data already reported the error.
        return
    # Restore the simulation clock if it was stored in the file.
    if 'time_step' in desc.keys():
        Global.set_current_step(desc['time_step'], net_id)
    if populations:
        # Over all populations
        for pop in Global._network[net_id]['populations']:
            # check if the population is contained in save file
            if pop.name in desc.keys():
                pop._load_pop_data(desc[pop.name])
    if projections:
        for proj in Global._network[net_id]['projections'] :
            # Projections absent from the file are silently skipped.
            if proj.name in desc.keys():
                proj._load_proj_data(desc[proj.name])
def _net_description(populations, projections, net_id=0):
    """
    Returns a dictionary containing the requested network data.
    :param populations: if True, the population data will be saved.
    :param projections: if True, the projection data will be saved.
    :param net_id: ID of the network (default: 0, the global network).
    """
    # Start with the bookkeeping entries.
    description = {
        'time_step': Global.get_current_step(net_id),
        'net_id': net_id,
    }
    saved_populations = []
    saved_projections = []

    if populations:
        for population in Global._network[net_id]['populations']:
            description[population.name] = population._data()
            saved_populations.append(population.name)

    if projections:
        for projection in Global._network[net_id]['projections']:
            # Some specific projections cannot be saved.
            if not projection._saveable:
                continue
            description[projection.name] = projection._data()
            saved_projections.append(projection.name)

    # Record which object names were actually stored.
    description['obj_names'] = {
        'populations': saved_populations,
        'projections': saved_projections,
    }
    return description
| ANNarchy/ANNarchy | ANNarchy/core/IO.py | Python | gpl-2.0 | 19,379 | [
"NEURON"
] | b360ea099a19210ebe9372e1711fa41922814ec39ddf7f37008cba9ec8c6d438 |
#! /usr/bin/env python
#All Functions
import cv2
import numpy as np
import matplotlib.pyplot as plt
import math
#Select the Image Filename
#FILENAME = 'chessboard.png'
#FILENAME = 'chessboard.jpg'
#FILENAME = 'chessboard_skew.jpg'
#FILENAME = 'checkerboard2.png'
#FILENAME ='image.jpg'
#FILENAME = "lena1.png"
class TCOLORS:
    """ANSI escape sequences for coloring/styling terminal output."""
    # Foreground colors (30-36).
    GRAY = '\x1b[30m'
    RED = '\x1b[31m'
    GREEN = '\x1b[32m'
    YELLOW = '\x1b[33m'
    BLUE = '\x1b[34m'
    PURPLE = '\x1b[35m'
    CYAN = '\x1b[36m'
    # Reset and text attributes.
    NORMAL = '\x1b[0m'
    BOLD = '\x1b[1m'
    UNDERLINE = '\x1b[4m'
def rgb2gray(rgb):
    '''
    Read in an RGB image and return a Grayscale image in ndarray format

    Uses the standard BT.601 luma weights (0.299 R, 0.587 G, 0.114 B).
    The original used 0.144 for blue, a typo: the weights must sum to 1,
    otherwise a pure-white pixel maps above the input range.
    '''
    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def gen_image_derivatives(gray_image):
    '''
    Pass in a grayscale image in ndarray format
    Returns tuple of image derivatives
    (image dx, image dy)

    Each interior pixel holds the averaged absolute Prewitt-style
    difference over its 3x3 neighbourhood; border pixels are 0.

    Fixes vs. the original:
    * the edge guard only zeroed image_x and then fell through, so border
      pixels were computed with numpy wraparound (negative) indexing;
    * np.ndarray left the never-visited last row/column uninitialized.
    '''
    height = len(gray_image)
    width = len(gray_image[0])
    # np.zeros so untouched border rows/columns are deterministically 0.
    image_x = np.zeros(shape=(height, width), dtype=np.int32)
    image_y = np.zeros(shape=(height, width), dtype=np.int32)
    # Only interior pixels have a full 3x3 neighbourhood.
    for y in range(1, height - 1):
        for x in range(1, width - 1):
            # X gradient: right-minus-left column difference on the three rows.
            dx = gray_image[y - 1, x + 1] - gray_image[y - 1, x - 1]
            dx += gray_image[y, x + 1] - gray_image[y, x - 1]
            dx += gray_image[y + 1, x + 1] - gray_image[y + 1, x - 1]
            image_x[y, x] = int(abs(dx) / 3)
            # Y gradient: bottom-minus-top row difference on the three columns.
            dy = gray_image[y + 1, x - 1] - gray_image[y - 1, x - 1]
            dy += gray_image[y + 1, x] - gray_image[y - 1, x]
            dy += gray_image[y + 1, x + 1] - gray_image[y - 1, x + 1]
            image_y[y, x] = int(abs(dy) / 3)
    return (image_x, image_y)
def gen_image_sobel(gray_image):
    '''
    Pass in a grayscale image in ndarray format
    Returns tuple of image derivatives
    (image dx, image dy)

    Sobel variant: the centre row/column difference is weighted by 2 and
    the sum divided by 4. Border pixels are 0.

    Fixes vs. the original:
    * the edge guard only zeroed image_x and then fell through, so border
      pixels were computed with numpy wraparound (negative) indexing;
    * np.ndarray left the never-visited last row/column uninitialized.
    '''
    height = len(gray_image)
    width = len(gray_image[0])
    # np.zeros so untouched border rows/columns are deterministically 0.
    image_x = np.zeros(shape=(height, width), dtype=np.int32)
    image_y = np.zeros(shape=(height, width), dtype=np.int32)
    # Only interior pixels have a full 3x3 neighbourhood.
    for y in range(1, height - 1):
        for x in range(1, width - 1):
            # X gradient with Sobel weights (1, 2, 1).
            dx = gray_image[y - 1, x + 1] - gray_image[y - 1, x - 1]
            dx += (gray_image[y, x + 1] * 2) - (gray_image[y, x - 1] * 2)
            dx += gray_image[y + 1, x + 1] - gray_image[y + 1, x - 1]
            image_x[y, x] = int(abs(dx) / 4)
            # Y gradient with Sobel weights (1, 2, 1).
            dy = gray_image[y + 1, x - 1] - gray_image[y - 1, x - 1]
            dy += (gray_image[y + 1, x] * 2) - (gray_image[y - 1, x] * 2)
            dy += gray_image[y + 1, x + 1] - gray_image[y - 1, x + 1]
            image_y[y, x] = int(abs(dy) / 4)
    return (image_x, image_y)
# Number of bits used when quantizing gaussian weights to integers
# (see convert_gaussian_to_digital_array).
GAUSSIAN_BITRANGE=18
GAUSSIAN_DIST=1 #Mapping from gaussian location to array position (1 = 1:1)
def gen_deviation_array(sigma, length=5):
    '''
    Build a 1-D Gaussian kernel of ``length`` samples centred on the
    middle element, with standard deviation ``sigma``.

    Peak normalization is intentionally left disabled (the values are the
    raw normal-distribution densities).
    '''
    centre = int(length / 2)
    coeff = 1 / (math.sqrt(2 * math.pi) * sigma)
    return [coeff * math.exp(-(abs(i - centre) ** 2) / (2 * sigma ** 2))
            for i in range(length)]
def gen_2d_deviation_array(sigma, length=5):
    '''
    Build a ``length`` x ``length`` Gaussian window with standard
    deviation ``sigma``, normalized so the centre element is exactly 1.0.
    '''
    midpoint = int(length / 2)
    # np.float was removed in NumPy 1.24 (it raised AttributeError);
    # use the concrete float64 dtype instead.
    sd = np.ndarray(shape=(length, length), dtype=np.float64)
    for y in range (length):
        for x in range(length):
            x_abs = abs((x - midpoint))
            y_abs = abs((y - midpoint))
            sd[y, x] = ((1 / (math.sqrt(2 * math.pi))) * math.exp(-(x_abs**2 + y_abs**2)/(2*sigma**2)))
    # Normalize so the peak (centre) value is 1; the constant prefactor
    # cancels out here.
    scale_value = 1 / sd[midpoint, midpoint]
    for y in range (length):
        for x in range(length):
            sd[y, x] = sd[y, x] * scale_value
    return sd
def convert_gaussian_to_digital_array(gaussian_array, bitrange = GAUSSIAN_BITRANGE, dist = GAUSSIAN_DIST):
    '''
    Generate an integer representation of a guassian array distribution,
    Takes in a floating point gaussian array as well as the bitrange to map to
    and the dist array element is (usually 1 for 1 to 1 pixel mapping)

    Each coefficient is scaled into [0, 2**bitrange - 1]; negative
    coefficients clamp to 0.

    NOTE(review): ``dist`` is accepted for interface compatibility but is
    not used by the conversion. (The original also computed an unused
    ``midpoint`` local, removed here.)
    '''
    maxvalue = (2 ** bitrange) - 1
    return [max(0, int(maxvalue * value)) for value in gaussian_array]
def generate_matrix_values(ix, iy, ga):
    '''
    ix = derivative of image WRT X
    iy = derivative of image WRT Y
    ga = Gaussian Array
    Generate the following arrays
    Sum_(u,v)<Ix^2 * W(u,v)>
    Sum_(u,v)<IxIy * W(u,v)>
    Sum_(u,v)<Iy^2 * W(u,v)>

    These are the A, B=C, D entries of the Harris structure matrix for
    each pixel, accumulated over the window ``ga``.
    '''
    width = len(ix[0])
    height = len(ix)
    win_height = ga.shape[0]
    win_width = ga.shape[1]
    win_x_midpoint = int(win_width / 2)
    win_y_midpoint = int(win_height / 2)
    #print ("window width: %d" % win_width)
    #print ("window height: %d" % win_height)
    #print ("window midpoint x: %d" % win_x_midpoint)
    #print ("window midpoint y: %d" % win_y_midpoint)
    a_out = np.ndarray(shape=(height, width), dtype=np.int32)
    bc_out = np.ndarray(shape=(height, width), dtype=np.int32)
    d_out = np.ndarray(shape=(height, width), dtype=np.int32)
    for y in range(0, height):
        for x in range(0, width):
            #Get rid of edge cases
            # Zero first so border pixels (where the window would fall off
            # the image) hold a defined value before the guard below.
            a_out[y, x] = 0
            bc_out[y, x] = 0
            d_out[y, x] = 0
            if (x < win_x_midpoint) or (y < win_y_midpoint) or (x > width - win_x_midpoint - 1) or (y > height - win_y_midpoint - 1):
                continue
            # Accumulate the weighted products over the full window.
            for wy in range (win_height):
                for wx in range(win_width):
                    #X Values
                    #pos = win_midpoint - i
                    xpos = wx - win_x_midpoint
                    ypos = wy - win_y_midpoint
                    # NOTE(review): the window is indexed ga[wx, wy]
                    # (x first). Harmless for the symmetric windows built by
                    # gen_2d_deviation_array, but verify before using an
                    # asymmetric window.
                    a_out[y, x] += float(ix[y + ypos, x + xpos] * ix[y + ypos, x + xpos] * ga[wx, wy])
                    bc_out[y, x] += float(ix[y + ypos, x + xpos] * iy[y + ypos, x + xpos] * ga[wx, wy])
                    d_out[y, x] += float(iy[y + ypos, x + xpos] * iy[y + ypos, x + xpos] * ga[wx, wy])
    return (a_out, bc_out, d_out)
def generate_mc_debug(a, bc, d, k, threshold):
    '''
    Return an array of corners that are found using the 'k' value
    Arguments:
    a: Sum_(u,v) Ix(u,v)^2 * W(u,v)
    bc: Sum_(u,v) Ix(u,v)Iy(u,v) * W(u,v)
    d: Sum_(u,v) Iy(u,v)^2 * W(u,v)
    k: Scaling value of corners to detect
    threshold: value at which a 'good' corner is detected
    Return a new image with only the corners highlighted and the intermediately
    generated images (corners, det, trc)

    Fixes vs. the original: removed the unused ``rarray`` allocation and
    replaced the Python-2-only print statement with the parenthesized
    form, which is valid on both Python 2 and 3.
    '''
    width = len(a[0])
    height = len(a)
    corners = np.ndarray(shape=(height, width))
    det = np.ndarray(shape=(height, width))
    trc = np.ndarray(shape=(height, width))
    max_r = 0
    for y in range(0, height):
        for x in range(0, width):
            # Harris response: R = det(M) - k * trace(M)^2
            det[y, x] = ((a[y, x] * d[y, x]) - (bc[y, x] * bc[y, x]))
            trc[y, x] = k * ((a[y, x] + d[y, x]) ** 2)
            r = float(det[y, x] - trc[y, x])
            if r > max_r:
                max_r = r
            # A pixel is a corner only if R is positive and above threshold.
            if r < 0:
                corners[y, x] = 0
            elif r > threshold:
                corners[y, x] = 255
            else:
                corners[y, x] = 0
    print("Max R: %d" % max_r)
    return (corners, det, trc)
def generate_mc(a, bc, d, k, threshold):
    '''
    Return an array of corners that are found using the 'k' value
    Arguments:
    a: Sum_(u,v) Ix(u,v)^2 * W(u,v)
    bc: Sum_(u,v) Ix(u,v)Iy(u,v) * W(u,v)
    d: Sum_(u,v) Iy(u,v)^2 * W(u,v)
    k: Scaling value of corners to detect
    threshold: value at which a 'good' corner is detected
    Return a new image with only the corners highlighted

    Fixes vs. the original: removed the unused ``rarray`` allocation and
    replaced the Python-2-only print statement with the parenthesized
    form, which is valid on both Python 2 and 3.
    '''
    width = len(a[0])
    height = len(a)
    corners = np.ndarray(shape=(height, width))
    max_r = 0
    for y in range(0, height):
        for x in range(0, width):
            # Harris response: R = det(M) - k * trace(M)^2
            r = float(((a[y, x] * d[y, x]) - (bc[y, x] * bc[y, x])) - k * ((a[y, x] + d[y, x]) ** 2))
            if r > max_r:
                max_r = r
            # A pixel is a corner only if R is positive and above threshold.
            if r < 0:
                corners[y, x] = 0
            elif r > threshold:
                corners[y, x] = 255
            else:
                corners[y, x] = 0
    print("Max R: %d" % max_r)
    return corners
| CospanDesign/python | image_processor/image_processor.py | Python | mit | 9,649 | [
"Gaussian"
] | ba62169f99c489e0e7c7fd6495da78606635087f5c511e1940c7e71cb511ac3f |
"""
defines the ZoomStyle class
"""
import vtk
#left_button_down=self._zoom_picker,
#left_button_up=self._zoom_picker,
#right_button_down=self._zoom_reset,
class ZoomStyle(vtk.vtkInteractorStyleRubberBandZoom):
    """Custom Rubber Band Zoom

    Collects the two corner points of the rubber band through VTK mouse
    events and applies a camera zoom matching the selected region.
    """
    def __init__(self, parent=None):
        """creates the ZoomStyle instance

        ``parent`` is the GUI window providing ``vtk_interactor``,
        ``rend``, ``actions`` and ``setup_mouse_buttons``.
        """
        self.AddObserver("LeftButtonPressEvent", self.left_button_press_event)
        self.AddObserver("LeftButtonReleaseEvent", self.left_button_release_event)
        self.AddObserver("RightButtonPressEvent", self.right_button_press_event)
        self.parent = parent
        self.zoom_button = self.parent.actions['zoom']
        # Pixel coordinates of the press/release points of the rubber band.
        self.picker_points = []

    #def leftButtonPressEvent(self, obj, event):
        #pass

    def left_button_press_event(self, obj, event):
        """
        gets the first point
        """
        # Forward the event to VTK first, then record the pixel position.
        self.OnLeftButtonDown()
        pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()
        self.picker_points.append((pixel_x, pixel_y))

    def left_button_release_event(self, obj, event):
        """
        gets the second point and zooms
        TODO: doesn't handle panning of the camera to center the image
        with respect to the selected limits
        """
        self.OnLeftButtonUp()
        pixel_x, pixel_y = self.parent.vtk_interactor.GetEventPosition()
        self.picker_points.append((pixel_x, pixel_y))
        camera = self.parent.rend.GetActiveCamera()
        x, y, z = camera.GetPosition()
        # Size of the dragged rectangle in pixels.
        p1x, p1y = self.picker_points[0]
        p2x, p2y = self.picker_points[1]
        dx = abs(p1x - p2x)
        dy = abs(p1y - p2y)
        #x_avg = (p1x + p2x) / 2.
        #y_avg = (p1y + p2y) / 2.
        main_window = self.parent.window()
        width = main_window.frameGeometry().width()
        height = main_window.frameGeometry().height()
        #print('dx=%s dy=%s' % (dx, dy))
        # otherwise it's a failed zoom (they didn't hold the button down)
        self.picker_points = []
        if dx > 0 and dy > 0:
            #xmin = min(p1x, p2x)
            #ymin = min(p1y, p2y)
            #xmax = max(p1x, p2x)
            #ymax = max(p1y, p2y)
            # Zoom by the most constraining of the two axis ratios so the
            # whole selected region stays visible.
            aspect_ratio_x = width / dx
            aspect_ratio_y = height / dy
            zoom_factor = min([aspect_ratio_x, aspect_ratio_y])
            #distance = camera.GetDistance()
            #a = vtk.vtkCamera()
            # +---------+ --- ymax
            # |         |
            # |         |
            # |         |
            # +---------+ --- ymin
            #
            #camera.SetScreenBottomLeft(xmin, ymin)
            #camera.SetScreenBottomRight(float, float)
            #camera.SetScreenTopRight(float, float)
            #print('  p1 =', p1x, p1y)
            #print('  p2 =', p2x, p2y)
            #print('  z=%s distance=%s' % (z, distance))
            #print('  zoom_factor = %s\n' % zoom_factor)
            #camera.SetPosition(x, y, z)
            self.parent.zoom(zoom_factor)
        # Releasing the button always leaves zoom mode and restores the
        # default mouse bindings.
        self.zoom_button.setChecked(False)
        self.parent.setup_mouse_buttons(mode='default')

    def right_button_press_event(self, obj, event):
        """cancels the zoom button"""
        self.zoom_button.setChecked(False)
        self.parent.setup_mouse_buttons(mode='default')
        self.parent.vtk_interactor.Render()
| saullocastro/pyNastran | pyNastran/gui/styles/zoom_style.py | Python | lgpl-3.0 | 3,355 | [
"VTK"
] | 9514d75227f826e0b4d917a6d58f130d09916853b7d264a43664fb65cbd4350a |
#!/usr/bin/env python3
import sys

try:
    from netCDF4 import Dataset
except ImportError:
    # Fix: the original never imported sys, so this error path raised a
    # NameError on sys.exit(1) instead of exiting cleanly. The bare
    # except is also narrowed to ImportError.
    print("netCDF4 is not installed!")
    sys.exit(1)

import numpy as np
import pylab as plt

from optparse import OptionParser
# Command-line interface: the single positional argument is the NetCDF
# file produced by a PISM flowline run.
parser = OptionParser()
parser.usage = "usage: %prog [options] FILE"
parser.description = "A script to compare PISM flowline velocities with Stokes solution."
(options, args) = parser.parse_args()
# Toggle for the mass-balance panel on the upper axis.
plot_acab = True
if len(args) != 1:
    print('wrong number of arguments, 1 expected')
    exit(1)
try:
    nc = Dataset(args[0], 'r')
except:
    print("file %s not found ... ending ..." % args[0])
    exit(2)
def permute(variable, output_order=('time', 'z', 'zb', 'y', 'x')):
    """Permute dimensions of a NetCDF variable to match the output storage order."""
    # Keep only the output dimensions that the variable actually has,
    # preserving the requested order.
    present = [dim for dim in output_order if dim in variable.dimensions]
    # Axis permutation: for each input dimension, its position in the
    # desired order.
    axes = [present.index(dim) for dim in variable.dimensions]
    if axes:
        return np.transpose(variable[:], axes)
    # Scalar variable (no dimensions): nothing to permute.
    return variable[:]
# ---- Read geometry and velocity fields from the NetCDF file ----
x = nc.variables["x"][:]
b = np.squeeze(nc.variables["topg"][:])
s = np.squeeze(nc.variables["usurf"][:])
h = np.squeeze(nc.variables["thk"][:])
z = nc.variables["z"][:]
# Mask out (essentially) ice-free columns.
mask = h <= 1
us = np.ma.array(data=np.squeeze(nc.variables["uvelsurf"][:]), mask=mask)
ub = np.ma.array(data=np.squeeze(nc.variables["uvelbase"][:]), mask=mask)
# stuff needed for contour plots
xx = (np.tile(x, [len(z), 1]))
zz = ((np.tile(z, [len(x), 1])).transpose() + b)
# ignore the first level
cts = np.squeeze(permute(nc.variables["cts"]))
liqfrac = np.squeeze(permute(nc.variables["liqfrac"]))
temppa = np.squeeze(permute(nc.variables["temp_pa"]))
# Mask all grid points above the ice surface.
mask2 = np.zeros_like(cts)
mask2[zz > s] = 1
cts = np.ma.array(data=cts, mask=mask2)
liqfrac = np.ma.array(data=liqfrac, mask=mask2)
temppa = np.ma.array(data=temppa, mask=mask2)
# Contour level of the CTS
cts_level = [1]
liqfrac_levels = np.arange(0, 2.5, .25)
temppa_levels = [-6, -5, -4, -3, -2, -1, -.0001]
# ---- Figure layout: velocity panel on top, cross-section below ----
fig = plt.figure(figsize=(6.4, 7.4))
axUpperLeft = plt.axes([0.1, 0.6, 0.8, 0.25])
axLower = plt.axes([0.1, 0.05, 0.8, 0.5])
# Surface (solid) and basal (dashed) velocities.
axUpperLeft.plot(x, us, color='#377EB8', lw=1.5)
axUpperLeft.plot(x, ub, '--', color='#377EB8', lw=1.5)
axUpperLeft.axes.set_xlim(-250, 3500)
axUpperLeft.axes.set_ylabel("velocity [m a$^{-1}$]")
plt.setp(axUpperLeft, xticks=[])
if (plot_acab == True):
    # Climatic mass balance on a twinned y-axis (converted to m/a ice
    # equivalent via the 910 kg/m^3 ice density).
    acab = np.squeeze(nc.variables["climatic_mass_balance"][:])
    axUpperRight = axUpperLeft.twinx()
    axUpperRight.plot(x, acab / 910.0, color='#984EA3', lw=1.5)
    axUpperRight.axes.set_ylabel("mass balance [m a$^{-1}$]")
    axUpperRight.axes.set_xlim(-250, 3500)
# Bed and surface outlines of the glacier cross-section.
axLower.plot(x, b, color='black', lw=1.5)
axLower.plot(x, s, color='black', lw=1.5)
# Liquid water fraction (reds, %) and pressure-adjusted temperature (blues).
c1 = axLower.contourf(xx, zz, liqfrac * 100, liqfrac_levels, cmap=plt.cm.Reds)
plt.colorbar(mappable=c1, ax=axLower, orientation='horizontal', pad=0.05, shrink=0.75, extend="max")
c2 = axLower.contourf(xx, zz, temppa, temppa_levels, cmap=plt.cm.Blues_r)
plt.colorbar(mappable=c2, ax=axLower, orientation='horizontal',
             ticks=[-6, -5, -4, -3, -2, -1, 0], pad=0.20, shrink=0.75)
# Cold-temperate transition surface as a dashed contour.
axLower.contour(xx, zz, cts, cts_level, colors='black', linestyles='dashed')
axLower.axes.set_xlim(-250, 3500)
axLower.axes.set_ylim(1100, 1800)
axLower.axes.set_xlabel("distance from bergschrund [m]")
axLower.axes.set_ylabel("elevation [m a.s.l.]")
plt.savefig('sg_results.pdf', bbox_inches='tight', pad_inches=0.35)
nc.close()
| pism/pism | examples/storglaciaren/plot_flowline_results.py | Python | gpl-3.0 | 3,571 | [
"NetCDF"
] | 4f71875098f9ebd8922180e2898218812697cd101c4c92faa7787f6b92f9aa75 |
# Django Imports
from django import template
from django.utils.safestring import mark_safe
'''
Name and description for trade resources.
'''
register = template.Library()

# Resource id -> (building name, flavour description). Using a lookup table
# instead of the original 12-branch if/elif chain; the original also raised
# an opaque UnboundLocalError for unknown ids.
_RESOURCES = {
    1: ('Salmonite Factory',
        'Abbasid Salmonite is a vat-grown synthetic food that approximates the taste of Old Terran salmon. A galactic delicacy.'),
    2: ('Drone Factory',
        'Personal Drones perform a variety of functions for the elderly and infirm, like cooking and cleaning.'),
    3: ('Ice Moss Extractor',
        'Ice Moss is an exquisite spice that grows only on the coldest areas of Amyntasian moons.'),
    4: ('Hyperfiber Factory',
        'Hyperfibers are nanoengineered strands of molecular carbon, used to create many light, strong, and heat-resistant materials.'),
    5: ('Crystal Factory',
        'Dyon Crystals are synthetic crystals used to regulate domestic warp applications, and many other high-power processes.'),
    6: ('Small Arms Factory',
        'There is no shortage of petty criminal scum in the galaxy. Stop them dead in their tracks with these Bion-produced weapons.'),
    7: ('Boson Condenser',
        'Boson Condensate is an exotic form of matter used for high density backup energy storage.'),
    8: ('Chronimium Gas Extractor',
        'Chronimium Gas, when purified and processed, is used as a moderator for various high-power reactions.'),
    9: ('Tetramite Ore Extractor',
        'Tetramite is a versatile material used in all sorts of domestic construction projects.'),
    10: ('Spider Factory',
         'Maintenance Spiders are small machines that crawl over structures and repair everything from frayed wires to burst pipes.'),
    11: ('Holo Studio',
         'Draco makes various entertainment holos that allow the viewer to be a part of the latest action/detective/salacious story.'),
    12: ('Quantum CPU Factory',
         'Quantum Dot CPUs are the most advanced kind of processor currently in production, with a vast array of applications.'),
}


def resourcemine(resource):
    """Render the table cells describing a resource building.

    :param resource: integer resource id (1-12).
    :raises ValueError: if the id is not a known resource.
    """
    try:
        name, description = _RESOURCES[resource]
    except KeyError:
        raise ValueError('Unknown resource id: %s' % resource)
    toreturn = '<td class="bigger" colspan="2">Build %s</td></tr><tr><td colspan="2">%s</td>' % (name, description)
    return mark_safe(toreturn)

register.simple_tag(resourcemine)
| heidi666/WorldsAtWar | wawmembers/templatetags/resourcemine.py | Python | mit | 2,702 | [
"CRYSTAL",
"Galaxy"
] | 9b3c158f8da902570e97e6f2b51f0bf82d75e356f3bd84e0b0cea4a67a2358b6 |
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew M. Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import binascii
from Crypto.Util.py3compat import *
from functools import reduce
# Lookup table: nibble value (0-15) -> its 4-character binary string.
binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
        6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
        12:'1100', 13:'1101', 14:'1110', 15:'1111'}
def _key2bin(s):
    "Convert a key into a string of binary digits"
    # Each byte contributes its high and low nibble, looked up in the
    # module-level `binary` table.
    return ''.join(binary[bord(ch) >> 4] + binary[bord(ch) & 15] for ch in s)
def _extract(key, start, length):
"""Extract a bitstring(2.x)/bytestring(2.x) from a string of binary digits, and return its
numeric value."""
k=key[start:start+length]
return reduce(lambda x,y: x*2+ord(y)-48, k, 0)
def key_to_english (key):
    """key_to_english(key:string(2.x)/bytes(3.x)) : string
    Transform an arbitrary key into a string containing English words.
    The key length must be a multiple of 8.

    Per RFC 1751, each 64-bit subkey plus a 2-bit parity is split into
    six 11-bit indices into the 2048-entry word list.
    """
    english=''
    for index in range(0, len(key), 8): # Loop over 8-byte subkeys
        subkey=key[index:index+8]
        # Compute the parity of the key
        # (sum of all 2-bit groups of the 64-bit subkey).
        skbin=_key2bin(subkey) ; p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        # Append parity bits to the subkey
        # (the two parity bits sit in the top of the extra byte).
        skbin=_key2bin(subkey+bchr((p<<6) & 255))
        # Emit one word per 11-bit group of the 66 relevant bits.
        for i in range(0, 64, 11):
            english=english+wordlist[_extract(skbin, i, 11)]+' '
    return english[:-1]                 # Remove the trailing space
def english_to_key (s):
    """english_to_key(string):string(2.x)/bytes(3.x)
    Transform a string into a corresponding key.
    The string must contain words separated by whitespace; the number
    of words must be a multiple of 6.

    Raises ValueError if the 2-bit parity check fails for any 6-word group.
    """
    L=s.upper().split() ; key=b('')
    # Each group of 6 words encodes 66 bits: 64 key bits plus 2 parity bits
    for index in range(0, len(L), 6):
        # char is a 9-byte working buffer; bits counts bits consumed so far
        sublist=L[index:index+6] ; char=9*[0] ; bits=0
        for i in sublist:
            # 11-bit index of the word in wordlist, left-shifted so it lands
            # on the current bit position within the output buffer
            index = wordlist.index(i)
            shift = (8-(bits+11)%8) %8
            y = index << shift
            # Split the shifted value into (up to) three byte-wide pieces
            cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
            if (shift>5):
                # Shifted value spans three bytes of the buffer
                char[bits>>3] = char[bits>>3] | cl
                char[(bits>>3)+1] = char[(bits>>3)+1] | cc
                char[(bits>>3)+2] = char[(bits>>3)+2] | cr
            elif shift>-3:
                # Shifted value spans two bytes of the buffer
                char[bits>>3] = char[bits>>3] | cc
                char[(bits>>3)+1] = char[(bits>>3)+1] | cr
            else: char[bits>>3] = char[bits>>3] | cr
            bits=bits+11
        subkey=reduce(lambda x,y:x+bchr(y), char, b(''))
        # Check the parity of the resulting key
        skbin=_key2bin(subkey)
        p=0
        for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
        # The low 2 bits of the summed parity must match bits 64-65
        if (p&3) != _extract(skbin, 64, 2):
            raise ValueError("Parity error in resulting key")
        key=key+subkey[0:8]
    return key
# The 2048-entry dictionary from RFC 1751: 1-to-3-letter words first, then
# 4-letter words.  Each word encodes an 11-bit value equal to its list index;
# the ordering must not be changed or key<->English conversion breaks.
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
"YELL", "YOGA", "YOKE" ]
if __name__ == '__main__':
    # Round-trip self-test using the RFC 1751 test vectors.
    data = [
        ('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
        ('CCAC2AED591056BE4F90FD441C534766',
         'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
        ('EFF81F9BFBC65350920CDD7416DE8009',
         'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL'),
    ]
    for hexkey, expected in data:
        print('Trying key', hexkey)
        raw = binascii.a2b_hex(hexkey)
        produced = key_to_english(raw)
        if produced != expected:
            print('key_to_english fails on key', repr(raw), ', producing', str(produced))
        recovered = english_to_key(expected)
        if recovered != raw:
            print('english_to_key fails on key', repr(raw), ', producing', repr(recovered))
| nparley/mylatitude | lib/Crypto/Util/RFC1751.py | Python | mit | 21,208 | [
"Elk",
"MOE"
] | a66f628a5027a2194d395170a30c5709b916294485b6271bcf1425cdd0460ef9 |
#!/usr/bin/env python
'''
Master load script that is driven by a dictionary of campaigns and the
commands that load them. This dictionary also drives the campaigns
served by a deployment of stoqs via the stoqs/campaigns.py file.
Mike McCann
MBARI 5 September 2015
'''
import os
import sys
# Make the stoqs app directory importable and configure Django settings
# before any Django/stoqs imports below; django.setup() must run first.
app_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, app_dir)
os.environ['DJANGO_SETTINGS_MODULE']='config.settings.local'
import django
django.setup()
import time
import logging
import datetime
import fileinput
import glob
import importlib
import platform
import socket
import subprocess
from git import Repo
from shutil import copyfile
from django.conf import settings
from django.core.management import call_command
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import ConnectionDoesNotExist, OperationalError, ProgrammingError
from django.db import transaction, connections
from slacker import Slacker
from stoqs.models import ResourceType, Resource, Campaign, CampaignResource, MeasuredParameter, \
        SampledParameter, Activity, Parameter, Platform
from timing import MINUTES
def tail(f, n):
    '''Return the last n lines of file f as a single string without the
    trailing newline, equivalent to `tail -n` output via subprocess.getoutput.

    BUGFIX: the previous implementation interpolated the file name into a
    shell command string, which broke on paths containing spaces or shell
    metacharacters (and was a shell-injection hazard).  Reading the file
    directly also removes the dependency on an external `tail` binary.

    Raises OSError if the file cannot be opened (callers check
    os.path.isfile() first).
    '''
    with open(f, errors='replace') as fh:
        text = ''.join(fh.readlines()[-n:])
    # subprocess.getoutput() stripped a single trailing newline; match that
    return text[:-1] if text.endswith('\n') else text
class DatabaseCreationError(Exception):
    '''Raised when a campaign database cannot be created on the server.'''
    pass
class DatabaseLoadError(Exception):
    '''Raised when a load fails, e.g. no Campaign record appears in time.'''
    pass
class Loader(object):
    '''Create, load, and annotate STOQS campaign databases as directed by the
    campaigns dictionary module and the parsed command-line arguments
    (expected in self.args).
    '''
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    # Provenance name/value pairs accumulated during a load
    # NOTE(review): class-level dict, shared by all Loader instances
    prov = {}
    def _create_db(self, db):
        '''Create database. Invoking user should have privileges to connect to
        the database server as user postgres. Only the port number from the
        DATABASE_URL is extracted to pass to the psql commands, we use the
        local Unix domain socket to connect to the database.

        Raises DatabaseCreationError if creation fails (after one retry
        without the DROP when --clobber was given).
        '''
        commands = ' && '.join((
            'psql -p {port} -c \"CREATE DATABASE {db} owner=stoqsadm;\" -U postgres',
            'psql -p {port} -c \"ALTER DATABASE {db} set timezone=\'GMT\';\" -U postgres',
            'psql -p {port} -c \"GRANT ALL ON ALL TABLES IN SCHEMA public TO stoqsadm;\" -d {db} -U postgres'))
        createdb = commands.format(**{'port': settings.DATABASES[db]['PORT'], 'db': db})
        if self.args.clobber:
            # Prepend a DROP so any existing database is clobbered first
            createdb = ('psql -p {port} -c \"DROP DATABASE {db};\" -U postgres && '
                    ).format(**{'port': settings.DATABASES[db]['PORT'], 'db': db}) + createdb
        self.logger.info('Creating database %s', db)
        self.logger.debug('createdb = %s', createdb)
        ret = os.system(createdb)
        self.logger.debug('ret = %s', ret)
        if ret != 0:
            # Try again without DROP command if --clobber is specified
            # (the DROP fails when the database does not exist yet)
            if self.args.clobber:
                createdb = commands.format(**{'port': settings.DATABASES[db]['PORT'], 'db': db})
                self.logger.debug('createdb = %s', createdb)
                ret = os.system(createdb)
                self.logger.debug('ret = %s', ret)
                if ret != 0:
                    raise DatabaseCreationError((
                        'Failed to create {} even after trying without DROP command').format(db))
                else:
                    # NOTE(review): returning here skips the CREATE EXTENSION
                    # step below on the retry path — confirm this is intended
                    return
            raise DatabaseCreationError(('Failed to create {}').format(db))
        # Create postgis extensions as superuser
        create_ext = ('psql -p {port} -c \"CREATE EXTENSION postgis;\" -d {db} -U postgres && '
                ).format(**{'port': settings.DATABASES[db]['PORT'], 'db': db})
        create_ext += ('psql -p {port} -c \"CREATE EXTENSION postgis_topology;\" -d {db} -U postgres'
                ).format(**{'port': settings.DATABASES[db]['PORT'], 'db': db})
        self.logger.info('Creating postgis extensions for database %s', db)
        self.logger.debug('create_ext = %s', create_ext)
        ret = os.system(create_ext)
        self.logger.debug('ret = %s', ret)
def _copy_log_file(self, log_file):
loadlogs_dir = os.path.join(settings.MEDIA_ROOT, 'loadlogs')
try:
os.makedirs(loadlogs_dir)
except OSError:
if not os.path.isdir(loadlogs_dir):
raise
log_file_url = os.path.basename(log_file) + '.txt'
try:
copyfile(log_file , os.path.join(loadlogs_dir, log_file_url))
self.prov['load_logfile'] = os.path.join(settings.MEDIA_URL, 'loadlogs', log_file_url)
except IOError as e:
self.logger.warn(e)
    def _provenance_dict(self, db, load_command, log_file):
        '''Return a dictionary of provenance Resource items. Special handling
        for --background operation: don't tail log file, instead add those
        items when run with the --updateprovenance flag.
        '''
        repo = Repo(app_dir, search_parent_directories=True)
        if not self.args.updateprovenance:
            # Inserted when load executed with or without --background
            self.prov['load_command'] = load_command
            self.prov['gitorigin'] = repo.remotes.origin.url
            self.prov['gitcommit'] = repo.head.commit.hexsha
            self.prov['environment'] = platform.platform() + " python " + sys.version.split('\n')[0]
            self.prov['load_date_gmt'] = datetime.datetime.utcnow()
        if not self.args.background and self.args.updateprovenance:
            if not os.path.isfile(log_file):
                self.logger.warn('Load log file not found: %s', log_file)
            else:
                # Look for line printed by timing module
                for line in tail(log_file, 50).split('\n'):
                    if line.startswith(MINUTES):
                        self.prov['minutes_to_load'] =line.split(':')[1]
                try:
                    # Inserted after the log_file has been written with --updateprovenance
                    # Last 3 lines are expected to be `time`-style real/user/sys output
                    # NOTE(review): 'exection' (sic) — these names are persisted
                    # as Resources; renaming would orphan existing records
                    self.prov['real_exection_time'] = tail(log_file, 3).split('\n')[0].split('\t')[1]
                    self.prov['user_exection_time'] = tail(log_file, 3).split('\n')[1].split('\t')[1]
                    self.prov['sys_exection_time'] = tail(log_file, 3).split('\n')[2].split('\t')[1]
                except IndexError:
                    self.logger.debug('No execution_time information in %s', log_file)
            # Counts
            self.prov['MeasuredParameter_count'] = MeasuredParameter.objects.using(db).count()
            self.prov['SampledParameter_count'] = SampledParameter.objects.using(db).count()
            self.prov['Parameter_count'] = Parameter.objects.using(db).count()
            self.prov['Activity_count'] = Activity.objects.using(db).count()
            self.prov['Platform_count'] = Platform.objects.using(db).count()
def _log_file(self, script, db, load_command):
if self._has_no_t_option(db, load_command):
log_file = os.path.join(os.path.dirname(script.split()[0]), db + '.out')
else:
if self.args.test:
log_file = script.split()[0].replace('.py', '_t.out')
else:
log_file = script.split()[0].replace('.py', '.out')
return log_file
def _has_no_t_option(self, db, load_command):
return ((db.endswith('_o') and '-o' in load_command) or
'ROVCTD' in load_command or
load_command.endswith('.sh') or
'&&' in load_command)
    def _drop_indexes(self):
        '''Remove migration files and flip db_index=True to db_index=False in
        models.py so data can be loaded without index-maintenance overhead;
        _create_indexes() reverses the edit afterward.
        '''
        # As of 2017 the STOQS project does not commit migration files.
        # If significant schema changes are made the SOP is to reload databases;
        # this also helps ensure that the archived NetCDF files are still accessible.
        # To try loading data with more efficiency in the database this method
        # removes migrations and modifies the models.py file to remove indexes.
        # The migration files need to be removed because of this Django patch:
        # https://code.djangoproject.com/ticket/28052
        migration_files = glob.glob(os.path.join(app_dir, 'stoqs/migrations', '00*.py'))
        for m_f in migration_files:
            if '0001_initial.py' not in m_f:
                self.logger.info('Removing migration file: %s', m_f)
                os.remove(m_f)
        model_files = (os.path.join(app_dir, 'stoqs/migrations/0001_initial.py'),
                       os.path.join(app_dir, 'stoqs/models.py'))
        # Edit the files in place (keeping .bak backups); under fileinput's
        # inplace redirection print() writes back to the file being edited
        with fileinput.input(files=model_files, inplace=True, backup='.bak') as f:
            for line in f:
                if '_index=True' in line:
                    print(line.replace('_index=True', '_index=False'), end='')
                else:
                    print(line, end='')
    def _create_indexes(self):
        '''Reverse _drop_indexes(): flip db_index=False back to db_index=True
        in models.py so the indexes are restored.
        '''
        # Add indexes back to models.py
        ##migration_files = glob.glob(os.path.join(app_dir, 'stoqs/migrations', '00*.py'))
        ##for m_f in migration_files:
        ##    if '0001_initial.py' not in m_f:
        ##        self.logger.info('Removing migration file: %s', m_f)
        ##        os.remove(m_f)
        model_file = os.path.join(app_dir, 'stoqs/models.py')
        ##model_file = os.path.join(app_dir, 'stoqs/migrations/0001_initial.py')
        # In-place edit; print() writes back to the file under fileinput
        with fileinput.input(files=(model_file,), inplace=True) as f:
            for line in f:
                if '_index=False' in line:
                    print(line.replace('_index=False', '_index=True'), end='')
                else:
                    print(line, end='')
def checks(self):
# That stoqs/campaigns.py file can be loaded
try:
campaigns = importlib.import_module(self.args.campaigns)
except ImportError:
print('The stoqs/campaigns.py could not be loaded. '
'Create a symbolic link named "campaigns.py" '
'pointing to the file for your site.')
print('Use stoqs/mbari_campaigns.py as a model')
sys.exit()
if self.args.db:
for d in self.args.db:
if d not in list(campaigns.campaigns.keys()):
self.logger.warn('%s not in %s', d, self.args.campaigns)
# That can connect as user postgres for creating and dropping databases
cmd = ('psql -p {} -c "\q" -U postgres').format(settings.DATABASES['default']['PORT'])
self.logger.debug('cmd = %s', cmd)
ret = os.system(cmd)
self.logger.debug('ret = %s', ret)
if ret != 0:
self.logger.warn('Cannot connect to the database server as user postgres. Either run as user postgres or alter your pg_hba.conf file.')
suggestion = '''
To permit simpler loading of your databases you may want to temporarilry open
up your server to allow any local acccount to connect as user postgres without
a password. WARNING: this opens up your server to potential attack, you should
undo this change when done with your loads.
In the "local" section of your /var/lib/pgsql/<version>/data/pg_hba.conf file
add a 'trust' entry for all local accounts above the other entries, e.g.:
# "local" is for Unix domain socket connections only
local all all trust
local all all peer
'''
self.logger.info(suggestion)
# That the user really wants to reload all production databases
if self.args.clobber and not self.args.test:
print(("On the server running on port =", settings.DATABASES['default']['PORT']))
print("You are about to drop all database(s) in the list below and reload them:")
print((('{:30s} {:>15s}').format('Database', 'Last Load time (min)')))
print((('{:30s} {:>15s}').format('-'*25, '-'*20)))
nothing_printed = True
for db,load_command in list(campaigns.campaigns.items()):
if self.args.db:
if db not in self.args.db:
continue
script = os.path.join(app_dir, 'loaders', load_command)
try:
with transaction.atomic(using=db):
minutes_to_load = CampaignResource.objects.using(db).get(
resource__name='minutes_to_load').resource.value
print(f"{db:30s} {minutes_to_load:>20}")
nothing_printed = False
except (CampaignResource.DoesNotExist, CampaignResource.MultipleObjectsReturned,
OperationalError, ProgrammingError) as e:
self.logger.debug(str(e))
self.logger.debug('Closing all connections:')
for conn in connections.all():
if conn.settings_dict['NAME'] not in self.args.db:
continue
self.logger.debug(f" {conn.settings_dict['NAME']}")
conn.close()
if nothing_printed:
print(f"{db:30s} {'--- ':>20}")
ans = input('\nAre you sure you want to drop these database(s) and reload them? [y/N] ')
if ans.lower() != 'y':
print('Exiting')
sys.exit()
# That user wants to load all the production databases (no command line arguments)
if not sys.argv[1:]:
print(("On the server running on port =", settings.DATABASES['default']['PORT']))
print("You are about to load all these databases:")
print((' '.join(list(campaigns.campaigns.keys()))))
ans = eval(input('\nAre you sure you want load all these databases? [y/N] '))
if ans.lower() != 'y':
print('Exiting')
sys.exit()
# That script support the --test option
if self.args.db and self.args.test:
for db in self.args.db:
if self._has_no_t_option(db, campaigns.campaigns[db]):
print(f'{campaigns.campaigns[db]} does not support the --test argument')
sys.exit(-1)
    def recordprovenance(self, db, load_command, log_file):
        '''Add Resources to the Campaign that describe what loaded it.
        When run with --background, waits up to 2 minutes for the Campaign
        record to appear before attaching the provenance Resources.
        '''
        self.logger.debug('Recording provenance for %s using log_file = %s', db, log_file)
        try:
            rt, _ = ResourceType.objects.using(db).get_or_create( name='provenance',
                    description='Information about the source of data')
        except (ConnectionDoesNotExist, OperationalError, ProgrammingError) as e:
            self.logger.warn('Could not open database "%s" for updating provenance.', db)
            self.logger.warn(e)
            return
        i = 0
        c = None
        while not c:
            try:
                self.logger.debug('Looking in database %s for first Campaign record', db)
                c = Campaign.objects.using(db).get(id=1)
            except ObjectDoesNotExist:
                if self.args.background:
                    # Sleep a bit for background jobs to create the Campaign
                    sec_wait = 5
                    time.sleep(sec_wait)
                    i += 1
                    max_iter = 24
                    if i > max_iter:
                        raise DatabaseLoadError(('No campaign created after {:d} seconds. '
                            'Check log_file for errors: {}').format(sec_wait * max_iter, log_file))
                else:
                    # Foreground load with no Campaign record: nothing to annotate
                    self.logger.error(f'Could not find Campaign record for {db} in the database.')
                    self.logger.error(f'Look for error messages in: {log_file}')
                    return
        self.logger.info('Database %s', db)
        # Collect provenance items into self.prov, then persist each one as a
        # Resource attached to the Campaign
        self._provenance_dict(db, load_command, log_file)
        for name,value in list(self.prov.items()):
            r, _ = Resource.objects.using(db).get_or_create(
                    uristring='', name=name, value=value, resourcetype=rt)
            CampaignResource.objects.using(db).get_or_create(
                    campaign=c, resource=r)
            self.logger.info('Resource uristring="%s", name="%s", value="%s"', '', name, value)
def updateprovenance(self):
campaigns = importlib.import_module(self.args.campaigns)
for db,load_command in list(campaigns.campaigns.items()):
if self.args.db:
if db not in self.args.db:
continue
if self.args.test:
if self._has_no_t_option(db, load_command):
continue
db += '_t'
script = os.path.join(app_dir, 'loaders', load_command)
log_file = self._log_file(script, db, load_command)
try:
self.recordprovenance(db, load_command, log_file)
except (ObjectDoesNotExist, DatabaseLoadError) as e:
self.logger.warn('Could not record provenance in database %s', db)
self.logger.warn(e)
def grant_everyone_select(self):
campaigns = importlib.import_module(self.args.campaigns)
for db,load_command in list(campaigns.campaigns.items()):
if self.args.db:
if db not in self.args.db:
continue
if self.args.test:
if self._has_no_t_option(db, load_command):
continue
db += '_t'
command = 'psql -p {port} -c \"GRANT SELECT ON ALL TABLES IN SCHEMA public TO everyone;\" -d {db} -U postgres'
grant = command.format(**{'port': settings.DATABASES[db]['PORT'], 'db': db})
self.logger.info('Granting SELECT to everyone on database %s', db)
self.logger.debug('grant = %s', grant)
ret = os.system(grant)
self.logger.debug('ret = %s', ret)
def removetest(self):
self.logger.info('Removing test databases from sever running on port %s',
settings.DATABASES['default']['PORT'])
campaigns = importlib.import_module(self.args.campaigns)
for db,load_command in list(campaigns.campaigns.items()):
if self.args.db:
if db not in self.args.db:
continue
if self._has_no_t_option(db, load_command):
continue
db += '_t'
dropdb = ('psql -p {port} -c \"DROP DATABASE {db};\" -U postgres').format(
**{'port': settings.DATABASES['default']['PORT'], 'db': db})
self.logger.info('Dropping database %s', db)
self.logger.debug('dropdb = %s', dropdb)
ret = os.system(dropdb)
self.logger.debug('ret = %s', ret)
if ret != 0:
self.logger.warn('Failed to drop %s', db)
def list(self):
    """Print selected campaign database names plus an export line for STOQS_CAMPAIGNS."""
    campaign_mod = importlib.import_module(self.args.campaigns)
    selected = []
    for dbname, loader_cmd in list(campaign_mod.campaigns.items()):
        if self.args.db and dbname not in self.args.db:
            continue
        if self.args.test:
            if self._has_no_t_option(dbname, loader_cmd):
                continue
            dbname += '_t'
        selected.append(dbname)
    print('\n'.join(selected))
    print('export STOQS_CAMPAIGNS="{}"'.format(','.join(selected)))
def lines_with_string(self, file_name, string, max_lines=10):
    """
    Return (as one string) up to max_lines lines of file_name that contain
    string. If more matches exist, a '(... truncated ...)' note is appended;
    if there are no matches at all, a 'No lines containing ...' message is
    returned instead.
    """
    # FIX: build the result with a list + join (the old '+=' loop was
    # quadratic) and fix the off-by-one that included max_lines+1 matching
    # lines and claimed truncation even when no further matches existed.
    matches = []
    truncated = False
    with open(file_name) as f:
        for line in f:
            if string in line:
                if len(matches) == max_lines:
                    truncated = True
                    break
                matches.append(line)
    matching_lines = ''.join(matches)
    if truncated:
        matching_lines += f'\n(... truncated after {string} seen {max_lines} times ...)'
    if not matching_lines:
        matching_lines = f'No lines containing string {string}.'
    return matching_lines
def load(self, campaigns=None, create_only=False):
    """
    Create, migrate and load each selected campaign database by executing
    its load script via os.system, optionally e-mailing and/or posting the
    results to Slack, and finally recording provenance in the database.

    Parameters
    ----------
    campaigns : module, optional
        Pre-imported campaigns module; when None, self.args.campaigns is imported.
    create_only : bool, optional
        When True, stop after creating/migrating each database (no load run).
    """
    if not campaigns:
        campaigns = importlib.import_module(self.args.campaigns)
    for db, load_command in list(campaigns.campaigns.items()):
        # Honor an explicit --db selection.
        if self.args.db:
            if db not in self.args.db:
                continue
        # NOTE(review): the settings additions below are assumed to apply only
        # to test ('_t') databases (nesting reconstructed) -- confirm upstream.
        if self.args.test:
            if self._has_no_t_option(db, load_command):
                continue
            load_command += ' -t'
            db += '_t'
            # Borrowed from stoqs/config/settings/common.py
            campaign = db
            settings.DATABASES[campaign] = settings.DATABASES.get('default').copy()
            settings.DATABASES[campaign]['NAME'] = campaign
            settings.MAPSERVER_DATABASES[campaign] = settings.MAPSERVER_DATABASES.get('default').copy()
            settings.MAPSERVER_DATABASES[campaign]['NAME'] = campaign
        if db not in settings.DATABASES:
            # Django docs say not to do this, but I can't seem to force a settings reload.
            # Note that databases in campaigns.py are put in settings by settings.local.
            settings.DATABASES[db] = settings.DATABASES.get('default').copy()
            settings.DATABASES[db]['NAME'] = db
        try:
            self._create_db(db)
        except DatabaseCreationError as e:
            self.logger.warn(e)
            self.logger.warn('Use the --clobber option, or fix the problem indicated.')
            if self.args.db and not self.args.test:
                raise Exception('Maybe use the --clobber option to recreate the database...')
            else:
                # If running test for all databases just go on to next database
                continue
        if self.args.drop_indexes:
            self.logger.info('Dropping indexes...')
            self._drop_indexes()
        else:
            call_command('makemigrations', 'stoqs', settings='config.settings.local', noinput=True)
            call_command('migrate', settings='config.settings.local', noinput=True, database=db)
        if create_only:
            return
        # Propagate verbosity to the loader script (shell scripts excluded).
        if hasattr(self.args, 'verbose') and not load_command.endswith('.sh'):
            if self.args.verbose > 2:
                load_command += ' -v'
        # === Execute the load
        script = os.path.join(app_dir, 'loaders', load_command)
        log_file = self._log_file(script, db, load_command)
        if script.endswith('.sh'):
            cmd = (f'cd {os.path.dirname(script)} && (STOQS_CAMPAIGNS={db} time {script}) > {log_file} 2>&1;')
        else:
            cmd = (f'(STOQS_CAMPAIGNS={db} time {script}) > {log_file} 2>&1;')
        if self.args.email:
            # Send email on success or failure
            cmd += ('''
if [ $? -eq 0 ]
then
    (echo Any ERROR mesages and last 10 lines of: {log};
    grep ERROR {log};
    tail {log}) | mail -s "{db} load finished" {email}
else
    (echo Any ERROR mesages and last 20 lines of: {log};
    grep ERROR {log};
    tail -20 {log}) | mail -s "{db} load FAILED" {email}
fi''').format(**{'log': log_file, 'db': db, 'email': self.args.email})
        if self.args.background:
            cmd = '({}) &'.format(cmd)
        self.logger.info('Executing: %s', cmd)
        ret = os.system(cmd)
        self.logger.debug(f'ret = {ret}')
        self._copy_log_file(log_file)
        if self.args.slack:
            # Post outcome, WARNING/ERROR excerpts and the log tail to Slack.
            server = os.environ.get('NGINX_SERVER_NAME', socket.gethostname())
            message = f"{db} load into {settings.DATABASES[db]['HOST']} on {server}"
            if ret == 0:
                message += ' *succeded*.\n'
            else:
                message += ' *failed*.\n'
            stoqs_icon_url = 'http://www.stoqs.org/wp-content/uploads/2017/07/STOQS_favicon_logo3_512.png'
            self.slack.chat.post_message('#stoqs-loads', text=message, username='stoqsadm', icon_url=stoqs_icon_url)
            message = f'All WARNING messages from {log_file}:'
            message += f"```{self.lines_with_string(log_file, 'WARNING')}```"
            self.slack.chat.post_message('#stoqs-loads', text=message, username='stoqsadm', icon_url=stoqs_icon_url)
            message = f'All ERROR messages from {log_file}:'
            message += f"```{self.lines_with_string(log_file, 'ERROR')}```"
            self.slack.chat.post_message('#stoqs-loads', text=message, username='stoqsadm', icon_url=stoqs_icon_url)
            num_lines = 20
            message = f'Last {num_lines} lines of {log_file}:'
            message += f"```{tail(log_file, num_lines)}```"
            log_url = 'http://localhost:8008/media/loadlogs/' + os.path.basename(log_file) + '.txt'
            self.slack.chat.post_message('#stoqs-loads', text=message, username='stoqsadm', icon_url=stoqs_icon_url, attachments=log_url)
            self.logger.info('Message sent to Slack channel #stoqs-loads')
        if ret != 0:
            self.logger.error(f'Non-zero return code from load script. Check {log_file}')
        if self.args.drop_indexes:
            self.logger.info('Creating indexes...')
            self._create_indexes()
            call_command('makemigrations', 'stoqs', settings='config.settings.local', noinput=True)
            call_command('migrate', settings='config.settings.local', noinput=True, database=db)
        # Record details of the database load to the database
        try:
            self.recordprovenance(db, load_command, log_file)
        except DatabaseLoadError as e:
            self.logger.warn(str(e))
def process_command_line(self):
    """
    Build the argparse parser, parse sys.argv into self.args, save the full
    command line in self.commandline, create the Slack client if --slack was
    given, and set logger verbosity from -v.
    """
    import argparse
    from argparse import RawTextHelpFormatter
    examples = 'Examples:' + '\n\n'
    examples += " Load all databases:\n"
    examples += " " + sys.argv[0] + "\n"
    examples += " Reload all databases (dropping all existing databases):\n"
    examples += " " + sys.argv[0] + " --clobber\n"
    examples += " Reload specific databases from as background jobs with verbose output:\n"
    examples += " " + sys.argv[0] + " --db stoqs_september2013 stoqs_may2015 --clobber --background --email mccann@mbari.org -v 1\n"
    examples += " Drop specific test databases:\n"
    examples += " " + sys.argv[0] + " --db stoqs_september2010 stoqs_october2010 --removetest -v 1\n"
    examples += " Drop all test databases:\n"
    # NOTE(review): there appears to be a missing space before "--removetest"
    # in the next example string -- confirm before changing (runtime text).
    examples += " " + sys.argv[0] + "--removetest -v 1\n"
    examples += " List test databases to get STOQS_CAMPAIGNS string:\n"
    examples += " " + sys.argv[0] + " --list --test"
    examples += "\n"
    examples += '\nIf running from cde-package replace ".py" with ".py.cde".'
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter,
                                     description=('''
Script to load or reload STOQS databases using the dictionary in stoqs/campaigns.py
A typical workflow to build up a production server is:
1. Construct a stoqs/campaigns.py file (use mbari_campaigns.py as model)
2. Make *ex situ* Sampled Parameter data available:
   a. Uncompress Sample data files included in the stoqs repository:
      find stoqs/loaders -name "*.gz" | xargs gunzip
   b. Copy BOG database extraction files to CANON/BOG_Data
   c. (A Big TODO: Change these loads to a web-accessable method...)
3. Copy terrain data files that are not included or copied during test.sh execution:
   cd stoqs/loaders
   wget https://stoqs.mbari.org/terrain/Monterey25.grd
   wget https://stoqs.mbari.org/terrain/Globe_1m_bath.grd
   wget https://stoqs.mbari.org/terrain/MontereyCanyonBeds_1m+5m.grd
   wget https://stoqs.mbari.org/terrain/SanPedroBasin50.grd
   wget https://stoqs.mbari.org/terrain/michigan_lld.grd
4. Get the STOQS_CAMPAIGNS setting for running your server:
   {load} --test --list
5. Load test (_t) databases to test all your load scripts:
   {load} --test --clobber --background --email {user} -v > load.out 2>&1
   (Check your email for load finished messages)
   Email is not configured for a Docker installation, instead use Slack:
   cd docker
   docker exec -e SLACKTOKEN=<your_private_token> -e STOQS_CAMPAIGNS=<results_from_previous_step> stoqs {load} --test --slack
   (The --clobber, --db <database>, and --verbose <num> options can be used to reload and debug problems.)
6. Add metadata to the database with links to the log files:
   {load} --test --updateprovenance
7. Set your environment variables and run your server:
   export DATABASE_URL=postgis://<dbuser>:<pw>@<host>:<port>/stoqs
   export STOQS_CAMPAIGNS=<output_from_previous_step>
   export MAPSERVER_HOST=<mapserver_ip_address>
   stoqs/manage.py runserver 0.0.0.0:8000 --settings=config.settings.local
   - or, however you start your uWSGI app, e.g.:
   uwsgi --socket :8001 --module wsgi:application
8. Visit your server and see that your test databases are indeed loaded
9. Check all your output files for ERROR and WARNING messages
10. Fix any problems so that ALL the test database loads succeed
11. Remove the test databases:
    {load} --removetest -v
12. Load your production databases:
    {load} --background --email {user} -v > load.out 2>&1
13. Add provenance information to the database, with setting for non-default MEDIA_ROOT:
    export MEDIA_ROOT=/usr/share/nginx/media
    {load} --updateprovenance -v
14. Give the 'everyone' role SELECT privileges on all databases:
    {load} --grant_everyone_select -v
15. After a final check announce the availability of these databases
To get any stdout/stderr output you must use -v, the default is no output.
''').format(**{'load': sys.argv[0], 'user': os.environ['USER']}),
                                     epilog=examples)
    parser.add_argument('--campaigns', action='store', help='Module containing campaigns dictionary (must also be in campaigns.py)', default='campaigns')
    parser.add_argument('--db', action='store', help=('Specify databases from CAMPAIGNS to load'
                                                      ' (do not append "_t", instead use --test'
                                                      ' for test databases)'), nargs='*')
    parser.add_argument('--test', action='store_true', help='Load test databases using -t option of loaders.LoadScript')
    parser.add_argument('--clobber', action='store_true', help=('Drop databases before creating and loading them.'
                                                                ' Need to confirm dropping production databases.'))
    parser.add_argument('--background', action='store_true', help='Execute each load in the background to parallel process multiple loads')
    parser.add_argument('--removetest', action='store_true', help='Drop all test databases; the --db option limits the dropping to those in the list')
    parser.add_argument('--list', action='store_true', help='List the databases that are in --campaigns')
    parser.add_argument('--email', action='store', help='Address to send mail to when the load finishes. Does not work from Docker, use --slack instead.')
    parser.add_argument('--slack', action='store_true', help='Post message to stoqs-loads channel on Slack using SLACKTOKEN env variable')
    parser.add_argument('--updateprovenance', action='store_true', help=('Use after background jobs finish to copy'
                                                                         ' loadlogs and update provenance information'))
    parser.add_argument('--grant_everyone_select', action='store_true', help='Grant everyone role select privileges on all relations')
    parser.add_argument('--drop_indexes', action='store_true', help='Before load drop indexes and create them following the load')
    parser.add_argument('-v', '--verbose', nargs='?', choices=[1,2,3], type=int, help='Turn on verbose output. If > 2 load is verbose too.', const=1, default=0)
    self.args = parser.parse_args()
    self.commandline = ' '.join(sys.argv)
    if self.args.slack:
        try:
            self.slack = Slacker(os.environ['SLACKTOKEN'])
        except KeyError:
            print('If using --slack must set SLACKTOKEN environment variable. [Never share your token!]')
            sys.exit(-1)
    # -v 2/3 -> DEBUG, -v 1 -> INFO, default stays at the logger's base level.
    if self.args.verbose > 1:
        self.logger.setLevel(logging.DEBUG)
    elif self.args.verbose > 0:
        self.logger.setLevel(logging.INFO)
if __name__ == '__main__':
    # Parse options, run sanity checks, then dispatch exactly one action:
    # the first mode flag that is set wins; the default action is a full load.
    loader = Loader()
    loader.process_command_line()
    loader.checks()
    dispatch = (('removetest', loader.removetest),
                ('list', loader.list),
                ('updateprovenance', loader.updateprovenance),
                ('grant_everyone_select', loader.grant_everyone_select))
    for flag, action in dispatch:
        if getattr(loader.args, flag):
            action()
            break
    else:
        loader.load()
| danellecline/stoqs | stoqs/loaders/load.py | Python | gpl-3.0 | 33,203 | [
"NetCDF",
"VisIt"
] | 06362d5a4a03ad06ae295a248a0fdd0d60be0a1e1c9469d53a6b877d96827a7a |
#!/usr/bin/env python3
import sys
import subprocess as sb
import numpy as np
import argparse
from utilities import filesFromList, writeLog
from plotTools import userLabels, extractFromCSV, addToPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#======== Function definitions =============================#
def p2pMaxMin( r ):
    """
    Peak-to-peak evaluation of signal r.

    Returns (rp_max, rn_min): the largest cumulative rise (sum of consecutive
    non-negative increments) and the largest cumulative fall (most negative
    sum of consecutive negative increments) found anywhere in r.
    """
    dr = (r[1:] - r[:-1])
    fpos = (dr>=0.).astype(int)
    fneg = (dr<0.).astype(int)
    rp_cum = 0.; rn_cum = 0.
    rp_max = 0.; rn_min = 0.
    for i, fp in enumerate(fpos):
        if( fp == 0 ):
            if( rp_cum > rp_max ): rp_max = rp_cum
            rp_cum = 0.
        rp_cum += float(fp)*dr[i]
    # BUGFIX: a rising run that reaches the end of the signal was never
    # compared against the running maximum; include it here.
    if( rp_cum > rp_max ): rp_max = rp_cum
    for i, fn in enumerate(fneg):
        if( fn == 0 ):
            if( rn_cum < rn_min ): rn_min = rn_cum
            rn_cum = 0.
        rn_cum += float(fn)*dr[i]
    # BUGFIX: likewise for a falling run that ends the signal.
    if( rn_cum < rn_min ): rn_min = rn_cum
    return rp_max, rn_min
#==========================================================#
# Command-line interface and initial coordinate extraction for the
# approach-line turbulence analysis.
parser = argparse.ArgumentParser(prog='approachAnalysis.py')
parser.add_argument("strKey", help="Search string for collecting files.",nargs='?',\
  default=".csv")
parser.add_argument("--magy", help="Magnitude of all variables.", action="store_true",\
  default=False)
parser.add_argument("--yx", help="Reverse axes: plot(x,y) --> plot(y,x)", action="store_true",\
  default=False)
parser.add_argument("--labels", help="User specified labels.", action="store_true",\
  default=False)
parser.add_argument("--reuse", help="Reuse once specified variable selections.", action="store_true",\
  default=False)
parser.add_argument("-v", "--var", help="Variable Name in CSV-file", type=str, nargs='+',\
  default=['u','v','w'] )
parser.add_argument("-yl","--ylims", help="Y-axis limits: [min,max]. Default=[0,10]",\
  type=float,nargs=2,default=[0.,10.])
parser.add_argument("-fn","--figName", help="Name of the (temporary) figures. (default=tmp)",\
  type=str,default="tmp")
parser.add_argument("-fa","--fileAnim", help="Name of the animation file. (default=anim.gif)",\
  type=str,default="anim.gif")
parser.add_argument("-na", "--noAnim", help="Do not make an animation.",\
  action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#
# Unpack the parsed options into plain locals used below.
strKey = args.strKey
figName = args.figName
fileAnim = args.fileAnim
noAnimation = args.noAnim
ylims = args.ylims
varList = args.var
# Gather all CSV files matching the search key and read the approach-line
# coordinates (arc length plus x,y,z) from the first one.
fileNos, fileList = filesFromList( "*"+strKey+"*" )
print(' The varList [-v, --var] option is over ridden at this point. ')
print(' Reading coordinate values from file {} ...'.format( fileList[0]) )
coordList = [ 'arc_length', 'Points:0', 'Points:1', 'Points:2']
xv = extractFromCSV( fileList[0] , coordList )
s = xv[0].copy() # arc_length
x = xv[1].copy(); y = xv[2].copy(); z = xv[3].copy()
xv = None  # release the parsed CSV columns
print(' Done.\n')
# -------------------------------------------------------- #
# Accumulate per-file velocity fields and compute their time means.
print(' Computing the mean velocity values ... ')
varList = ['u', 'v', 'w']
Ux_mean = None; Uy_mean = None; Uz_mean = None
n = 0
for fn in fileNos:
    n += 1
    #pfig = pl.figure(num=1, figsize=(18.,9.))
    tv = extractFromCSV( fileList[fn] , varList )
    u = tv[0].copy(); v = tv[1].copy(); w = tv[2].copy()
    tv = None
    # The velocity data may contain nan entries, which should be replaced by 0.
    u[np.isnan(u)] = 0.; v[np.isnan(v)] = 0.; w[np.isnan(w)] = 0.
    # Accumulate sums for mean values.
    # BUGFIX: the original 'Ux_mean == None' becomes an elementwise numpy
    # comparison once Ux_mean is an ndarray and raises "truth value of an
    # array is ambiguous" on the second file; identity test is required.
    if( Ux_mean is None ):
        Ux_mean = np.zeros( u.shape ) # Initialize
        Uy_mean = np.zeros( u.shape )
        Uz_mean = np.zeros( u.shape )
    Ux_mean += u; Uy_mean += v; Uz_mean += w
# Use the sums to compute the mean values.
Ux_mean /= float(n); Uy_mean /= float(n); Uz_mean /= float(n)
print(' Done.\n')
# -------------------------------------------------------- #
# Derive the approach-line geometry (direction, runway/wind angle) and the
# aircraft/aerodynamic constants used in the lift-perturbation analysis.
print(' Extract directional data from the approach line ... ')
#pfig = plotCSV( pfig, fileList[fn], args.yx, args.magy, args.reuse )
rad2deg = 180./np.pi
deg2rad = np.pi/180.
# Starting point: Rissala's approach line (from Paraview)
p1 = np.array([ x[0], y[0], z[0] ]) # np.array([6800., 1250., 0.])
p2 = np.array([ x[-1],y[-1],z[-1] ]) # np.array([7700., 650., 72.])
da = p2 - p1                        # vector along the approach line
da_mag = np.sqrt( np.sum( da**2 ) ) # its 3D length
da_xy = np.sqrt( np.sum( da[0:2]**2)) # its horizontal (x,y) length
# Approach direction (normal vector)
na = da/da_mag
# Sharp angle between the runway and the mean wind
theta = np.arccos( da[0]/da_xy )
print(' Sharp angle between the runway and the mean wind: theta = {} deg'.format( theta*rad2deg ))
print(' Done.\n')
# -------------------------------------------------------- #
# Hornet's approach speed and velocity
Uappr_mag = 69.
Ua = Uappr_mag*na
# Mean headwind
Uhw_mean = Ux_mean * np.cos( theta ) - Uy_mean * np.sin( theta )
# Speed relative to the ground ... perhaps not needed.
U_grd = Uappr_mag - Uhw_mean
# Approach angle
gamma = np.arctan( da[2]/da_xy )
# F18 Data:
rho = 1.2 # standard air
CL = 1.2 # at 7deg angle of attack
CLa = 2.86 # 1/rad (alpha in range [3deg, 10deg])
Aref=18.*3.  # reference wing area [m^2]
K = 0.5*rho*Aref  # dynamic-pressure coefficient used in the lift formulas
# Extract deviations in the headwind and compute the changes in AoA [alpha].
# For every CSV file (one per time step) compute the lift perturbations due
# to angle-of-attack and headwind fluctuations, plot them, and track running
# max / variance / peak-to-peak statistics across all time steps.
Lift = K*Uappr_mag**2*CL  # steady-state lift at the approach speed
n = 0
dL_max = 0.
dL_sum = 0.
dL_mxv = 0. # Maximum variance.
dL_p2p_max = 0.
dL_p2p_min = 0.
for fn in fileNos:
    n += 1
    #pfig = pl.figure(num=1, figsize=(18.,9.))
    tv = extractFromCSV( fileList[fn] , varList ) # NOTE: varList = ['u', 'v', 'w']
    du = tv[0]-Ux_mean
    dv = tv[1]-Uy_mean
    dw = tv[2]-Uz_mean # Uz_mean could be replaced by 0.
    tv = None
    # The velocity data may contain nan entries, which should be replaced by 0.
    du[np.isnan(du)] = 0.; dv[np.isnan(dv)] = 0.; dw[np.isnan(dw)] = 0.
    dU_hw = du * np.cos( theta ) - dv * np.sin( theta )
    dalpha = np.arctan( dw/Uappr_mag)
    # Change in lift due to changes in AoA:
    dL_a = K*Uappr_mag**2*CLa*dalpha
    # Change in lift due to changes in head wind.
    dL_u = 2.*K*CL*Uappr_mag*dU_hw
    dLp_a = dL_a/Lift * 100. # In percentage
    dLp_u = dL_u/Lift * 100.
    dLp_mag= np.sqrt( (dLp_a+dLp_u)**2 )
    #fig = plt.figure(num=1, figsize=(18,9))
    fig, (ax1, ax2) = plt.subplots(num=1, nrows=2, sharex=True, figsize=(18,11))
    lines11,=ax1.plot( s,dLp_a,'-o', linewidth=1.6 )
    lines12,=ax1.plot( s,dLp_u,'-o', linewidth=1.6 )
    ax1.legend( (lines11,lines12) , ('dL(alpha) [%]',' dL(u) [%]'), loc=1 )
    ax1.set_ylim([-8., 8.])
    ax1.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
    ax1.set_title(' Changes in Lift due to turbulence ', fontsize=22)
    ax1.set_ylabel(' dL [%] ', fontsize=22); ax1.grid(True)
    lines2,=ax2.plot(s,dLp_mag,'-ro', linewidth=1.6 )
    ax2.legend( (lines2,) , (' ABS(SUM(dL)) [%]',), loc=1 )
    ax2.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
    ax2.set_ylim([-1., 12.5]); ax2.set_xlim([ min(s) , max(s)])
    ax2.set_xlabel(' Distance along approach line [m] ', fontsize=22 )
    ax2.set_ylabel(' dL [%] ', fontsize=22 ); ax2.grid(True)
    # Maximum variance
    dL_ivar = np.var( dLp_mag[ du > 0 ] ) # Consider only nonzero values.
    if( dL_ivar > dL_mxv ): dL_mxv = dL_ivar
    # Mean variance
    dL_sum += dL_ivar
    dL_var = dL_sum/float(n)
    dL_imax = np.max(dLp_mag)
    if( dL_imax > dL_max ): dL_max = dL_imax
    dL_ip2p_mx, dL_ip2p_mn = p2pMaxMin( (dLp_a+dLp_u) )
    if( dL_ip2p_mx > dL_p2p_max ): dL_p2p_max = dL_ip2p_mx
    if( dL_ip2p_mn < dL_p2p_min ): dL_p2p_min = dL_ip2p_mn
    # Annotate the frame with the per-step and running statistics.
    # NOTE(review): the time label assumes 2 s between consecutive CSV files
    # -- confirm against the data-extraction cadence.
    infoStr =' Time = {:4d}s\n'.format((n-1)*2)
    infoStr +=' Current P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_ip2p_mx, dL_ip2p_mn)
    infoStr +=' Running P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_p2p_max, dL_p2p_min)
    #infoStr +=' Max(dL) = {:4.1f}%\n'.format(dL_imax)
    infoStr +=' Running Max(dL) = {:4.1f}%\n'.format(dL_max)
    #infoStr +=' Var(dL) = {:4.1f}%\n'.format(dL_ivar)
    infoStr +=' Running Mean(Var(dL)) = {:4.1f}%\n'.format(dL_var)
    infoStr +=' Running Max(Var(dL)) = {:4.1f}%\n'.format(dL_mxv)
    plt.text( 1. , 5.5, infoStr , fontsize=20)
    figStr = '{}_{:04d}.jpg'.format(figName,n)
    print(' Saving figure {} '.format(figStr))
    fig.savefig(figStr)
    ax1.cla(); ax2.cla(); fig.clf()
# Assemble the saved frames into an animated gif with ImageMagick's convert,
# unless the user passed -na/--noAnim.
if( not noAnimation ):
    cmd = 'convert {}_* {} '.format(figName,fileAnim)
    # BUGFIX: this was a Python-2 'print' statement, a SyntaxError under the
    # python3 shebang this script declares.
    print(' Executing command: ${}'.format(cmd))
    sb.call(cmd, shell=True)
print(' All Done! ')
| mjsauvinen/P4UL | pyAnalyze/approachAnalysis.py | Python | mit | 8,460 | [
"ParaView"
] | 15bfcb17829ba3b07320cb77f5553c2dd10611567021b4ac19569f19551926c8 |
#!/usr/bin/env python
"""Packaging script for django-less-styleguide."""
from setuptools import setup, find_packages


def _read_long_description():
    # FIX: use a context manager so the README handle is closed
    # deterministically (the original open(...).read() leaked the file object).
    with open('README.rst') as f:
        return f.read()


setup(name='django-less-styleguide',
      version='0.0.1',
      url='https://github.com/a-musing-moose/django-less-styleguide',
      author="Jonathan Moss",
      author_email="jonathan.moss@tangentone.com.au",
      description="A Style Guide Generator for LESS files",
      long_description=_read_long_description(),
      keywords="LESS, CSS, Style Guide",
      license='BSD',
      platforms=['linux'],
      packages=find_packages(),
      install_requires=[],
      # See http://pypi.python.org/pypi?%3Aaction=list_classifiers
      classifiers=['Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: Unix',
                   'Programming Language :: Python']
      )
| a-musing-moose/django-less-styleguide | setup.py | Python | bsd-3-clause | 924 | [
"MOOSE"
] | 26ae686d26fec89ac887ee61f1bc9f9cdc16d17d45bcae69ffd23381c43eea0d |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from mooseutils import message
from PyQt5 import QtCore
def setAppInformation(app_name="peacock", force=False):
    """
    Register the Qt organization/application identity, prefixing the
    application name with 'test_' when running in MOOSE testing mode.
    """
    app = QtCore.QCoreApplication
    app.setOrganizationName("IdahoLab")
    app.setOrganizationDomain("inl.gov")
    if not message.MOOSE_TESTING_MODE:
        app.setApplicationName(app_name)
    elif force or not app.applicationName():
        # Keep an already-set name (e.g. "peacock_peacockapp" set by
        # PeacockApp) unless explicitly forced, so test names stay unique.
        app.setApplicationName("test_%s" % app_name)
| harterj/moose | python/peacock/utils/qtutils.py | Python | lgpl-2.1 | 1,073 | [
"MOOSE"
] | 64723a672de1ec6a5a55e0ee8e3c0b8259774c7ab1b1b0652469732793ead795 |
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
import nipype.interfaces.c3 as c3
def create_nonlinear_register(name='nonlinear_register'):
    """
    Build a workflow that registers an input brain to a template with FSL:
    FLIRT (corratio cost) produces the affine, FNIRT (driven by a config
    file) the nonlinear warp applied via ApplyWarp, and the affine is also
    inverted (ConvertXFM) for template-to-input use.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
        Inputs ('inputspec'): input_brain, input_skull, reference_brain,
        reference_skull, ref_mask, fnirt_config.
        Outputs ('outputspec'): output_brain, linear_xfm, invlinear_xfm,
        nonlinear_xfm.
    """
    wf = pe.Workflow(name=name)

    in_node = pe.Node(util.IdentityInterface(fields=['input_brain',
                                                     'input_skull',
                                                     'reference_brain',
                                                     'reference_skull',
                                                     'ref_mask',
                                                     'fnirt_config']),
                      name='inputspec')
    out_node = pe.Node(util.IdentityInterface(fields=['output_brain',
                                                      'linear_xfm',
                                                      'invlinear_xfm',
                                                      'nonlinear_xfm']),
                       name='outputspec')

    flirt = pe.Node(interface=fsl.FLIRT(), name='linear_reg_0')
    flirt.inputs.cost = 'corratio'

    fnirt = pe.Node(interface=fsl.FNIRT(), name='nonlinear_reg_1')
    fnirt.inputs.fieldcoeff_file = True
    fnirt.inputs.jacobian_file = True

    warp = pe.Node(interface=fsl.ApplyWarp(), name='brain_warp')

    inv_xfm = pe.Node(interface=fsl.utils.ConvertXFM(), name='inv_linear_reg0_xfm')
    inv_xfm.inputs.invert_xfm = True

    # (source node, source field, destination node, destination field)
    edges = [
        (in_node, 'input_brain', flirt, 'in_file'),
        (in_node, 'reference_brain', flirt, 'reference'),
        (in_node, 'input_skull', fnirt, 'in_file'),
        (in_node, 'reference_skull', fnirt, 'ref_file'),
        (in_node, 'ref_mask', fnirt, 'refmask_file'),
        # FNIRT parameters come from an FSL config file, e.g.
        # ${FSLDIR}/etc/flirtsch/TI_2_MNI152_2mm.cnf (or user-specified).
        (in_node, 'fnirt_config', fnirt, 'config_file'),
        (flirt, 'out_matrix_file', fnirt, 'affine_file'),
        (fnirt, 'fieldcoeff_file', out_node, 'nonlinear_xfm'),
        (in_node, 'input_brain', warp, 'in_file'),
        (fnirt, 'fieldcoeff_file', warp, 'field_file'),
        (in_node, 'reference_brain', warp, 'ref_file'),
        (warp, 'out_file', out_node, 'output_brain'),
        (flirt, 'out_matrix_file', inv_xfm, 'in_file'),
        (inv_xfm, 'out_file', out_node, 'invlinear_xfm'),
        (flirt, 'out_matrix_file', out_node, 'linear_xfm'),
    ]
    for src, src_field, dst, dst_field in edges:
        wf.connect(src, src_field, dst, dst_field)

    return wf
def create_register_func_to_mni(name='register_func_to_mni'):
    """
    Build a workflow that takes a native-space functional scan to MNI space.
    FLIRT (6 DOF, corratio) registers func->anat; the supplied anat->MNI
    affine and nonlinear warp carry it the rest of the way (ApplyWarp with
    premat); ConvertXFM concatenates the affines and inverts the result.

    Meant to be used after create_nonlinear_register(), whose outputs supply
    anat_to_mni_linear_xfm and anat_to_mni_nonlinear_xfm.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
        Inputs ('inputspec'): func, mni, anat, interp,
        anat_to_mni_nonlinear_xfm, anat_to_mni_linear_xfm.
        Outputs ('outputspec'): func_to_anat_linear_xfm,
        func_to_mni_linear_xfm, mni_to_func_linear_xfm, mni_func.
    """
    wf = pe.Workflow(name=name)

    in_node = pe.Node(util.IdentityInterface(fields=['func',
                                                     'mni',
                                                     'anat',
                                                     'interp',
                                                     'anat_to_mni_nonlinear_xfm',
                                                     'anat_to_mni_linear_xfm']),
                      name='inputspec')
    out_node = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
                                                      'func_to_mni_linear_xfm',
                                                      'mni_to_func_linear_xfm',
                                                      'mni_func']),
                       name='outputspec')

    flirt = pe.Node(interface=fsl.FLIRT(), name='linear_func_to_anat')
    flirt.inputs.cost = 'corratio'
    flirt.inputs.dof = 6

    warp_to_mni = pe.Node(interface=fsl.ApplyWarp(), name='mni_warp')

    concat_affine = pe.Node(interface=fsl.ConvertXFM(), name='mni_affine')
    concat_affine.inputs.concat_xfm = True

    invert_affine = pe.Node(interface=fsl.ConvertXFM(), name='inv_mni_affine')
    invert_affine.inputs.invert_xfm = True

    # (source node, source field, destination node, destination field)
    edges = [
        (flirt, 'out_matrix_file', concat_affine, 'in_file2'),
        (in_node, 'anat_to_mni_linear_xfm', concat_affine, 'in_file'),
        (concat_affine, 'out_file', out_node, 'func_to_mni_linear_xfm'),
        (concat_affine, 'out_file', invert_affine, 'in_file'),
        (invert_affine, 'out_file', out_node, 'mni_to_func_linear_xfm'),
        (in_node, 'func', flirt, 'in_file'),
        (in_node, 'anat', flirt, 'reference'),
        (in_node, 'interp', flirt, 'interp'),
        (in_node, 'func', warp_to_mni, 'in_file'),
        (in_node, 'mni', warp_to_mni, 'ref_file'),
        (in_node, 'anat_to_mni_nonlinear_xfm', warp_to_mni, 'field_file'),
        (flirt, 'out_matrix_file', warp_to_mni, 'premat'),
        (flirt, 'out_matrix_file', out_node, 'func_to_anat_linear_xfm'),
        (warp_to_mni, 'out_file', out_node, 'mni_func'),
    ]
    for src, src_field, dst, dst_field in edges:
        wf.connect(src, src_field, dst, dst_field)

    return wf
def create_register_func_to_anat(name='register_func_to_anat'):
    """
    Build a workflow that linearly registers a functional scan to the
    subject's anatomical image with FLIRT (6 DOF, correlation-ratio cost);
    no boundary-based registration (bbregister) is performed.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    nipype.pipeline.engine.Workflow
        Inputs ('inputspec'): func, anat, interp.
        Outputs ('outputspec'): func_to_anat_linear_xfm_nobbreg,
        anat_func_nobbreg.
    """
    wf = pe.Workflow(name=name)

    in_node = pe.Node(util.IdentityInterface(fields=['func',
                                                     'anat',
                                                     'interp']),
                      name='inputspec')
    out_node = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm_nobbreg',
                                                      'anat_func_nobbreg']),
                       name='outputspec')

    flirt = pe.Node(interface=fsl.FLIRT(), name='linear_func_to_anat')
    flirt.inputs.cost = 'corratio'
    flirt.inputs.dof = 6

    # (source node, source field, destination node, destination field)
    for src, src_field, dst, dst_field in (
            (in_node, 'func', flirt, 'in_file'),
            (in_node, 'anat', flirt, 'reference'),
            (in_node, 'interp', flirt, 'interp'),
            (flirt, 'out_matrix_file', out_node, 'func_to_anat_linear_xfm_nobbreg'),
            (flirt, 'out_file', out_node, 'anat_func_nobbreg')):
        wf.connect(src, src_field, dst, dst_field)

    return wf
def create_bbregister_func_to_anat(name='bbregister_func_to_anat'):
    """
    Registers a functional scan in native space to structural. This is meant
    to be used after create_nonlinear_register() has been run and relies on
    some of its outputs.

    Parameters
    ----------
    name : string, optional
        Name of the workflow.

    Returns
    -------
    register_func_to_anat : nipype.pipeline.engine.Workflow

    Notes
    -----
    Workflow Inputs::
        inputspec.func : string (nifti file)
            Input functional scan to be registered to MNI space
        inputspec.anat_skull : string (nifti file)
            Corresponding full-head scan of subject
        inputspec.linear_reg_matrix : string (mat file)
            Affine matrix from linear functional to anatomical registration
        inputspec.anat_wm_segmentation : string (nifti file)
            White matter segmentation probability mask in anatomical space
        inputspec.bbr_schedule : string (.sch file)
            Boundary based registration schedule file for flirt command

    Workflow Outputs::
        outputspec.func_to_anat_linear_xfm : string (mat file)
            Affine transformation from functional to anatomical native space
        outputspec.anat_func : string (nifti file)
            Functional data in anatomical space
    """
    register_bbregister_func_to_anat = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=['func',
                                                       'anat_skull',
                                                       'linear_reg_matrix',
                                                       'anat_wm_segmentation',
                                                       'bbr_schedule']),
                        name='inputspec')

    outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
                                                        'anat_func']),
                         name='outputspec')

    # binarise the white-matter probability mask at 0.5 so it can serve as
    # the boundary-based-registration -wmseg input
    wm_bb_mask = pe.Node(interface=fsl.ImageMaths(), name='wm_bb_mask')
    wm_bb_mask.inputs.op_string = '-thr 0.5 -bin'

    register_bbregister_func_to_anat.connect(inputspec, 'anat_wm_segmentation',
                                             wm_bb_mask, 'in_file')

    def bbreg_args(bbreg_target):
        # builds the extra FLIRT arguments that enable the BBR cost function
        return '-cost bbr -wmseg ' + bbreg_target

    # 6-dof FLIRT driven by the BBR schedule, initialised with the linear
    # func->anat matrix produced upstream
    bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
                                 name='bbreg_func_to_anat')
    bbreg_func_to_anat.inputs.dof = 6

    register_bbregister_func_to_anat.connect(inputspec, 'bbr_schedule',
                                             bbreg_func_to_anat, 'schedule')
    register_bbregister_func_to_anat.connect(wm_bb_mask, ('out_file', bbreg_args),
                                             bbreg_func_to_anat, 'args')
    register_bbregister_func_to_anat.connect(inputspec, 'func',
                                             bbreg_func_to_anat, 'in_file')
    register_bbregister_func_to_anat.connect(inputspec, 'anat_skull',
                                             bbreg_func_to_anat, 'reference')
    register_bbregister_func_to_anat.connect(inputspec, 'linear_reg_matrix',
                                             bbreg_func_to_anat, 'in_matrix_file')

    register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_matrix_file',
                                             outputspec, 'func_to_anat_linear_xfm')
    register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_file',
                                             outputspec, 'anat_func')

    return register_bbregister_func_to_anat
def create_wf_calculate_ants_warp(name='create_wf_calculate_ants_warp', mult_input=0):
    '''
    Calculates the nonlinear ANTS registration transform. This workflow
    employs the antsRegistration tool:

    http://stnava.github.io/ANTs/

    Parameters
    ----------
    name : string, optional
        Name of the workflow.
    mult_input : integer, optional
        When 1, the separate whole-head images supplied on the inputspec
        ('anatomical_skull' / 'reference_skull') are forwarded to the
        registration node; otherwise the brain images are reused in their
        place.

    Returns
    -------
    calc_ants_warp_wf : nipype.pipeline.engine.Workflow

    Notes
    -----
    Some of the inputs listed below are lists or lists of lists. This is
    because antsRegistration can perform multiple stages of calculations
    depending on how the user configures their registration.

    For example, if one wants to employ a different metric (with different
    parameters) at each stage, the lists would be configured like this:

    warp_wf.inputs.inputspec.transforms = ['Rigid','Affine','SyN']
    warp_wf.inputs.inputspec.transform_parameters = [[0.1],[0.1],[0.1,3,0]]

    ..where each element in the first list is a metric to be used at each
    stage, 'Rigid' being for stage 1, 'Affine' for stage 2, etc. The lists
    within the list for transform_parameters would then correspond to each
    stage's metric, with [0.1] applying to 'Rigid' and 'Affine' (stages 1 and
    2), and [0.1,3,0] applying to 'SyN' of stage 3.

    In some cases, when a parameter is not needed for a stage, 'None' must be
    entered in its place if there are other parameters for other stages.

    Workflow Inputs::
        inputspec.anatomical_brain : string (nifti file)
            File of brain to be normalized (registered)
        inputspec.reference_brain : string (nifti file)
            Target brain file to normalize to
        inputspec.dimension : integer
            Dimension of the image (default: 3)
        inputspec.use_histogram_matching : boolean
            Histogram match the images before registration
        inputspec.winsorize_lower_quantile : float
            Winsorize data based on quantiles (lower range)
        inputspec.winsorize_upper_quantile : float
            Winsorize data based on quantiles (higher range)
        inputspec.metric : list of strings
            Image metric(s) to be used at each stage
        inputspec.metric_weight : list of floats
            Modulate the per-stage weighting of the corresponding metric
        inputspec.radius_or_number_of_bins : list of integers
            Number of bins in each stage for the MI and Mattes metric, the
            radius for other metrics
        inputspec.sampling_strategy : list of strings
            Sampling strategy (or strategies) to use for the metrics
            {None, Regular, or Random}
        inputspec.sampling_percentage : list of floats
            Defines the sampling strategy
            {float value, or None}
        inputspec.number_of_iterations : list of lists of integers
            Determines the convergence
        inputspec.convergence_threshold : list of floats
            Threshold compared to the slope of the line fitted in convergence
        inputspec.convergence_window_size : list of integers
            Window size of convergence calculations
        inputspec.transforms : list of strings
            Selection of transform options. See antsRegistration documentation
            for a full list of options and their descriptions
        inputspec.transform_parameters : list of lists of floats
            Fine-tuning for the different transform options
        inputspec.shrink_factors : list of lists of integers
            Specify the shrink factor for the virtual domain (typically the
            fixed image) at each level
        inputspec.smoothing_sigmas : list of lists of floats
            Specify the sigma of gaussian smoothing at each level

    Workflow Outputs::
        outputspec.warp_field : string (nifti file)
            Output warp field of registration
        outputspec.inverse_warp_field : string (nifti file)
            Inverse of the warp field of the registration
        outputspec.ants_affine_xfm : string (.mat file)
            The affine matrix of the registration
        outputspec.ants_initial_xfm / outputspec.ants_rigid_xfm : strings
            Initial and rigid transform files of the registration
        outputspec.composite_transform : string (nifti file)
            The combined transform including the warp field and rigid & affine
            linear warps
        outputspec.normalized_output_brain : string (nifti file)
            Template-registered version of input brain

    Registration Procedure:
    1. Calculates a nonlinear anatomical-to-template registration.
    '''

    # NOTE: dead code removed -- a disabled ants.Registration node and the
    # commented-out combine_inputs/combine_refs scaffolding (plus their
    # now-unused imports); registration goes through hardcoded_reg instead.
    from CPAC.registration.utils import seperate_warps_list, hardcoded_reg

    calc_ants_warp_wf = pe.Workflow(name=name)

    inputspec = pe.Node(util.IdentityInterface(fields=['anatomical_brain',
        'reference_brain', 'dimension', 'use_histogram_matching',
        'winsorize_lower_quantile', 'winsorize_upper_quantile', 'metric',
        'metric_weight', 'radius_or_number_of_bins', 'sampling_strategy',
        'sampling_percentage', 'number_of_iterations',
        'convergence_threshold', 'convergence_window_size', 'transforms',
        'transform_parameters', 'shrink_factors', 'smoothing_sigmas',
        'write_composite_transform', 'anatomical_skull',
        'reference_skull']), name='inputspec')

    # use ANTS to warp the masked anatomical image to a template image;
    # the actual antsRegistration call is hand-built inside hardcoded_reg
    calculate_ants_warp = pe.Node(interface=util.Function(
        input_names=['anatomical_brain', 'reference_brain',
                     'anatomical_skull', 'reference_skull', 'wait'],
        output_names=['warp_list', 'warped_image'],
        function=hardcoded_reg), name='calc_ants_warp')

    def _make_select(node_name, selection):
        # one selector node per transform file: picks the entry of the
        # antsRegistration output list matching `selection`
        node = pe.Node(util.Function(input_names=['warp_list', 'selection'],
                                     output_names=['selected_warp'],
                                     function=seperate_warps_list),
                       name=node_name)
        node.inputs.selection = selection
        return node

    select_forward_initial = _make_select('select_forward_initial', 'Initial')
    select_forward_rigid = _make_select('select_forward_rigid', 'Rigid')
    select_forward_affine = _make_select('select_forward_affine', 'Affine')
    select_forward_warp = _make_select('select_forward_warp', '3Warp')
    select_inverse_warp = _make_select('select_inverse_warp', 'Inverse')

    outputspec = pe.Node(util.IdentityInterface(fields=['ants_initial_xfm',
        'ants_rigid_xfm', 'ants_affine_xfm', 'warp_field',
        'inverse_warp_field', 'composite_transform', 'wait',
        'normalized_output_brain']), name='outputspec')

    # connections from inputspec
    if mult_input == 1:
        # separate whole-head (skull) images are available
        calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
                                  calculate_ants_warp, 'anatomical_brain')
        calc_ants_warp_wf.connect(inputspec, 'anatomical_skull',
                                  calculate_ants_warp, 'anatomical_skull')
        calc_ants_warp_wf.connect(inputspec, 'reference_brain',
                                  calculate_ants_warp, 'reference_brain')
        calc_ants_warp_wf.connect(inputspec, 'reference_skull',
                                  calculate_ants_warp, 'reference_skull')
    else:
        # no skull images supplied: reuse the brain images in their place
        calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
                                  calculate_ants_warp, 'anatomical_brain')
        calc_ants_warp_wf.connect(inputspec, 'anatomical_brain',
                                  calculate_ants_warp, 'anatomical_skull')
        calc_ants_warp_wf.connect(inputspec, 'reference_brain',
                                  calculate_ants_warp, 'reference_brain')
        calc_ants_warp_wf.connect(inputspec, 'reference_brain',
                                  calculate_ants_warp, 'reference_skull')

    # NOTE(review): these fields are not listed in the Function node's
    # input_names -- connections were preserved from the original code;
    # confirm nipype's dynamic input spec accepts them
    for param in ('dimension', 'use_histogram_matching',
                  'winsorize_lower_quantile', 'winsorize_upper_quantile',
                  'metric', 'metric_weight', 'radius_or_number_of_bins',
                  'sampling_strategy', 'sampling_percentage',
                  'number_of_iterations', 'convergence_threshold',
                  'convergence_window_size', 'transforms',
                  'transform_parameters', 'shrink_factors',
                  'smoothing_sigmas', 'write_composite_transform'):
        calc_ants_warp_wf.connect(inputspec, param, calculate_ants_warp, param)

    # inter-workflow connections: fan the transform-file list out to the
    # per-transform selector nodes
    for selector in (select_forward_initial, select_forward_rigid,
                     select_forward_affine, select_forward_warp,
                     select_inverse_warp):
        calc_ants_warp_wf.connect(calculate_ants_warp, 'warp_list',
                                  selector, 'warp_list')

    # connections to outputspec
    calc_ants_warp_wf.connect(select_forward_initial, 'selected_warp',
                              outputspec, 'ants_initial_xfm')
    calc_ants_warp_wf.connect(select_forward_rigid, 'selected_warp',
                              outputspec, 'ants_rigid_xfm')
    calc_ants_warp_wf.connect(select_forward_affine, 'selected_warp',
                              outputspec, 'ants_affine_xfm')
    calc_ants_warp_wf.connect(select_forward_warp, 'selected_warp',
                              outputspec, 'warp_field')
    calc_ants_warp_wf.connect(select_inverse_warp, 'selected_warp',
                              outputspec, 'inverse_warp_field')
    calc_ants_warp_wf.connect(calculate_ants_warp, 'warped_image',
                              outputspec, 'normalized_output_brain')

    return calc_ants_warp_wf
def create_wf_apply_ants_warp(map_node, name='create_wf_apply_ants_warp'):
    """
    Apply previously calculated ANTS registration transforms to input
    images. This workflow employs the antsApplyTransforms tool:

    http://stnava.github.io/ANTs/

    Parameters
    ----------
    map_node : integer
        0 builds a plain Node; 1 builds a MapNode iterating over
        'input_image' and 'transforms'.
    name : string, optional
        Name of the workflow.

    Returns
    -------
    apply_ants_warp_wf : nipype.pipeline.engine.Workflow

    Notes
    -----
    Workflow Inputs::
        inputspec.input_image : string (nifti file)
            Image file of brain to be registered to reference
        inputspec.reference_image : string (nifti file)
            Image file of brain or template being used as a reference
        inputspec.transforms : list of filepaths (nifti, .mat, .txt)
            List of transforms and warps to be applied to the input image
        inputspec.dimension : integer
            Dimension value of image being registered (2, 3, or 4)
        inputspec.interpolation : string
            Type of interpolation to be used. See antsApplyTransforms
            documentation or Nipype interface documentation for options

    Workflow Outputs::
        outputspec.output_image : string (nifti file)
            Normalized output file
    """
    import nipype.interfaces.ants as ants

    apply_ants_warp_wf = pe.Workflow(name=name)

    inputspec = pe.Node(
        util.IdentityInterface(fields=['input_image', 'reference_image',
                                       'transforms', 'dimension',
                                       'input_image_type', 'interpolation']),
        name='inputspec')

    # a MapNode is used when warping one image/transform pair per run
    if map_node == 0:
        apply_ants_warp = pe.Node(interface=ants.ApplyTransforms(),
                                  name='apply_ants_warp')
    elif map_node == 1:
        apply_ants_warp = pe.MapNode(interface=ants.ApplyTransforms(),
                                     name='apply_ants_warp_mapnode',
                                     iterfield=['input_image', 'transforms'])

    apply_ants_warp.inputs.out_postfix = '_antswarp'

    outputspec = pe.Node(util.IdentityInterface(fields=['output_image']),
                         name='outputspec')

    # every inputspec field maps onto the identically named
    # antsApplyTransforms input
    for field in ('input_image', 'reference_image', 'transforms',
                  'dimension', 'input_image_type', 'interpolation'):
        apply_ants_warp_wf.connect(inputspec, field, apply_ants_warp, field)

    apply_ants_warp_wf.connect(apply_ants_warp, 'output_image',
                               outputspec, 'output_image')

    return apply_ants_warp_wf
def create_wf_c3d_fsl_to_itk(map_node, input_image_type=0, name='create_wf_c3d_fsl_to_itk'):
    """
    Convert an FSL-format output matrix to an ITK-format (ANTS) matrix
    for use with ANTS registration tools.

    Parameters
    ----------
    map_node : integer
        0 builds plain Nodes; 1 builds MapNodes (iterating over
        'source_file' / 'input_affine_file').
    input_image_type : integer, optional
        0 for a 3D source file; 3 for a 4D source file, which is averaged
        into a 3D file before conversion.
    name : string, optional
        Name of the workflow.

    Returns
    -------
    fsl_to_itk_conversion : nipype.pipeline.engine.Workflow

    Notes
    -----
    Workflow Inputs::
        inputspec.affine_file : string (nifti file)
            Output matrix of FSL-based functional to anatomical registration
        inputspec.reference_file : string (nifti file)
            File of skull-stripped anatomical brain to be used in affine
            conversion
        inputspec.source_file : string (nifti file)
            Should match the input of the apply warp (in_file) unless you are
            applying the warp to a 4-d file, in which case this file should
            be a mean_functional file

    Workflow Outputs::
        outputspec.itk_transform : string (nifti file)
            Converted affine transform in ITK format usable with ANTS
    """
    import nipype.interfaces.c3 as c3
    from nipype.interfaces.utility import Function
    from CPAC.registration.utils import change_itk_transform_type
    from nipype.interfaces.afni import preprocess

    fsl_to_itk_conversion = pe.Workflow(name=name)

    inputspec = pe.Node(
        util.IdentityInterface(fields=['affine_file', 'reference_file',
                                       'source_file']),
        name='inputspec')

    # converts FSL-format .mat affine xfm into ANTS-format .txt;
    # the .mat affine comes from Func->Anat registration
    if map_node == 0:
        fsl_reg_2_itk = pe.Node(c3.C3dAffineTool(), name='fsl_reg_2_itk')
        change_transform = pe.Node(
            util.Function(input_names=['input_affine_file'],
                          output_names=['updated_affine_file'],
                          function=change_itk_transform_type),
            name='change_transform_type')
    elif map_node == 1:
        fsl_reg_2_itk = pe.MapNode(c3.C3dAffineTool(),
                                   name='fsl_reg_2_itk_mapnode',
                                   iterfield=['source_file'])
        change_transform = pe.MapNode(
            util.Function(input_names=['input_affine_file'],
                          output_names=['updated_affine_file'],
                          function=change_itk_transform_type),
            name='change_transform_type', iterfield=['input_affine_file'])

    fsl_reg_2_itk.inputs.itk_transform = True
    fsl_reg_2_itk.inputs.fsl2ras = True

    outputspec = pe.Node(util.IdentityInterface(fields=['itk_transform']),
                         name='outputspec')

    fsl_to_itk_conversion.connect(inputspec, 'affine_file', fsl_reg_2_itk,
                                  'transform_file')
    fsl_to_itk_conversion.connect(inputspec, 'reference_file', fsl_reg_2_itk,
                                  'reference_file')

    # source_file input of the conversion must be a 3D file, so if the
    # source file is 4D (input_image_type=3), average it into a 3D file first
    if input_image_type == 0:
        fsl_to_itk_conversion.connect(inputspec, 'source_file', fsl_reg_2_itk,
                                      'source_file')
    elif input_image_type == 3:
        tstat_source = pe.Node(interface=preprocess.TStat(),
                               name='fsl_to_itk_tcat_source')
        tstat_source.inputs.outputtype = 'NIFTI_GZ'
        tstat_source.inputs.options = '-mean'

        fsl_to_itk_conversion.connect(inputspec, 'source_file', tstat_source,
                                      'in_file')
        fsl_to_itk_conversion.connect(tstat_source, 'out_file', fsl_reg_2_itk,
                                      'source_file')

    fsl_to_itk_conversion.connect(fsl_reg_2_itk, 'itk_transform',
                                  change_transform, 'input_affine_file')
    fsl_to_itk_conversion.connect(change_transform, 'updated_affine_file',
                                  outputspec, 'itk_transform')

    return fsl_to_itk_conversion
def create_wf_collect_transforms(map_node, name='create_wf_collect_transforms'):
    """
    Merge the individual ANTS transform files into the single ordered list
    expected by antsApplyTransforms.

    Parameters
    ----------
    map_node : integer
        0 builds a plain Merge Node; 1 builds a MapNode iterating over the
        per-functional 'fsl_to_itk_affine' input.
    name : string, optional
        Name of the workflow.

    Returns
    -------
    collect_transforms_wf : nipype.pipeline.engine.Workflow

    Notes
    -----
    Workflow Inputs::
        inputspec.warp_file : field file from anatomical nonlinear
            registration
        inputspec.linear_affine / linear_rigid / linear_initial : transforms
            from anatomical registration
        inputspec.fsl_to_itk_affine : premat from Func->Anat linear reg and
            bbreg (if bbreg is enabled)

    Workflow Outputs::
        outputspec.transformation_series : list of the five transforms in
            the order [warp, affine, rigid, initial, fsl_to_itk]
    """
    collect_transforms_wf = pe.Workflow(name=name)

    inputspec = pe.Node(
        util.IdentityInterface(fields=['warp_file', 'linear_initial',
                                       'linear_affine', 'linear_rigid',
                                       'fsl_to_itk_affine']),
        name='inputspec')

    if map_node == 0:
        collect_transforms = pe.Node(util.Merge(5), name='collect_transforms')
    elif map_node == 1:
        collect_transforms = pe.MapNode(util.Merge(5),
                                        name='collect_transforms_mapnode',
                                        iterfield=['in5'])

    outputspec = pe.Node(
        util.IdentityInterface(fields=['transformation_series']),
        name='outputspec')

    # slot assignment: in1=nonlinear warp field, in2=affine, in3=rigid,
    # in4=initial, in5=Func->Anat premat
    for field, slot in (('warp_file', 'in1'),
                        ('linear_affine', 'in2'),
                        ('linear_rigid', 'in3'),
                        ('linear_initial', 'in4'),
                        ('fsl_to_itk_affine', 'in5')):
        collect_transforms_wf.connect(inputspec, field,
                                      collect_transforms, slot)

    collect_transforms_wf.connect(collect_transforms, 'out', outputspec,
                                  'transformation_series')

    return collect_transforms_wf
| danlurie/C-PAC | CPAC/registration/registration.py | Python | bsd-3-clause | 41,865 | [
"Gaussian"
] | 65473d04482c56b855d1ea28aef5e0b7e1f86767e8abd2b3b8929a2d8256370b |
"""
==================================================
Image Processing and Analysis (:mod:`mango.image`)
==================================================
.. currentmodule:: mango.image
This module contains various functions for 3D image
processing and analysis.
Convolution-like Filters
========================
.. autosummary::
:toctree: generated/
convolve - 3D convolution with a specified 3D kernel/weight array.
sobel - 3D Sobel image gradient magnitude
discrete_gaussian - 3D convolution with Discrete-Gaussian kernel.
discrete_gaussian_kernel - 3D Discrete-Gaussian kernel.
discrete_gaussian_gradient_kernel - 3D Discrete-Gaussian gradient kernel.
discrete_gaussian_mean_stdd - 3D Discrete-Gaussian weighted mean and standard-deviation.
discrete_gaussian_gradient_magnitude - 3D Discrete-Gaussian convolved gradient magnitude.
Neighbourhood (structuring element) Filters
===========================================
Filter Functions
----------------
.. autosummary::
:toctree: generated/
mean_filter - Average intensity over neighbourhood.
median_filter - Median intensity over neighbourhood.
stdd_filter - Standard deviation over neighbourhood.
mad_filter - Median absolute deviation over neighbourhood.
bilateral_filter - Gaussian weighted bilateral filtering.
Structuring Element Factory Functions
-------------------------------------
.. autosummary::
:toctree: generated/
se - Create an arbitrarily shaped structuring element.
sphere_se - Create a spherical structuring element.
order_se - Create a spherical structuring element (using *neighbourhood order*).
box_se - Create a rectangular/box shaped structuring element.
Structuring Element Classes
---------------------------
.. autosummary::
:toctree: generated/
StructuringElement - Arbitrarily shaped structuring element.
SphereStructuringElement - Spherical structuring element.
OrderStructuringElement - Spherical structuring element.
BoxStructuringElement - Rectangular/box shaped structuring element.
Interpolation
=============
.. autosummary::
:toctree: generated/
resample - re-sample image on specified grid.
gaussian_downsample - re-sample an image on a coarse grid.
InterpolationType - types of interpolation.
affine_transform - Applies an affine transformation to an image.
rotate - Applies a rotation transformation to an image.
Measurement
===========
.. autosummary::
:toctree: generated/
histogramdd - Calculates :samp:`d` dimensional histogram from :samp:`d` :obj:`mango.Dds` arrays.
centre_of_mass - Calculate a centre of mass coordinate for an image.
moment_of_inertia - Calculate principal moment of inertia tensor for an image.
intensity_spherical_histogram - Populate a :func:`spherical_histogram` with *intensity counts*.
distance_spherical_histogram - Populate a :func:`spherical_histogram` with *distance counts*.
intensity_mult_distance_spherical_histogram - Populate a :func:`spherical_histogram` with *intensity times distance counts*.
label - Generates an image where each connected component has a unique label.
eliminate_labels_by_size - Removes labels from a labeled image whose size (number of voxels) lies in a specified range.
convex_hull_2d - Calculates per-slice convex hull of non-masked voxels in an image.
convex_hull_3d - Calculates convex hull of non-masked voxels in an image.
Morphology
==========
.. autosummary::
:toctree: generated/
distance_transform_edt - Calculates Euclidean distance transform.
max_covering_radius - Calculates maximal covering sphere transform of a Euclidean distance transform image.
Miscellaneous
=============
.. autosummary::
:toctree: generated/
crop - Crop an image.
auto_crop - Crop an image to minimal bounding box of non-masked values.
subset - Crop an image (same as :func:`crop`).
subsample - sample image on a regular sub-grid.
gather_slice - copy a 2D slice from a 3D :obj:`mango.Dds` to a single MPI process.
SphericalHistogram - Histogram of triangulated sphere surface (triangular bins).
spherical_histogram - Factory method for creating a :obj:`SphericalHistogram` instance.
"""
# Core open/filter helpers; ``mango`` itself is imported to query the
# optional-feature flags used below.
from ._dds_open_filters import *
import mango
# Registration support is optional -- only exposed when the mango build
# reports it as available.
if (mango.haveRegistration):
    from . import registration
    from .registration import affine_transform, rotate, rotation_matrix
from ._utils import *
# Export every public (non-underscore) name defined so far.
__all__ = [s for s in dir() if not s.startswith('_')]
# Additional filter implementations, gated by the 'restricted' feature flag
# (presumably a build-time configuration -- TODO confirm).
if (mango.haveRestricted):
    from ._filters import *
    from ._DiscreteGaussian import *
    from ._FmmImage import *
| pymango/pymango | misc/python/mango/image/__init__.py | Python | bsd-2-clause | 4,612 | [
"Gaussian"
] | 59935aa95df25610a59cf650a6057fff108ebcfdafe010e3280a3e377959fb08 |
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# This file is part of PyPosAST.
# Please, consult the license terms in the LICENSE file.
"""PyPosAST Module"""
from __future__ import (absolute_import, division)
import ast
from .visitor import LineProvenanceVisitor as Visitor, extract_code
from .cross_version import native_decode_source, decode_source_to_unicode
def parse(code, filename='<unknown>', mode='exec', tree=None, **parse_args):
    """Parse the source into an AST node enhanced with position information
    by PyPosAST.

    Arguments:
    code -- code text

    Keyword Arguments:
    filename -- code path
    mode -- execution mode (exec, eval, single)
    tree -- current tree, if it was optimized
    """
    provenance_visitor = Visitor(code, filename, mode, tree=tree, **parse_args)
    return provenance_visitor.tree
class _GetVisitor(ast.NodeVisitor):
"""Visit nodes and store them in .result if they match the given type"""
def __init__(self, tree, desired_type):
self.desired_type = desired_type
self.result = []
self.visit(tree)
def generic_visit(self, node):
if isinstance(node, self.desired_type):
self.result.append(node)
return ast.NodeVisitor.generic_visit(self, node)
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None, **parse_args):
    """Return all nodes of a given type found in *code*.

    Arguments:
    code -- code text
    desired_type -- ast Node or tuple

    Keyword Arguments:
    path -- code path
    mode -- execution mode (exec, eval, single)
    tree -- current tree, if it was optimized
    """
    enhanced_tree = parse(code, path, mode, tree, **parse_args)
    return _GetVisitor(enhanced_tree, desired_type).result
| JoaoFelipe/pyposast | pyposast/__init__.py | Python | mit | 1,704 | [
"VisIt"
] | d524399e343ecbb4bd66f515f6ccb0a779b268d23a82aeadb9e798f3b39920fa |
# Copyright 2008, 2009, 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import sys
import textwrap
from distutils.spawn import find_executable
from gi.repository import Gtk, GLib
from . import Utils, Actions, Constants
from ..core import Messages
class SimpleTextDisplay(Gtk.TextView):
    """
    A non user-editable gtk text view.
    """
    def __init__(self, text=''):
        """
        SimpleTextDisplay constructor.
        Args:
            text: the text to display (string)
        """
        Gtk.TextView.__init__(self)
        # expose the buffer's set_text directly on the widget for convenience
        self.set_text = self.get_buffer().set_text
        self.set_text(text)
        # read-only view: no editing, no visible cursor
        self.set_editable(False)
        self.set_cursor_visible(False)
        # wrap long lines at word boundaries, falling back to characters
        self.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
class TextDisplay(SimpleTextDisplay):
    """
    A non user-editable scrollable text view with popup menu.

    The popup menu offers scroll-lock toggling, saving and clearing of the
    console buffer; the menu callbacks trigger the corresponding Actions.
    """

    def __init__(self, text=''):
        """
        TextDisplay constructor.

        Args:
            text: the text to display (string)
        """
        SimpleTextDisplay.__init__(self, text)
        self.scroll_lock = True
        self.connect("populate-popup", self.populate_popup)

    def insert(self, line):
        """
        Append text after handling backspaces and auto-scroll.

        Args:
            line: the text to append (string)
        """
        line = self._consume_backspaces(line)
        self.get_buffer().insert(self.get_buffer().get_end_iter(), line)
        self.scroll_to_end()

    def _consume_backspaces(self, line):
        r"""
        Remove text from the buffer if line starts with '\b'.

        Args:
            line: a string which may contain backspaces

        Returns:
            The string that remains from 'line' with leading '\b's removed.
        """
        if not line:
            # BUG FIX: previously returned None here, which made insert()
            # pass None to Gtk.TextBuffer.insert(); return the (empty)
            # line unchanged instead.
            return line
        # for each \b delete one char from the buffer
        back_count = 0
        start_iter = self.get_buffer().get_end_iter()
        while len(line) > back_count and line[back_count] == '\b':
            # stop at the beginning of a line
            if not start_iter.starts_line():
                start_iter.backward_char()
            back_count += 1
        # remove chars from buffer
        self.get_buffer().delete(start_iter, self.get_buffer().get_end_iter())
        return line[back_count:]

    def scroll_to_end(self):
        """ Update view's scroll position (only while scroll lock is on). """
        if self.scroll_lock:
            buf = self.get_buffer()
            mark = buf.get_insert()
            buf.move_mark(mark, buf.get_end_iter())
            self.scroll_mark_onscreen(mark)

    def clear(self):
        """ Clear all text from buffer. """
        buf = self.get_buffer()
        buf.delete(buf.get_start_iter(), buf.get_end_iter())

    def save(self, file_path):
        """
        Save contents of buffer to the given file.

        Args:
            file_path: location to save buffer contents
        """
        with open(file_path, 'w') as logfile:
            buf = self.get_buffer()
            logfile.write(buf.get_text(buf.get_start_iter(),
                                       buf.get_end_iter(), True))

    # Action functions are set by the Application's init function
    def clear_cb(self, menu_item, web_view):
        """ Callback function to clear the text buffer """
        Actions.CLEAR_CONSOLE()

    def scroll_back_cb(self, menu_item, web_view):
        """ Callback function to toggle scroll lock """
        Actions.TOGGLE_SCROLL_LOCK()

    def save_cb(self, menu_item, web_view):
        """ Callback function to save the buffer """
        Actions.SAVE_CONSOLE()

    def populate_popup(self, view, menu):
        """Create a popup menu for the scroll lock and clear functions"""
        menu.append(Gtk.SeparatorMenuItem())

        lock = Gtk.CheckMenuItem(label="Scroll Lock")
        menu.append(lock)
        lock.set_active(self.scroll_lock)
        lock.connect('activate', self.scroll_back_cb, view)

        save = Gtk.ImageMenuItem(label="Save Console")
        menu.append(save)
        save.connect('activate', self.save_cb, view)

        clear = Gtk.ImageMenuItem(label="Clear Console")
        menu.append(clear)
        clear.connect('activate', self.clear_cb, view)
        menu.show_all()
        return False
class MessageDialogWrapper(Gtk.MessageDialog):
    """ Run a message dialog. """

    def __init__(self, parent, message_type, buttons, title=None, markup=None,
                 default_response=None, extra_buttons=None):
        """
        Create a modal message dialog.

        Args:
            parent: the transient parent window
            message_type: one of the Gtk.MessageType constants
                (INFO, WARNING, QUESTION or ERROR)
            buttons: one of the predefined Gtk.ButtonsType sets
                (NONE, OK, CLOSE, CANCEL, YES_NO or OK_CANCEL)
            title: optional window title (string)
            markup: optional message text with pango markup
            default_response: optional response id of the button highlighted
                by default
            extra_buttons: optional flat tuple of (button text, response id)
                pairs appended to the predefined buttons
        """
        Gtk.MessageDialog.__init__(
            self, transient_for=parent, modal=True, destroy_with_parent=True,
            message_type=message_type, buttons=buttons
        )
        # Apply the optional customisations only when supplied.
        for value, apply_ in ((title, self.set_title), (markup, self.set_markup)):
            if value:
                apply_(value)
        if extra_buttons:
            self.add_buttons(*extra_buttons)
        # Set last so the default may refer to one of the extra buttons.
        if default_response:
            self.set_default_response(default_response)

    def run_and_destroy(self):
        """ Run the dialog, hide it when dismissed, return the response id. """
        try:
            return self.run()
        finally:
            self.hide()
class ErrorsDialog(Gtk.Dialog):
    """ Display flowgraph errors. """

    # Column headers; order matches the (src, aspect, message) store rows.
    _COLUMN_TITLES = ("Block", "Aspect", "Message")

    def __init__(self, parent, flowgraph):
        """Create a listview of errors"""
        Gtk.Dialog.__init__(
            self,
            title='Errors and Warnings',
            transient_for=parent,
            modal=True,
            destroy_with_parent=True,
        )
        self.add_buttons(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)
        self.set_size_request(750, Constants.MIN_DIALOG_HEIGHT)
        self.set_border_width(10)

        self.store = Gtk.ListStore(str, str, str)
        self.update(flowgraph)

        self.treeview = Gtk.TreeView(model=self.store)
        for index, heading in enumerate(self._COLUMN_TITLES):
            cell = Gtk.CellRendererText()
            col = Gtk.TreeViewColumn(heading, cell, text=index)
            col.set_sort_column_id(index)  # liststore id matches treeview id
            col.set_resizable(True)
            self.treeview.append_column(col)

        self.scrollable = Gtk.ScrolledWindow()
        self.scrollable.set_vexpand(True)
        self.scrollable.add(self.treeview)

        self.vbox.pack_start(self.scrollable, True, True, 0)
        self.show_all()

    def update(self, flowgraph):
        """Refill the store from the flowgraph's current error messages."""
        self.store.clear()
        for element, message in flowgraph.iter_error_messages():
            src = aspect = ''
            if element.is_block:
                src = element.name
            elif element.is_connection:
                src = element.source_block.name
                aspect = "Connection to '{}'".format(element.sink_block.name)
            elif element.is_port:
                src = element.parent_block.name
                aspect = "{} '{}'".format('Sink' if element.is_sink else 'Source', element.name)
            elif element.is_param:
                src = element.parent_block.name
                aspect = "Param '{}'".format(element.name)
            self.store.append([src, aspect, message])

    def run_and_destroy(self):
        """ Run the dialog, hide it when dismissed, return the response id. """
        try:
            return self.run()
        finally:
            self.hide()
def show_about(parent, config):
    """Display the About dialog populated from the platform configuration."""
    dialog = Gtk.AboutDialog(transient_for=parent)
    dialog.set_program_name(config.name)
    dialog.set_name('')
    dialog.set_license(config.license)
    python_version = sys.version.split()[0]
    dialog.set_version("{} (Python {})".format(config.version, python_version))
    try:
        dialog.set_logo(Gtk.IconTheme().load_icon('gnuradio-grc', 64, 0))
    except GLib.Error:
        Messages.send("Failed to set window logo\n")
    # First line of the license text carries the copyright notice.
    dialog.set_copyright(config.license.splitlines()[0])
    dialog.set_website(config.website)
    dialog.connect("response", lambda action, param: action.hide())
    dialog.show()
def show_help(parent):
    """ Display basic usage tips. """
    # Pango markup; the trailing backslashes keep textwrap.dedent/the literal
    # from introducing unwanted blank lines.
    markup = textwrap.dedent("""\
        <b>Usage Tips</b>
        \n\
        <u>Add block</u>: drag and drop or double click a block in the block
        selection window.
        <u>Rotate block</u>: Select a block, press left/right on the keyboard.
        <u>Change type</u>: Select a block, press up/down on the keyboard.
        <u>Edit parameters</u>: double click on a block in the flow graph.
        <u>Make connection</u>: click on the source port of one block, then
        click on the sink port of another block.
        <u>Remove connection</u>: select the connection and press delete, or
        drag the connection.
        \n\
        *Press Ctrl+K or see menu for Keyboard - Shortcuts
        \
        """)
    # "Ctrl" is replaced with the platform's modifier key name (e.g. Cmd on Mac).
    markup = markup.replace("Ctrl", Utils.get_modifier_key())

    MessageDialogWrapper(
        parent, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, title='Help', markup=markup
    ).run_and_destroy()
def show_keyboard_shortcuts(parent):
    """ Display keyboard shortcut-keys. """
    # Pango markup; "Ctrl" is swapped below for the platform's modifier key.
    markup = textwrap.dedent("""\
        <b>Keyboard Shortcuts</b>
        \n\
        <u>Ctrl+N</u>: Create a new flowgraph.
        <u>Ctrl+O</u>: Open an existing flowgraph.
        <u>Ctrl+S</u>: Save the current flowgraph or save as for new.
        <u>Ctrl+W</u>: Close the current flowgraph.
        <u>Ctrl+Z</u>: Undo a change to the flowgraph.
        <u>Ctrl+Y</u>: Redo a change to the flowgraph.
        <u>Ctrl+A</u>: Selects all blocks and connections.
        <u>Ctrl+P</u>: Screen Capture of the Flowgraph.
        <u>Ctrl+Shift+P</u>: Save the console output to file.
        <u>Ctrl+L</u>: Clear the console.
        <u>Ctrl+E</u>: Show variable editor.
        <u>Ctrl+F</u>: Search for a block by name.
        <u>Ctrl+Q</u>: Quit.
        <u>F1</u>    : Help menu.
        <u>F5</u>    : Generate the Flowgraph.
        <u>F6</u>    : Execute the Flowgraph.
        <u>F7</u>    : Kill the Flowgraph.
        <u>Ctrl+Shift+S</u>: Save as the current flowgraph.
        <u>Ctrl+Shift+D</u>: Create a duplicate of current flow graph.

        <u>Ctrl+X/C/V</u>: Edit-cut/copy/paste.
        <u>Ctrl+D/B/R</u>: Toggle visibility of disabled blocks or
        connections/block tree widget/console.
        <u>Shift+T/M/B/L/C/R</u>: Vertical Align Top/Middle/Bottom and
        Horizontal Align Left/Center/Right respectively of the
        selected block.
        \
        """)
    # Substitute the platform-specific modifier key name (e.g. Cmd on Mac OS).
    markup = markup.replace("Ctrl", Utils.get_modifier_key())

    MessageDialogWrapper(
        parent, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, title='Keyboard - Shortcuts', markup=markup
    ).run_and_destroy()
def show_get_involved(parent):
    """Get Involved Instructions"""
    # Static informational dialog; links open in the user's browser via pango markup.
    markup = textwrap.dedent("""\
        <tt><b>Welcome to GNU Radio Community!</b></tt>
        \n\
        <tt>For more details on contributing to GNU Radio and getting engaged with our great community visit </tt><a href="https://www.gnuradio.org/get-involved">here</a>.
        \n\
        <tt>You can also join our <a href="https://slack.gnuradio.org/">Slack Channel</a>, IRC Channel (#gnuradio) or contact through our <a href="https://lists.gnu.org/mailman/listinfo/discuss-gnuradio">mailing list(discuss-gnuradio)</a></tt>.
        \
        """)

    MessageDialogWrapper(
        parent, Gtk.MessageType.QUESTION, Gtk.ButtonsType.CLOSE, title='Get - Involved', markup=markup
    ).run_and_destroy()
def show_types(parent):
    """ Display information about standard data types. """
    # (name, color) pairs for every core type known to the platform.
    type_colors = [(name, color) for name, key, sizeof, color in Constants.CORE_TYPES]
    # Pad every label to a common width so the colored spans line up.
    pad = 10 + max(len(name) for name, _ in type_colors)
    message = '\n'.join(
        '<span background="{color}"><tt>{name}</tt></span>'.format(
            color=color, name=Utils.encode(name).center(pad))
        for name, color in type_colors
    )

    MessageDialogWrapper(
        parent, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, title='Types - Color Mapping', markup=message
    ).run_and_destroy()
def show_missing_xterm(parent, xterm):
    """Warn that the configured xterm executable was not found (shown once)."""
    markup = textwrap.dedent("""\
        The xterm executable {0!r} is missing.
        You can change this setting in your gnuradio.conf, in section [grc], 'xterm_executable'.
        \n\
        (This message is shown only once)\
        """).format(xterm)

    MessageDialogWrapper(
        parent, message_type=Gtk.MessageType.WARNING, buttons=Gtk.ButtonsType.OK,
        title='Warning: missing xterm executable', markup=markup
    ).run_and_destroy()
def choose_editor(parent, config):
    """
    Give the option to either choose an editor or use the default.

    Returns:
        the path of the editor to use, or '' when nothing was selected
    """
    # Short-circuit: reuse the configured editor while it still resolves
    # to an existing executable.
    if config.editor and find_executable(config.editor):
        return config.editor

    buttons = (
        'Choose Editor', Gtk.ResponseType.YES,
        'Use Default', Gtk.ResponseType.NO,
        Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL
    )
    response = MessageDialogWrapper(
        parent, message_type=Gtk.MessageType.QUESTION, buttons=Gtk.ButtonsType.NONE,
        title='Choose Editor', markup='Would you like to choose the editor to use?',
        default_response=Gtk.ResponseType.YES, extra_buttons=buttons
    ).run_and_destroy()

    # Handle the initial default/choose/cancel response
    # User wants to choose the editor to use
    editor = ''
    if response == Gtk.ResponseType.YES:
        file_dialog = Gtk.FileChooserDialog(
            'Select an Editor...', None,
            Gtk.FileChooserAction.OPEN,
            ('gtk-cancel', Gtk.ResponseType.CANCEL, 'gtk-open', Gtk.ResponseType.OK),
            transient_for=parent
        )
        file_dialog.set_select_multiple(False)
        file_dialog.set_local_only(True)
        file_dialog.set_current_folder('/usr/bin')
        try:
            if file_dialog.run() == Gtk.ResponseType.OK:
                # NOTE(review): unlike the default branch below, the chosen
                # editor is not written back to config.editor here -- confirm
                # whether the selection is persisted elsewhere.
                editor = file_dialog.get_filename()
        finally:
            file_dialog.hide()

    # Go with the default editor
    elif response == Gtk.ResponseType.NO:
        try:
            process = None
            # Pick the platform's generic "open with default app" helper.
            if sys.platform.startswith('linux'):
                process = find_executable('xdg-open')
            elif sys.platform.startswith('darwin'):
                process = find_executable('open')
            if process is None:
                raise ValueError("Can't find default editor executable")
            # Save
            editor = config.editor = process
        except Exception:
            # Best-effort: fall through with editor == '' and tell the user.
            Messages.send('>>> Unable to load the default editor. Please choose an editor.\n')

    if editor == '':
        Messages.send('>>> No editor selected.\n')
    return editor
| trabucayre/gnuradio | grc/gui/Dialogs.py | Python | gpl-3.0 | 15,455 | [
"VisIt"
] | 58ccccf0ec0a425e5d49a93abcf1d1f907d09689ae470398000a757a009d1045 |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import tempfile
import mock
import sys
import matplotlib
if 'DISPLAY' not in os.environ: # noqa
matplotlib.use('Agg') # noqa
import neurom
from neurom.view import common, plotly
from neurom import load_neuron, viewer, NeuriteType
from nose import tools as nt
from numpy.testing import assert_allclose
_PWD = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_PWD, '../../test_data/swc')
MORPH_FILENAME = os.path.join(DATA_PATH, 'Neuron.swc')
nrn = load_neuron(MORPH_FILENAME)
def _reload_module(module):
    '''Force module reload'''
    if sys.version_info < (3,):
        # On Python 2, reload() is a builtin.
        reload(module)
    else:
        import importlib
        importlib.reload(module)
def test_plotly_extra_not_installed():
    """Reloading neurom.view.plotly without plotly must raise a helpful ImportError."""
    with mock.patch.dict(sys.modules, {'plotly': None}):
        try:
            _reload_module(neurom.view.plotly)
        except ImportError as e:
            nt.assert_equal(str(e),
                            'neurom[plotly] is not installed. '
                            'Please install it by doing: pip install neurom[plotly]')
        else:
            nt.ok_(False, "ImportError not triggered")
def test_plotly_draw_neuron():
    """Smoke-test the 2D (xy-plane) plotly rendering of a neuron.

    Bug fix: this test previously passed plane='3d' while the *3d* test
    passed plane='xy' -- the arguments were swapped between the two tests.
    """
    plotly.draw(nrn, plane='xy', auto_open=False)
def test_plotly_draw_neuron3d():
    """Smoke-test the 3D plotly rendering of a neuron.

    Bug fix: this test previously passed plane='xy' while the non-3d test
    passed plane='3d' -- the arguments were swapped between the two tests.
    """
    plotly.draw(nrn, plane='3d', auto_open=False)
def test_draw_neuron():
    """Smoke test: draw a whole neuron with the default (2d) mode."""
    viewer.draw(nrn)
    common.plt.close('all')  # free matplotlib figures so tests stay light
def test_draw_filter_neurite():
    """Drawing only basal dendrites must restrict the plot's y-extent."""
    for mode in ['2d', '3d']:
        viewer.draw(nrn, mode=mode, neurite_type=NeuriteType.basal_dendrite)
        # Expected y-limits of the basal dendrite bounding box (loose tolerance).
        assert_allclose(common.plt.gca().get_ylim(),
                        [-30., 78], atol=5)
    common.plt.close('all')
def test_draw_neuron3d():
    """Smoke test: draw a whole neuron in 3d mode."""
    viewer.draw(nrn, mode='3d')
    common.plt.close('all')
def test_draw_tree():
    """Smoke test: draw a single neurite tree (2d)."""
    viewer.draw(nrn.neurites[0])
    common.plt.close('all')
def test_draw_tree3d():
    """Smoke test: draw a single neurite tree in 3d mode."""
    viewer.draw(nrn.neurites[0], mode='3d')
    common.plt.close('all')
def test_draw_soma():
    """Smoke test: draw the soma (2d)."""
    viewer.draw(nrn.soma)
    common.plt.close('all')
def test_draw_soma3d():
    """Smoke test: draw the soma in 3d mode."""
    viewer.draw(nrn.soma, mode='3d')
    common.plt.close('all')
def test_draw_dendrogram():
    """Smoke test: draw the neuron as a dendrogram."""
    viewer.draw(nrn, mode='dendrogram')
    common.plt.close('all')
@nt.raises(viewer.InvalidDrawModeError)
def test_invalid_draw_mode_raises():
    """An unknown mode string must raise InvalidDrawModeError."""
    viewer.draw(nrn, mode='4d')
@nt.raises(viewer.NotDrawableError)
def test_invalid_object_raises():
    """Objects the viewer does not know how to draw raise NotDrawableError."""
    class Dummy(object):
        pass
    viewer.draw(Dummy())
@nt.raises(viewer.NotDrawableError)
def test_invalid_combo_raises():
    """A soma cannot be drawn in dendrogram mode; must raise NotDrawableError."""
    viewer.draw(nrn.soma, mode='dendrogram')
def test_writing_output():
    """viewer.draw with output_path must write the figure into that subdir."""
    fig_name = 'Figure.png'
    tempdir = tempfile.mkdtemp('test_viewer')
    try:
        old_dir = os.getcwd()  # captured before chdir so finally can restore it
        os.chdir(tempdir)
        viewer.draw(nrn, mode='2d', output_path='subdir')
        nt.ok_(os.path.isfile(os.path.join(tempdir, 'subdir', fig_name)))
    finally:
        os.chdir(old_dir)
        shutil.rmtree(tempdir)  # always clean up the temporary directory
    common.plt.close('all')
| lidakanari/NeuroM | neurom/tests/test_viewer.py | Python | bsd-3-clause | 4,652 | [
"NEURON"
] | 0690af4b1873ec6b409e178ec7f44f0c5d28749eaa22d68223f5f4bcb2ca867d |
#
# @file TestAncestor.py
# @brief SBML ancestor objects unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestAncestor.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestAncestor(unittest.TestCase):
  def test_AlgebraicRule_ancestor_create(self):
    """Ancestors of an AlgebraicRule created via Model.createAlgebraicRule()."""
    m = libsbml.Model(2,4)
    r = m.createAlgebraicRule()
    lo = m.getListOfRules()
    self.assert_( r.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( r.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( r.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( r.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getRule(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_AssignmentRule_ancestor_create(self):
    """Ancestors of an AssignmentRule created via Model.createAssignmentRule()."""
    m = libsbml.Model(2,4)
    r = m.createAssignmentRule()
    lo = m.getListOfRules()
    self.assert_( r.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( r.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( r.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( r.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getRule(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_CompartmentType_ancestor_add(self):
    """Ancestors of a CompartmentType added with Model.addCompartmentType()."""
    ct = libsbml.CompartmentType(2,4)
    m = libsbml.Model(2,4)
    ct.setId("ct")
    m.addCompartmentType(ct)
    ct = None
    lo = m.getListOfCompartmentTypes()
    obj = m.getCompartmentType(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_CompartmentType_ancestor_create(self):
    """Ancestors of a CompartmentType created via Model.createCompartmentType()."""
    m = libsbml.Model(2,4)
    ct = m.createCompartmentType()
    lo = m.getListOfCompartmentTypes()
    self.assert_( ct.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getCompartmentType(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_Compartment_ancestor_add(self):
    """Ancestors of a Compartment added with Model.addCompartment()."""
    c = libsbml.Compartment(2,4)
    c.setId("C")
    m = libsbml.Model(2,4)
    m.addCompartment(c)
    c = None
    lo = m.getListOfCompartments()
    obj = m.getCompartment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_Compartment_ancestor_create(self):
    """Ancestors of a Compartment created via Model.createCompartment()."""
    m = libsbml.Model(2,4)
    c = m.createCompartment()
    lo = m.getListOfCompartments()
    self.assert_( c.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( c.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( c.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( c.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getCompartment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_Constraint_ancestor_add(self):
    """Ancestors of a Constraint added with Model.addConstraint()."""
    ct = libsbml.Constraint(2,4)
    m = libsbml.Model(2,4)
    ct.setMath(libsbml.parseFormula("k+k"))
    m.addConstraint(ct)
    ct = None
    lo = m.getListOfConstraints()
    obj = m.getConstraint(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_Constraint_ancestor_create(self):
    """Ancestors of a Constraint created via Model.createConstraint()."""
    m = libsbml.Model(2,4)
    ct = m.createConstraint()
    lo = m.getListOfConstraints()
    self.assert_( ct.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ct.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getConstraint(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_Delay_ancestor_add(self):
    """Ancestors of a Delay attached with Event.setDelay()."""
    d = libsbml.Delay(2,4)
    e = libsbml.Event(2,4)
    e.setDelay(d)
    d = None
    obj = e.getDelay()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    e = None
    pass
  def test_Delay_ancestor_create(self):
    """Ancestors of a Delay created via Event.createDelay()."""
    e = libsbml.Event(2,4)
    ea = e.createDelay()
    self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = e.getDelay()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_Delay_ancestor_create_model(self):
    """Ancestors of a Delay created via Model.createDelay() (Event + Model)."""
    m = libsbml.Model(2,4)
    e = m.createEvent()
    ea = m.createDelay()
    self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = e.getDelay()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_EventAssignment_ancestor_add(self):
    """Ancestors of an EventAssignment added with Event.addEventAssignment()."""
    e = libsbml.Event(2,4)
    ea = libsbml.EventAssignment(2,4)
    ea.setVariable("c")
    ea.setMath(libsbml.parseFormula("K+L"))
    e.addEventAssignment(ea)
    ea = None
    lo = e.getListOfEventAssignments()
    obj = e.getEventAssignment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_EventAssignment_ancestor_create(self):
    """Ancestors of an EventAssignment created via Event.createEventAssignment()."""
    e = libsbml.Event(2,4)
    ea = e.createEventAssignment()
    lo = e.getListOfEventAssignments()
    self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = e.getEventAssignment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_EventAssignment_ancestor_create_model(self):
    """Ancestors of an EventAssignment created via Model.createEventAssignment()."""
    m = libsbml.Model(2,4)
    e = m.createEvent()
    ea = m.createEventAssignment()
    lo = e.getListOfEventAssignments()
    self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = e.getEventAssignment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_Event_ancestor_add(self):
    """Ancestors of an Event added with Model.addEvent()."""
    e = libsbml.Event(2,4)
    m = libsbml.Model(2,4)
    t = libsbml.Trigger(2,4)
    e.setTrigger(t)
    e.createEventAssignment()
    m.addEvent(e)
    e = None
    lo = m.getListOfEvents()
    obj = m.getEvent(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    pass
  def test_Event_ancestor_create(self):
    """Ancestors of an Event created via Model.createEvent()."""
    m = libsbml.Model(2,4)
    e = m.createEvent()
    lo = m.getListOfEvents()
    self.assert_( e.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( e.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( e.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( e.getAncestorOfType(libsbml.SBML_PARAMETER) == None )
    obj = m.getEvent(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_FunctionDefinition_ancestor_add(self):
    """Ancestors of a FunctionDefinition added with Model.addFunctionDefinition()."""
    fd = libsbml.FunctionDefinition(2,4)
    m = libsbml.Model(2,4)
    fd.setId("fd")
    fd.setMath(libsbml.parseFormula("l"))
    m.addFunctionDefinition(fd)
    fd = None
    lo = m.getListOfFunctionDefinitions()
    obj = m.getFunctionDefinition(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_FunctionDefinition_ancestor_create(self):
    """Ancestors of a FunctionDefinition created via Model.createFunctionDefinition()."""
    m = libsbml.Model(2,4)
    fd = m.createFunctionDefinition()
    lo = m.getListOfFunctionDefinitions()
    self.assert_( fd.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( fd.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( fd.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( fd.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getFunctionDefinition(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_InitialAssignment_ancestor_add(self):
    """Ancestors of an InitialAssignment added with Model.addInitialAssignment()."""
    ia = libsbml.InitialAssignment(2,4)
    m = libsbml.Model(2,4)
    ia.setSymbol("c")
    ia.setMath(libsbml.parseFormula("9"))
    m.addInitialAssignment(ia)
    ia = None
    lo = m.getListOfInitialAssignments()
    obj = m.getInitialAssignment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_InitialAssignment_ancestor_create(self):
    """Ancestors of an InitialAssignment created via Model.createInitialAssignment()."""
    m = libsbml.Model(2,4)
    ia = m.createInitialAssignment()
    lo = m.getListOfInitialAssignments()
    self.assert_( ia.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( ia.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( ia.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( ia.getAncestorOfType(libsbml.SBML_EVENT) == None )
    obj = m.getInitialAssignment(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
    pass
  def test_KineticLaw_Parameter_ancestor_add(self):
    """Ancestors of a local Parameter added with KineticLaw.addParameter()."""
    kl = libsbml.KineticLaw(2,4)
    p = libsbml.Parameter(2,4)
    p.setId("jake")
    kl.addParameter(p)
    p = None
    lop = kl.getListOfParameters()
    obj = kl.getParameter(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_KINETIC_LAW) == kl )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lop )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    kl = None
    pass
  def test_KineticLaw_Parameter_ancestor_create(self):
    """Ancestors of a local Parameter created via KineticLaw.createParameter()."""
    kl = libsbml.KineticLaw(2,4)
    p = kl.createParameter()
    self.assert_( kl.getNumParameters() == 1 )
    lop = kl.getListOfParameters()
    self.assert_( p.getAncestorOfType(libsbml.SBML_KINETIC_LAW) == kl )
    self.assert_( p.getAncestorOfType(libsbml.SBML_LIST_OF) == lop )
    self.assert_( p.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( p.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = kl.getParameter(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_KINETIC_LAW) == kl )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lop )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    kl = None
    pass
  def test_KineticLaw_Parameter_ancestor_create_model(self):
    """Ancestors of a local Parameter created via Model.createKineticLawParameter()."""
    m = libsbml.Model(2,4)
    r = m.createReaction()
    kl = m.createKineticLaw()
    p = m.createKineticLawParameter()
    self.assert_( kl.getNumParameters() == 1 )
    lop = kl.getListOfParameters()
    self.assert_( p.getAncestorOfType(libsbml.SBML_KINETIC_LAW) == kl )
    self.assert_( p.getAncestorOfType(libsbml.SBML_LIST_OF) == lop )
    self.assert_( p.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( p.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( p.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( p.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    obj = kl.getParameter(0)
    self.assert_( obj.getAncestorOfType(libsbml.SBML_KINETIC_LAW) == kl )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lop )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
    kl = None
    pass
  def test_KineticLaw_ancestor_add(self):
    """Ancestors of a KineticLaw attached with Reaction.setKineticLaw()."""
    kl = libsbml.KineticLaw(2,4)
    r = libsbml.Reaction(2,4)
    r.setKineticLaw(kl)
    obj = r.getKineticLaw()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    r = None
    pass
  def test_KineticLaw_ancestor_create(self):
    """Ancestors of a KineticLaw created via Reaction.createKineticLaw()."""
    r = libsbml.Reaction(2,4)
    kl = r.createKineticLaw()
    self.assert_( kl.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_DELAY) == None )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_MODEL) == None )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    obj = r.getKineticLaw()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DELAY) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    r = None
    pass
  def test_KineticLaw_ancestor_create_model(self):
    """Ancestors of a KineticLaw created under a Model-owned Reaction."""
    m = libsbml.Model(2,4)
    r = m.createReaction()
    kl = r.createKineticLaw()
    self.assert_( kl.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_DELAY) == None )
    self.assert_( kl.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    obj = r.getKineticLaw()
    self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
    self.assert_( obj.getAncestorOfType(libsbml.SBML_DELAY) == None )
    r = None
    pass
  def test_Model_ancestor_add(self):
    """Ancestors of a Model attached with SBMLDocument.setModel()."""
    d = libsbml.SBMLDocument(2,4)
    m = libsbml.Model(2,4)
    d.setModel(m)
    self.assert_( d == d.getModel().getAncestorOfType(libsbml.SBML_DOCUMENT) )
    d = None
    pass
def test_Model_ancestor_create(self):
d = libsbml.SBMLDocument()
m = d.createModel()
self.assert_( m.getAncestorOfType(libsbml.SBML_DOCUMENT) == d )
obj = d.getModel()
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == d )
d = None
pass
def test_Parameter_ancestor_add(self):
ia = libsbml.Parameter(2,4)
m = libsbml.Model(2,4)
ia.setId("p")
m.addParameter(ia)
ia = None
lo = m.getListOfParameters()
obj = m.getParameter(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Parameter_ancestor_create(self):
m = libsbml.Model(2,4)
p = m.createParameter()
lo = m.getListOfParameters()
self.assert_( p.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( p.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( p.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( p.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getParameter(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_RateRule_ancestor_create(self):
m = libsbml.Model(2,4)
r = m.createRateRule()
lo = m.getListOfRules()
self.assert_( r.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( r.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( r.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( r.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getRule(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Reaction_ancestor_add(self):
ia = libsbml.Reaction(2,4)
m = libsbml.Model(2,4)
ia.setId("k")
m.addReaction(ia)
ia = None
lo = m.getListOfReactions()
obj = m.getReaction(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Reaction_ancestor_create(self):
m = libsbml.Model(2,4)
r = m.createReaction()
lo = m.getListOfReactions()
self.assert_( r.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( r.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( r.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( r.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getReaction(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Rule_ancestor_add(self):
ia = libsbml.RateRule(2,4)
ia.setVariable("a")
ia.setMath(libsbml.parseFormula("9"))
m = libsbml.Model(2,4)
m.addRule(ia)
ia = None
lo = m.getListOfRules()
obj = m.getRule(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_SpeciesReference_Modifier_ancestor_add(self):
sr = libsbml.ModifierSpeciesReference(2,4)
sr.setSpecies("s")
r = libsbml.Reaction(2,4)
r.addModifier(sr)
sr = None
lo = r.getListOfModifiers()
obj = r.getModifier(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Modifier_ancestor_create(self):
r = libsbml.Reaction(2,4)
sr = r.createModifier()
lo = r.getListOfModifiers()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getModifier(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Modifier_ancestor_create_model(self):
m = libsbml.Model(2,4)
r = m.createReaction()
sr = m.createModifier()
lo = r.getListOfModifiers()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getModifier(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Product_ancestor_add(self):
sr = libsbml.SpeciesReference(2,4)
r = libsbml.Reaction(2,4)
sr.setSpecies("p")
r.addProduct(sr)
sr = None
lo = r.getListOfProducts()
obj = r.getProduct(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Product_ancestor_create(self):
r = libsbml.Reaction(2,4)
sr = r.createProduct()
lo = r.getListOfProducts()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getProduct(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Product_ancestor_create_model(self):
m = libsbml.Model(2,4)
r = m.createReaction()
sr = m.createProduct()
lo = r.getListOfProducts()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getProduct(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Reactant_ancestor_add(self):
sr = libsbml.SpeciesReference(2,4)
r = libsbml.Reaction(2,4)
sr.setSpecies("s")
r.addReactant(sr)
sr = None
lo = r.getListOfReactants()
obj = r.getReactant(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Reactant_ancestor_create(self):
r = libsbml.Reaction(2,4)
sr = r.createReactant()
lo = r.getListOfReactants()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getReactant(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesReference_Reactant_ancestor_create_model(self):
m = libsbml.Model(2,4)
r = m.createReaction()
sr = m.createReactant()
lo = r.getListOfReactants()
self.assert_( sr.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( sr.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( sr.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( sr.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sr.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = r.getReactant(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_REACTION) == r )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_SpeciesType_ancestor_add(self):
ia = libsbml.SpeciesType(2,4)
m = libsbml.Model(2,4)
ia.setId("s")
m.addSpeciesType(ia)
ia = None
lo = m.getListOfSpeciesTypes()
obj = m.getSpeciesType(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_SpeciesType_ancestor_create(self):
m = libsbml.Model(2,4)
st = m.createSpeciesType()
lo = m.getListOfSpeciesTypes()
self.assert_( st.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( st.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( st.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( st.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getSpeciesType(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Species_ancestor_add(self):
ia = libsbml.Species(2,4)
m = libsbml.Model(2,4)
ia.setId("s")
ia.setCompartment("c")
m.addSpecies(ia)
ia = None
lo = m.getListOfSpecies()
obj = m.getSpecies(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Species_ancestor_create(self):
m = libsbml.Model(2,4)
s = m.createSpecies()
lo = m.getListOfSpecies()
self.assert_( s.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( s.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( s.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( s.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getSpecies(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_StoichiometryMath_ancestor_add(self):
m = libsbml.StoichiometryMath(2,4)
sr = libsbml.SpeciesReference(2,4)
sr.setStoichiometryMath(m)
m = None
obj = sr.getStoichiometryMath()
self.assert_( obj.getAncestorOfType(libsbml.SBML_SPECIES_REFERENCE) == sr )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
sr = None
pass
def test_StoichiometryMath_ancestor_create(self):
sr = libsbml.SpeciesReference(2,4)
sm = sr.createStoichiometryMath()
self.assert_( sm.getAncestorOfType(libsbml.SBML_SPECIES_REFERENCE) == sr )
self.assert_( sm.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( sm.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = sr.getStoichiometryMath()
self.assert_( obj.getAncestorOfType(libsbml.SBML_SPECIES_REFERENCE) == sr )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_Trigger_ancestor_add(self):
d = libsbml.Trigger(2,4)
e = libsbml.Event(2,4)
e.setTrigger(d)
d = None
obj = e.getTrigger()
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
e = None
pass
def test_Trigger_ancestor_create(self):
e = libsbml.Event(2,4)
ea = e.createTrigger()
self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = e.getTrigger()
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_Trigger_ancestor_create_model(self):
m = libsbml.Model(2,4)
e = m.createEvent()
ea = m.createTrigger()
self.assert_( ea.getAncestorOfType(libsbml.SBML_EVENT) == e )
self.assert_( ea.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( ea.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( ea.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = e.getTrigger()
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == e )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
pass
def test_UnitDefinition_ancestor_add(self):
ia = libsbml.UnitDefinition(2,4)
m = libsbml.Model(2,4)
ia.setId("u")
ia.createUnit()
m.addUnitDefinition(ia)
ia = None
lo = m.getListOfUnitDefinitions()
obj = m.getUnitDefinition(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_UnitDefinition_ancestor_create(self):
m = libsbml.Model(2,4)
ud = m.createUnitDefinition()
lo = m.getListOfUnitDefinitions()
self.assert_( ud.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( ud.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( ud.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( ud.getAncestorOfType(libsbml.SBML_EVENT) == None )
obj = m.getUnitDefinition(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_EVENT) == None )
pass
def test_Unit_ancestor_add(self):
ud = libsbml.UnitDefinition(2,4)
u = libsbml.Unit(2,4)
u.setKind(libsbml.UNIT_KIND_MOLE)
ud.addUnit(u)
u = None
self.assert_( ud.getNumUnits() == 1 )
lo = ud.getListOfUnits()
obj = ud.getUnit(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_UNIT_DEFINITION) == ud )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
ud = None
pass
def test_Unit_ancestor_create(self):
ud = libsbml.UnitDefinition(2,4)
u = ud.createUnit()
self.assert_( ud.getNumUnits() == 1 )
lo = ud.getListOfUnits()
self.assert_( u.getAncestorOfType(libsbml.SBML_UNIT_DEFINITION) == ud )
self.assert_( u.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( u.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( u.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = ud.getUnit(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_UNIT_DEFINITION) == ud )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
ud = None
pass
def test_Unit_ancestor_create_model(self):
m = libsbml.Model(2,4)
ud = m.createUnitDefinition()
u = m.createUnit()
self.assert_( ud.getNumUnits() == 1 )
lo = ud.getListOfUnits()
self.assert_( u.getAncestorOfType(libsbml.SBML_UNIT_DEFINITION) == ud )
self.assert_( u.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( u.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( u.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( u.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
obj = ud.getUnit(0)
self.assert_( obj.getAncestorOfType(libsbml.SBML_UNIT_DEFINITION) == ud )
self.assert_( obj.getAncestorOfType(libsbml.SBML_LIST_OF) == lo )
self.assert_( obj.getAncestorOfType(libsbml.SBML_DOCUMENT) == None )
self.assert_( obj.getAncestorOfType(libsbml.SBML_MODEL) == m )
self.assert_( obj.getAncestorOfType(libsbml.SBML_COMPARTMENT) == None )
ud = None
pass
def suite():
  """Build and return the unittest suite holding all TestAncestor cases."""
  bundle = unittest.TestSuite()
  bundle.addTest(unittest.makeSuite(TestAncestor))
  return bundle
if __name__ == "__main__":
  # Run the suite; exit status 0 on success, 1 on any failure/error.
  outcome = unittest.TextTestRunner(verbosity=1).run(suite())
  sys.exit(0 if outcome.wasSuccessful() else 1)
| alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/sbml/TestAncestor.py | Python | gpl-3.0 | 39,155 | [
"VisIt"
] | 1992b9ea3155a609142f2b094d25e0ddb5109eb837e83e485a8b29149142652f |
__RCSID__ = "$Id$"
import socket
import select
import time
import os
from DIRAC.Core.DISET.private.Transports.BaseTransport import BaseTransport
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
class PlainTransport( BaseTransport ):
  """Unencrypted TCP transport for DISET.

  Implements the BaseTransport contract over a plain (non-SSL) socket.
  All public methods return S_OK/S_ERROR result structures; an optional
  'timeout' entry in self.extraArgsDict bounds connect/read/write waits.
  """

  def initAsClient( self ):
    """Open a client connection to self.stServerAddress.

    Returns S_OK( socket ) on success, S_ERROR( message ) on failure
    or connection timeout.
    """
    timeout = None
    if 'timeout' in self.extraArgsDict:
      timeout = self.extraArgsDict[ 'timeout' ]
    try:
      self.oSocket = socket.create_connection( self.stServerAddress, timeout )
    except socket.error as e:
      # errno 115 (EINPROGRESS): a non-blocking connect is still pending
      if e.args[0] != 115:
        return S_ERROR( "Can't connect: %s" % str( e ) )
      # Connect in progress: wait until the socket becomes writable.
      # BUGFIX: use the local `timeout` (may be None) instead of an
      # unconditional extraArgsDict lookup, which raised KeyError when
      # no timeout was configured.
      oL = select.select( [], [ self.oSocket ], [], timeout )[1]
      if len( oL ) == 0:
        self.oSocket.close()
        return S_ERROR( "Connection timeout" )
      errValue = self.oSocket.getsockopt( socket.SOL_SOCKET, socket.SO_ERROR )
      if errValue != 0:
        return S_ERROR( "Can't connect: %s" % str( ( errValue, os.strerror( errValue ) ) ) )
    self.remoteAddress = self.oSocket.getpeername()
    return S_OK( self.oSocket )

  def initAsServer( self ):
    """Bind and listen on self.stServerAddress (IPv6 with IPv4 fallback)."""
    if not self.serverMode():
      raise RuntimeError( "Must be initialized as server mode" )
    try:
      self.oSocket = socket.socket( socket.AF_INET6, socket.SOCK_STREAM )
    except socket.error:
      # IPv6 is probably disabled on this node, try IPv4 only instead
      self.oSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    if self.bAllowReuseAddress:
      self.oSocket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
    self.oSocket.bind( self.stServerAddress )
    self.oSocket.listen( self.iListenQueueSize )
    return S_OK( self.oSocket )

  def close( self ):
    """Shut down both directions and close the socket (best effort)."""
    gLogger.debug( "Closing socket" )
    try:
      self.oSocket.shutdown( socket.SHUT_RDWR )
    except Exception:
      # Peer may already have closed; a failed shutdown is not fatal
      pass
    self.oSocket.close()

  def setClientSocket( self, oSocket ):
    """Adopt an already-accepted socket for a client-side transport."""
    if self.serverMode():
      # BUGFIX: corrected message typo ("Mustbe" -> "Must be")
      raise RuntimeError( "Must be initialized as client mode" )
    self.oSocket = oSocket
    if 'timeout' in self.extraArgsDict:
      self.oSocket.settimeout( self.extraArgsDict[ 'timeout' ] )
    self.remoteAddress = self.oSocket.getpeername()

  def acceptConnection( self ):
    """Accept one incoming connection; return S_OK( PlainTransport )."""
    #HACK: Was = PlainTransport( self )
    oClientTransport = PlainTransport( self.stServerAddress )
    oClientSocket, stClientAddress = self.oSocket.accept()
    oClientTransport.setClientSocket( oClientSocket )
    return S_OK( oClientTransport )

  def _read( self, bufSize = 4096, skipReadyCheck = False ):
    """Read up to bufSize bytes, retrying on EAGAIN until the optional timeout.

    Returns S_OK( data ) or S_ERROR( message ).
    """
    start = time.time()
    timeout = False
    if 'timeout' in self.extraArgsDict:
      timeout = self.extraArgsDict[ 'timeout' ]
    while True:
      if timeout:
        if time.time() - start > timeout:
          return S_ERROR( "Socket read timeout exceeded" )
      try:
        data = self.oSocket.recv( bufSize )
        return S_OK( data )
      except socket.error as e:
        # errno 11 (EAGAIN): no data yet on a non-blocking socket, retry.
        # (was `e[0]`, which breaks on Python 3; `e.args[0]` works on both)
        if e.args[0] == 11:
          time.sleep( 0.001 )
        else:
          return S_ERROR( "Exception while reading from peer: %s" % str( e ) )
      except Exception as e:
        return S_ERROR( "Exception while reading from peer: %s" % str( e ) )

  def _write( self, buffer ):
    """Send the whole buffer, retrying on EAGAIN; return S_OK( bytesSent )."""
    sentBytes = 0
    timeout = False
    if 'timeout' in self.extraArgsDict:
      timeout = self.extraArgsDict[ 'timeout' ]
    if timeout:
      start = time.time()
    while sentBytes < len( buffer ):
      try:
        if timeout:
          if time.time() - start > timeout:
            return S_ERROR( "Socket write timeout exceeded" )
        sent = self.oSocket.send( buffer[ sentBytes: ] )
        if sent == 0:
          return S_ERROR( "Connection closed by peer" )
        if sent > 0:
          sentBytes += sent
      except socket.error as e:
        # errno 11 (EAGAIN): send buffer full, retry shortly
        if e.args[0] == 11:
          time.sleep( 0.001 )
        else:
          return S_ERROR( "Exception while sending to peer: %s" % str( e ) )
      except Exception as e:
        return S_ERROR( "Error while sending: %s" % str( e ) )
    return S_OK( sentBytes )
def checkSanity( *args, **kwargs ):
  # Plain (unencrypted) transport has no certificates or handshake options
  # to validate, so sanity checking always succeeds with an empty info dict.
  return S_OK( {} )
def delegate( delegationRequest, kwargs ):
  """
  Check delegate!
  Credential delegation does not apply to the unencrypted transport, so the
  request is accepted unconditionally and S_OK() is returned.
  """
  return S_OK()
| Andrew-McNab-UK/DIRAC | Core/DISET/private/Transports/PlainTransport.py | Python | gpl-3.0 | 4,188 | [
"DIRAC"
] | 6bf35c866b844f854645d54e00ce99ad86bb0569247017845c1877f6e1d8c533 |
"""
#python -c "import cyth, doctest; print(doctest.testmod(cyth.cyth_parser))"
"""
from __future__ import absolute_import, division, print_function
import six
from six.moves import zip, map, filter
from itertools import chain
import utool
import sys
from cyth import cyth_helpers
import ast
#import codegen # NOQA
import astor
import re
import cyth # NOQA
from copy import deepcopy
from cyth.cyth_decorators import MACRO_EXPANDERS_DICT
from cyth import cyth_benchmarks
import cyth.cyth_macros
BASE_CLASS = astor.codegen.SourceGenerator
class CythVisitor(BASE_CLASS):
indent_level = 0
emit = sys.stdout.write
    def __init__(self, indent_with=' ' * 4, add_line_information=False,
                 py_modname=None):
        """Initialize parser state for cythonizing one python module.

        Args:
            indent_with (str): indentation unit used in generated source
            add_line_information (bool): forwarded to the astor generator
            py_modname (str): name of the python module being processed
        """
        super(CythVisitor, self).__init__(indent_with, add_line_information)
        # Benchmarks harvested from cyth-marked doctests
        self.benchmark_names = []
        self.benchmark_codes = []
        self.py_modname = py_modname
        #print('in module ', py_modname)
        # Import bookkeeping for generating the pyx preamble
        self.imported_modules = {}
        self.imported_functions = {}
        #self.all_funcalls = []
        #self.imports_with_usemaps = {}
        self.import_lines = ['cimport cython', 'import cython']
        self.cimport_whitelist = ['numpy']
        self.cimport_blacklist = ['numpy.core.umath_tests']
        self.import_from_blacklist = ['range', 'map', 'zip']
        # Functions seen so far, split by whether cyth markup was found
        self.cythonized_funcs = {}
        self.plain_funcs = {}
        # Two-pass information gatherers (classes defined elsewhere in this file)
        self.fpig = FirstpassInformationGatherer()
        self.spig = SecondpassInformationGatherer(self.fpig)
        self.modules_to_cimport = []
        self.interface_lines = []  # generated for the pxd header
        self.gensym = cyth.cyth_macros.make_gensym_function()
def get_result(self):
"""
returns cythonized pyx text resulting from parsing the py file with cyth
markups
"""
cyth_text = (
'\n'.join(self.import_lines) +
'\n' +
''.join(self.result)
)
pxd_text = ('cimport numpy as np\n' + '\n\n'.join(self.interface_lines))
return cyth_text, pxd_text
def process_args(self, args, vararg, kwarg, defaults=None):
processed_argslist = map(self.visit, args)
if vararg:
processed_argslist.append('*%s' % vararg)
if kwarg:
processed_argslist.append('**%s' % kwarg)
if defaults is not None:
first_non_default = len(args) - len(defaults)
for (ix, de) in enumerate(defaults):
processed_argslist[first_non_default + ix] += '=%s' % self.visit(defaults[ix])
return processed_argslist
    def signature(self, node, typedict={}):
        """Emit a (possibly typed) call signature for *node*.

        Arguments whose names appear in *typedict* are written with their
        cdef type prepended; those entries are consumed so the caller can
        emit the remaining declarations in the function body.

        NOTE(review): the mutable default `typedict={}` is only read here
        (.copy() and lookups on the copy), so it is not a live bug, but it
        is fragile if the method is ever changed to mutate it.

        Returns:
            dict: the type declarations NOT used up by the signature
        """
        # remove type declarations that are 'used up' in the function's
        # call signature, to avoid duplicating them in the body
        nonsig_typedict = typedict.copy()
        want_comma = []
        def write_comma():
            # Writes a separator before every argument except the first
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        def loop_args(args, defaults):
            # Pad defaults on the left so they align with the trailing args
            padding = [None] * (len(args) - len(defaults))
            for arg, default in zip(args, padding + defaults):
                if arg.id in typedict:
                    # Typed argument: 'ctype name'; consume the declaration
                    arg_ = typedict[arg.id] + ' ' + arg.id
                    nonsig_typedict.pop(arg.id)
                else:
                    arg_ = arg
                self.write(write_comma, arg_)
                self.conditional_write('=', default)
        loop_args(node.args, node.defaults)
        self.conditional_write(write_comma, '*', node.vararg)
        self.conditional_write(write_comma, '**', node.kwarg)
        # Python 3 AST nodes may carry keyword-only arguments
        kwonlyargs = getattr(node, 'kwonlyargs', None)
        if kwonlyargs:
            if node.vararg is None:
                self.write(write_comma, '*')
            loop_args(kwonlyargs, node.kw_defaults)
        return nonsig_typedict
def parse_cythdef(self, cyth_def):
""" Hacky string manipulation parsing """
#ast.literal_eval(utool.unindent(cyth_def))
typedict = {}
cdef_mode = False
current_indent = 0
for line_ in cyth_def.splitlines():
indent_level = utool.get_indentation(line_)
# Check indentation
if indent_level > current_indent:
current_indent = indent_level
elif indent_level > current_indent:
current_indent = indent_level
cdef_mode = False
line = line_.strip()
if line.startswith('#'):
continue
if len(line) == 0:
continue
# parse cdef
if line.startswith('cdef:'):
cdef_mode = True
continue
if cdef_mode or line.startswith('cdef '):
def parse_cdef_line(line):
"""
line = 'np.array[float, ndims=2] x, y, z'
# todo put in better line parsing
# allow for cdef
type_ = []
lbrackets = 0
for sub in line.split(','):
sub
pass
"""
assign_str = line.replace('cdef ', '')
pos = assign_str.rfind(' ')
type_ = assign_str[:pos]
varstr = assign_str[(pos + 1):]
typedict[varstr] = type_
return typedict
def typedict_to_cythdef(self, typedict):
res = ['cdef:']
for (id_, type_) in six.iteritems(typedict):
#print("%s, %s" % (repr(id_), repr(type_)))
res.append(self.indent_with + type_ + ' ' + id_)
if len(typedict) == 0:
res.append(self.indent_with + 'pass')
return res
    def register_benchmark(self, funcname, docstr, modname):
        """Harvest benchmark code from *docstr*'s doctests and record it.

        No-op when funcname is None/empty (e.g. module-level docstrings).
        """
        if funcname:
            # Parse doctests for benchmarks
            (bench_name, bench_code) = cyth_benchmarks.parse_benchmarks(funcname, docstr, modname)
            self.benchmark_names.append(bench_name)
            self.benchmark_codes.append(bench_code)
    def parse_cyth_markup(self, docstr, toplevel=False, funcdef_node=None):
        """Parse old-style <CYTH> tags out of a docstring.

        Args:
            docstr (str): block of text possibly with cyth markups
            toplevel (bool): True if docstr is in the global scope
            funcdef_node: parent funcdef node for return type inference

        Returns:
            tuple or None: ('defines', cyth_def, typedict, return_type) or
            ('replace', text) when a known tag is found; None otherwise.
        """
        funcname = None if funcdef_node is None else funcdef_node.name
        comment_str = docstr.strip()
        has_markup = comment_str.find('<CYTH') != -1
        # type returned_action = [`defines of string * (string, string) Hashtbl.t | `replace of string] option
        def make_defines(cyth_def, return_type=None):
            # Build the 'defines' action; infer the return type when absent.
            # NOTE(review): infer_return_type is defined elsewhere in this file.
            typedict = self.parse_cythdef(cyth_def)
            if not return_type:
                return_type = infer_return_type(funcdef_node, typedict)
            actiontup = ('defines', cyth_def, typedict, return_type)
            return actiontup
        # TODO: these actions are old-style cyth commands. Replace them.
        # Each tag maps to a callback receiving the regex match object.
        tags_to_actions = [
            ('<CYTH>', lambda cyth_def: make_defines(cyth_def.group(1))),
            ('<CYTH returns="(.*?)">', lambda cyth_def: make_defines(cyth_def.group(2), cyth_def.group(1))),
            ('<CYTH:REPLACE>', lambda cyth_def: ('replace', utool.unindent(cyth_def.group(1)))),
        ]
        end_tag = '</CYTH>'
        regex_flags = re.DOTALL | re.MULTILINE
        regex_to_actions = [(re.compile(tag + '(.*?)' + end_tag, regex_flags), act)
                            for tag, act in tags_to_actions]
        if has_markup:
            # if CYTH markup was found...
            self.register_benchmark(funcname, docstr, self.py_modname)
            if toplevel:
                # hack global cyth-docstrs to always CYTH:REPLACE
                comment_str = re.sub('<CYTH>', '<CYTH:REPLACE>', comment_str)
            # First matching tag wins
            for (regex, action) in regex_to_actions:
                match = regex.search(comment_str)
                if match:
                    #cyth_def = match.group(1)
                    actiontup = action(match)
                    return actiontup
            # Markup marker present but no tag matched: warn and fall through
            print(comment_str)
            utool.printex(NotImplementedError('no known cyth tag in docstring'),
                          iswarning=True,
                          key_list=['funcname', 'toplevel', 'regex', 'action',
                                    'match', 'comment_str'])
        else:
            # Do nothing if cyth markup was not found
            pass
        return None
def parse_cyth_preproc_markup(self, docstr, cyth_mode, collect_macro_input,
macro_input_buffer_ptr,
suspended_macro_context_ptr, inline_flag_ptr):
source_lines = []
param_typedict = {}
bodyvars_typedict = {}
preproc_vars_dict = {'CYTH': True}
return_type_ptr = [None]
cyth_mode_ptr = [cyth_mode]
collect_macro_input_ptr = [collect_macro_input]
def handle_if(matcher):
varname = matcher.group(1)
# change to a stack in order to support nested ifs
#print('handle_if: %r' % varname)
cyth_mode_ptr[0] = preproc_vars_dict.get(varname, False)
def handle_else(matcher):
#print('handle_else: %r' % (cyth_mode_ptr[0],))
#cyth_mode_ptr[0] = not cyth_mode_ptr[0]
pass # deliberate no-op
def handle_endif(matcher):
#print('handle_endif')
cyth_mode_ptr[0] = False # pop the top item in the stack for nested ifs
def handle_returns_decl(matcher):
return_type_ptr[0] = matcher.group(1)
#print('handle_returns %r' % return_type_ptr[0])
def handle_inline(matcher):
inline_flag_ptr[0] = True
def handle_macro(matcher):
""" this should eventually be changed to reuse
the machinery for multiline """
macro_name = matcher.group(1)
suspended_macro_context_ptr[0] = (macro_name,)
collect_macro_input_ptr[0] = True
assert len(macro_input_buffer_ptr[0]) == 0, macro_input_buffer_ptr[0]
def handle_endmacro(matcher):
""" this is quite a bit hacky, but this is the
most straightforward way to implement them until the
parse_cyth_preproc_markup/visit_FunctionDef 'coroutine'
blob is refactored """
(macro_name,) = suspended_macro_context_ptr[0]
lines = macro_input_buffer_ptr[0]
#print('macro invokation of "%s" on lines %r' % (macro_name, lines))
expander = MACRO_EXPANDERS_DICT.get(macro_name, None)
if expander:
expanded_lines = ['\n'] + expander(self.gensym, lines) + ['\n']
else:
errmsg = 'No macro named %r has been registered via the cyth.macro decorator'
raise NotImplementedError(errmsg % macro_name)
indented_expanded_lines = [utool.indent(x, indent=self.indent_with * (self.indentation + 1))
for x in expanded_lines]
#print('output is %r' % expanded_lines)
source_lines.extend(indented_expanded_lines)
collect_macro_input_ptr[0] = False
macro_input_buffer_ptr[0] = []
oneline_directives = [('#' + a, b) for (a, b) in [
('if (.*)', handle_if),
('else', handle_else),
('endif', handle_endif),
('CYTH_RETURNS (.*)', handle_returns_decl),
('CYTH_INLINE', handle_inline),
('macro ([^ ]*).*', handle_macro),
('endmacro', handle_endmacro), # HACK
]]
def handle_param_types(matcher, lines):
# the indent/unindent pair is not a no-op: unindent goes all the way,
# indent just does one step
#print(lines)
#cythdef = '\n'.join(['cdef:\n'].append(utool.indent(utool.unindent('\n'.join(lines))).split('\n')))
#print('synthesized_cythdef = %r' % (cythdef,))
#param_typedict.update(self.parse_cythdef(cythdef))
for line in lines:
tmp_typedict = parse_cdef_line(line)
#print('%r -> %r' % (line, tmp_typedict))
param_typedict.update(tmp_typedict)
def handle_cdef(matcher, lines):
for line in lines:
tmp_typedict = parse_cdef_line(line)
#print('%r -> %r' % (line, tmp_typedict))
bodyvars_typedict.update(tmp_typedict)
multiline_directives = [((a + ":"), b) for (a, b) in [
('#CYTH_PARAM_TYPES', handle_param_types),
('cdef', handle_cdef),
]]
regex_compile_car = lambda (a, b): (re.compile(a), b)
#print(list(oneline_directives))
compiled_oneline_directives = list(map(regex_compile_car, oneline_directives))
compiled_multiline_directives = list(map(regex_compile_car, multiline_directives))
# hack for lack of labeled continues
        def loop_body(line):
            """Process one docstring line against the registered directives.

            Returns None when the line was fully handled (or passed through),
            or a ``(match, handler)`` pair when the line opens a multiline
            directive whose body must be buffered by the caller.
            """
            #print('cyth_mode_ptr[0] = %r, Line "%s"' % (cyth_mode_ptr[0], line))
            # One-line directives are handled immediately.
            for (regex, handler) in compiled_oneline_directives:
                match = regex.search(line)
                if match:
                    handler(match)
                    return None
            # Multiline directives are returned so the caller can buffer
            # the following (more-indented) lines for the handler.
            for (regex, handler) in compiled_multiline_directives:
                match = regex.search(line)
                if match:
                    return (match, handler)
            # Inside an active '#if CYTH' region, plain lines become output.
            if cyth_mode_ptr[0]:
                #if line.strip().startswith('cdef'):
                #    bodyvars_typedict.update(parse_cdef_line(line))
                #    #source_lines.append('\n' + line.replace('cdef [^ ]*', ''))
                ##else:
                source_lines.append('\n' + line)
            return None
multiline_buffer = []
multiline_start_indent = None
do_multiline = None
for line in docstr.split('\n'):
#print('do_multiline = %r, Line "%s"' % (do_multiline, line))
if do_multiline:
current_indentation = utool.get_indentation(line)
if multiline_start_indent < current_indentation:
multiline_buffer.append(line)
continue
else:
#print('calling handler')
(match, handler) = do_multiline
handler(match, multiline_buffer)
multiline_buffer = []
multiline_start_indent = None
do_multiline = loop_body(line)
if do_multiline:
assert multiline_buffer == [], multiline_buffer
multiline_start_indent = utool.get_indentation(line)
#print('at return, cyth_mode_ptr[0] is %r' % (cyth_mode_ptr[0],))
return source_lines, cyth_mode_ptr[0], collect_macro_input_ptr[0], param_typedict, bodyvars_typedict, return_type_ptr[0]
    def visit_Module(self, node):
        """Top-level dispatch over a module body.

        Expands module-level cyth docstring markup, cythonizes function
        definitions, registers plain and from-imports with used-flags,
        and emits module-level assignments only when the second-pass
        gatherer saw the global actually being used.
        """
        # cr = CallRecorder()
        # cr.visit(node)
        # self.all_funcalls = cr.calls
        # First pass collects global names, second pass records their uses.
        self.fpig.visit(node)
        self.spig.visit(node)
        def get_alias_name(al):
            # An 'import x as y' alias is addressed by its asname when present.
            alias_name = al.name if al.asname is None else al.asname
            return alias_name
        for subnode in node.body:
            # parse for cythtags
            if is_docstring(subnode):
                #print('Encountered global docstring: %s' % repr(subnode.value.s))
                docstr = subnode.value.s
                #actiontup = self.parse_cyth_markup(docstr, toplevel=True)
                #if actiontup is not None:
                #    if actiontup[0] == 'replace':
                #        cyth_def = actiontup[1]
                #        self.newline(extra=1)
                #        self.write(cyth_def)
                # Only the first element (the generated source lines) of the
                # returned tuple is needed at module level.
                hacky_blob_of_retvals = self.parse_cyth_preproc_markup(docstr, False, False, [[]], [None], [False])
                lines = hacky_blob_of_retvals[0]
                self.newline(extra=1)
                for line in lines:
                    self.write(line)
            # try to parse functions for cyth tags
            elif isinstance(subnode, ast.FunctionDef):
                self.visit(subnode)
            # register imports
            elif isinstance(subnode, ast.Import):
                for alias in subnode.names:
                    alias_ = get_alias_name(alias)
                    # value layout: [alias_node, used_flag]
                    self.imported_modules[alias_] = [alias, False]
            # register from imports
            elif isinstance(subnode, ast.ImportFrom):
                for alias in subnode.names:
                    alias_ = get_alias_name(alias)
                    # value layout: [module_name, alias_node, used_flag]
                    self.imported_functions[alias_] = [subnode.module, alias, False]
            # register a global
            elif isinstance(subnode, (ast.Assign, ast.AugAssign)):
                targets = assignment_targets(subnode)
                # Emit the assignment only if some target was seen as used.
                if any((self.spig.globals_used.get(target, False) for target in targets)):
                    self.visit(subnode)
                else:
                    #print('Skipping a global %r' % subnode.__class__)
                    pass
        imports = self.generate_imports(self.imported_modules, self.imported_functions)
        self.import_lines.extend(imports)
        #return BASE_CLASS.visit_Module(self, node)
def visit_ImportFrom(self, node, emitCimport=False):
imp = 'cimport' if emitCimport else 'import'
if node.module:
self.statement(node, 'from ', node.level * '.',
node.module, ' ' + imp + ' ')
else:
self.statement(node, 'from ', node.level * '. ' + imp + ' ')
self.comma_list(node.names)
    def generate_imports(self, modules, functions):
        """Synthesize import/cimport source lines for everything marked used.

        *modules* maps alias -> [ast.alias, used_flag]; *functions* maps
        alias -> [module_name, ast.alias, used_flag]; the flags are set by
        visit_Call.  Additionally cimports the cythonized twins of modules
        recorded in ``self.modules_to_cimport`` and imports locally-defined
        functions that the cythonized functions call.  Returns a list of
        source-line strings.
        """
        imports = []
        for (alias, used_flag) in six.itervalues(modules):
            if used_flag:
                import_line = cyth_helpers.ast_to_sourcecode(ast.Import(names=[alias]))
                imports.append(import_line)
                # whitelisted modules additionally get a 'cimport' twin
                if alias.name in self.cimport_whitelist:
                    imports.append('c' + import_line)
        for (modulename, alias, used_flag) in six.itervalues(functions):
            # If module
            if used_flag and alias.name not in self.import_from_blacklist:
                impnode = ast.ImportFrom(module=modulename, names=[alias], level=0)
                impsrc = cyth_helpers.ast_to_sourcecode(impnode)
                imports.append(impsrc)
                # Check if the import is explicitly whitelisted or blacklisted
                whitelisted = any(map(modulename.startswith, self.cimport_whitelist))
                cblacklisted = modulename in self.cimport_blacklist
                if whitelisted and not cblacklisted:
                    # Render the 'from x cimport y' form through a scratch visitor.
                    temp_cv = CythVisitor()
                    temp_cv.visit_ImportFrom(impnode, emitCimport=True)
                    import_line = temp_cv.get_result()[0]
                    imports.append(import_line)
        for modulename in set(self.modules_to_cimport):
            module_alias = modules.get(modulename, [None])[0]
            if module_alias is not None:
                assert isinstance(module_alias, ast.alias), type(module_alias)
                # Deep-copy so the registered alias node is not mutated.
                cythed_alias = deepcopy(module_alias)
                cythed_alias.name = cyth_helpers.get_cyth_name(module_alias.name)
                # short circuit exploit for brevity
                cythed_alias.asname = cythed_alias.asname and cyth_helpers.get_cyth_name(module_alias.asname)
                tmpnode = ast.Import(names=[cythed_alias])
                import_line = 'c' + cyth_helpers.ast_to_sourcecode(tmpnode)
                imports.append(import_line)
        # NOTE(review): .iteritems() is Python-2-only; six.iteritems(...) would
        # be consistent with the rest of this method -- confirm target runtime.
        module_func_dict = dict(chain(self.cythonized_funcs.iteritems(),
                                      self.plain_funcs.iteritems()))
        #@utool.show_return_value
        def is_called_in(funcname, node):
            # True iff *node* contains a plain-name call to *funcname*.
            call_list = get_funcalls_in_node(node)
            def _iscalled(call):
                return (isinstance(call, ast.Call) and
                        isinstance(call.func, ast.Name) and
                        funcname == call.func.id)
            return any(map(_iscalled, call_list))
            #def name_of_call(call): # ast.Node -> string option
            #    #print('ast dump: %r' % ast.dump(call))
            #    if not isinstance(call, ast.Call):
            #        return []
            #    if not isinstance(call.func, ast.Name):
            #        return []
            #    return [call.func.id]
            #is_called = funcname in chain(*map(name_of_call, call_list))
            #return is_called
        called_funcs = []
        print('module_func_dict = %r' % (module_func_dict.keys(),))
        for callee in module_func_dict.keys():
            for (caller, caller_node) in six.iteritems(self.cythonized_funcs):
                if is_called_in(callee, caller_node):
                    called_funcs.append(callee)
        if len(called_funcs) > 0:
            names = [ast.alias(name, None) for name in called_funcs]
            fromimport = ast.ImportFrom(module=self.py_modname, names=names, level=0)
            imports.append(cyth_helpers.ast_to_sourcecode(fromimport))
        return imports
def visit_Call(self, node):
"""
Gets called when parser encounters a function call
"""
#print(ast.dump(node))
# Special case for functions with _cyth suffix. These should have been
# generated by us.
is_cyth_call = lambda name: name.endswith('_cyth') and not name.startswith('_')
if isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name):
#print('visit_Call, branch 1')
if node.func.value.id in self.imported_modules:
self.imported_modules[node.func.value.id][1] = True
if is_cyth_call(node.func.attr):
self.modules_to_cimport.append(node.func.value.id)
newnode = deepcopy(node)
newnode.func.attr = '_' + node.func.attr
newnode.func.value.id = cyth_helpers.get_cyth_name(node.func.value.id)
return BASE_CLASS.visit_Call(self, newnode)
if isinstance(node.func, ast.Name):
#print('visit_Call, branch 2')
if node.func.id in self.imported_functions:
self.imported_functions[node.func.id][2] = True
if is_cyth_call(node.func.id):
newnode = deepcopy(node)
newnode.func.id = '_' + node.func.id
return BASE_CLASS.visit_Call(self, newnode)
return BASE_CLASS.visit_Call(self, node)
    def visit_str(self, string):
        # Pass-through: emit an already-formatted source fragment verbatim.
        self.write(string)
    def visit_FunctionDef(self, node):
        """
        Gets called when parser encounters a function definition.

        Scans docstrings in the function body for cyth preprocessor markup.
        If any '#if CYTH' markup is found, the function is registered as
        cythonized, a benchmark is recorded, and a typed cpdef version
        (optionally inline) is emitted; otherwise the function is recorded
        as a plain (non-cythonized) function and nothing is written.
        """
        #super(CythVisitor, self).visit_FunctionDef(node)
        new_body = []
        #actiontup = None
        cyth_mode = False  # used for #if/else/endif
        param_typedict = {}
        bodyvars_typedict = {}
        return_type = None
        has_markup = False
        first_docstr = None
        # Single-element lists emulate mutable out-parameters threaded
        # through parse_cyth_preproc_markup across multiple docstrings.
        inline_flag_ptr = [False]
        collect_macro_input = False
        macro_input_buffer_ptr = [[]]
        suspended_macro_context_ptr = [None]
        for stmt in node.body:
            if is_docstring(stmt):
                docstr = stmt.value.s
                if first_docstr is None:
                    first_docstr = docstr
                has_markup = has_markup or docstr.find('#if CYTH') != -1
                #actiontup = self.parse_cyth_markup(docstr, funcdef_node=node)
                (source_lines, cyth_mode, collect_macro_input,
                 new_param_typedict, new_bodyvars_typedict,
                 new_return_type) = self.parse_cyth_preproc_markup(
                     docstr, cyth_mode, collect_macro_input,
                     macro_input_buffer_ptr, suspended_macro_context_ptr,
                     inline_flag_ptr)
                #print('source_lines: %r' % (source_lines,))
                new_body.extend(source_lines)
                # First declared return type wins.
                if new_return_type is not None and return_type is None:
                    return_type = new_return_type
                param_typedict.update(new_param_typedict)
                bodyvars_typedict.update(new_bodyvars_typedict)
            else:
                #print('cyth_mode: %r, stmt: %r' % (cyth_mode, ast.dump(stmt)))
                # Plain statements are kept unless a cyth region or macro
                # buffer replaces them.
                if not (cyth_mode or collect_macro_input):
                    new_body.append(stmt)
                if collect_macro_input:
                    macro_input_buffer_ptr[0].append(cyth_helpers.ast_to_sourcecode(stmt))
        if has_markup:
            self.cythonized_funcs[node.name] = node
            self.register_benchmark(node.name, first_docstr, self.py_modname)
            # Body variable types override parameter types on collision.
            union_typedict = {}
            union_typedict.update(param_typedict)
            union_typedict.update(bodyvars_typedict)
            if return_type is None:
                return_type = infer_return_type(node, union_typedict)
            self.newline(extra=1)
            cyth_funcname = cyth_helpers.get_cyth_name(node.name)
            # TODO: should allow for user specification
            func_prefix = utool.unindent('''
            @cython.boundscheck(False)
            @cython.wraparound(False)
            ''').strip()
            # http://docs.cython.org/src/reference/compilation.html#compiler-directives
            # [(name, defaultval), ...]
            cython_compiler_directives = [  # NOQA
                ('boundscheck', True),
                ('wraparound', True),
                ('nonecheck', False),
                ('overflowcheck', False),
                ('overflowcheck.fold', True),  # may help or hurt depending on compiler, arch, and opt settings
                ('embedsignature', False),
                ('cdivision', False),
                ('cdivision_warnings', False),
                ('always_allow_keywords', None),
                ('profile', False),
                ('linetrace', False),
                ('invert_types', None),
                ('language_level', 2),
                ('c_string_type', bytes),
                ('c_string_encoding', 'ascii'),
                ('type_version_tag', True),
                ('unraisable_tracebacks', None),
            ]
            return_string = (" %s " % return_type) if return_type is not None else " "
            self.statement(node, func_prefix + '\n')
            # HACK: indexing is used to extract a portion of the generated stream, which wouldn't
            # be needed if statement/write/etc all returned values rather than writing a stream
            index_before = len(self.result)
            inline_string = ' inline ' if inline_flag_ptr[0] else ''
            self.write('cpdef%s%s%s(' % (inline_string, return_string, cyth_funcname,))
            nonsig_typedict = self.signature(node.args, typedict=union_typedict)
            cyth_def_body = self.typedict_to_cythdef(nonsig_typedict)
            self.write(')')
            # Everything written since index_before is the public signature.
            function_signature = ''.join(self.result[index_before:])
            self.interface_lines.append(function_signature)
            self.write(':')
            # TODO FIXME: the typedict parser is a giant hack right now.
            # Find a good cython parser
            self.indentation += 1
            #cyth_def_body = self.typedict_to_cythdef(bodyvars_typedict)
            for s in cyth_def_body:
                self.write('\n', s)
            self.write('\n')
            self.indentation -= 1
            self.body(new_body)
        else:
            self.plain_funcs[node.name] = node
        # if actiontup:
        #     self.cythonized_funcs[node.name] = node
        #     if actiontup[0] == 'defines':
        #         _, cyth_def, typedict, return_type = actiontup
        #         #self.decorators(node, 2)
        #         self.newline(extra=1)
        #         cyth_funcname = cyth_helpers.get_cyth_name(node.name)
        #         # TODO: should allow for user specification
        #         func_prefix = utool.unindent('''
        #         @cython.boundscheck(False)
        #         @cython.wraparound(False)
        #         ''').strip()
        #
        #         return_string = (" %s " % return_type) if return_type is not None else " "
        #         #self.statement(node, func_prefix + '\ncpdef%s%s(' % (return_string, cyth_funcname,))
        #         self.statement(node, func_prefix + '\n')
        #         # HACK: indexing is used to extract a portion of the generated stream, which wouldn't
        #         # be needed if statement/write/etc all returned values rather than writing a stream
        #         index_before = len(self.result)
        #         self.write('cpdef%s%s(' % (return_string, cyth_funcname,))
        #         nonsig_typedict = self.signature(node.args, typedict=typedict)
        #         cyth_def_body = self.typedict_to_cythdef(nonsig_typedict)
        #         self.write(')')
        #         function_signature = ''.join(self.result[index_before:])
        #         self.interface_lines.append(function_signature)
        #         if getattr(node, 'returns', None) is not None:
        #             self.write(' ->', node.returns)
        #         self.write(':')
        #         self.indentation += 1
        #         for s in cyth_def_body:
        #             self.write('\n', s)
        #         self.indentation -= 1
        #         self.body(new_body)
        #     elif actiontup[0] == 'replace':
        #         cyth_def = actiontup[1]
        #         self.newline(extra=1)
        #         self.write(cyth_def)
        #         regex_flags = re.MULTILINE
        #         sig_regex = re.compile('(cpdef.*\)):$', regex_flags)
        #         match = sig_regex.search(cyth_def)
        #         function_signature = match.group(1)
        #         self.interface_lines.append(function_signature)
        #     else:
        #         self.plain_funcs[node.name] = node
def comma_list(self, items, trailing=False):
for idx, item in enumerate(items):
if idx:
self.write(', ')
self.visit(item)
if trailing:
self.write(',')
    def get_benchmarks(self):
        """Return the benchmark-module source text assembled from the
        benchmarks registered while visiting this module's functions."""
        bench_text = cyth_benchmarks.make_bench_text(self.benchmark_codes,
                                                     self.benchmark_names,
                                                     self.py_modname)
        return bench_text
def is_docstring(node):
    """Return True iff *node* is a bare string-literal expression statement."""
    if not isinstance(node, ast.Expr):
        return False
    return isinstance(node.value, ast.Str)
def assignment_targets(node):
    """Return the flat list of target nodes of an assignment statement.

    Assign nodes may carry several targets ('a = b = c' has targets a and
    b), and a tuple/list target ('x, y = y, x' or '[x, y] = [y, x]') is
    expanded one level into its elements.  AugAssign has exactly one
    target, which is returned as a one-element list.
    """
    assert isinstance(node, (ast.Assign, ast.AugAssign)), type(node)
    if isinstance(node, ast.AugAssign):
        return [node.target]
    if isinstance(node, ast.Assign):
        flattened = []
        for tgt in node.targets:
            if isinstance(tgt, (ast.Tuple, ast.List)):
                flattened.extend(tgt.elts)
            else:
                flattened.append(tgt)
        return flattened
    raise AssertionError('unexpected node type %r' % type(node))
#def parseparen(string):
# src = cStringIO.StringIO(line3).readline
# tokentup_list = list(tokenize.generate_tokens(src))
# token_list = [token[1] for token in tokentup_list]
def parse_cdef_line(line):
    """Parse a cdef-style declaration into a ``{varname: type}`` dict.

    The leading ``'cdef '`` prefix (if any) is stripped.  The type is taken
    to be every leading token that still contains a comma plus the first
    token without one, so bracketed types like ``np.array[float, ndims=2]``
    stay together; because those tokens are re-joined without separators,
    any whitespace inside the type is collapsed.  The remainder is split on
    commas into variable names (commas inside parentheses are re-joined by
    a paren-balancing hack) and default values (``x=1``) are discarded.

    Example:
        >>> from cyth.cyth_script import * # NOQA
        >>> line1 = 'np.array[float, ndims=2] x, y, z'
        >>> sorted(parse_cdef_line(line1).items())
        [('x', 'np.array[float,ndims=2]'), ('y', 'np.array[float,ndims=2]'), ('z', 'np.array[float,ndims=2]')]
        >>> line2 = 'cdef int x=y, y=2, z=3'
        >>> sorted(parse_cdef_line(line2).items())
        [('x', 'int'), ('y', 'int'), ('z', 'int')]
        >>> line3 = 'cdef np.ndarray[np.float64_t, ndim=1] out = np.zeros((nMats,), dtype=np.float64)'
        >>> sorted(parse_cdef_line(line3).items())
        [('out', 'np.ndarray[np.float64_t,ndim=1]')]
    """
    # Normalize ' = ' so a default value does not split into its own tokens.
    line = line.replace(' = ', '=')
    tokens = line.replace('cdef ', '').strip().split(' ')
    # The type ends at the first token that contains no comma.
    for ix, token in enumerate(tokens):
        if token.find(",") == -1:
            break
    _type = ''.join(tokens[:ix + 1])
    varnames_split_ = ''.join(tokens[ix + 1:]).split(",")
    varnames_hack_ = []
    lparen = 0
    rparen = 0
    current = ''
    # Re-join fragments whose commas were inside parentheses,
    # e.g. a default value 'np.zeros((nMats,), ...)'.
    # Not a general solution
    for t in varnames_split_:
        lparen += t.count('(')
        rparen += t.count(')')
        if lparen != rparen:
            current += t + ','
        else:
            current += t
            varnames_hack_.append(current)
            current = ''
    # Drop default values: 'x=1' -> 'x'.
    varnames = (x.split('=')[0] for x in varnames_hack_)
    typedict = {varname.strip(): _type.strip() for varname in varnames}
    return typedict
class FirstpassInformationGatherer(ast.NodeVisitor):
    """First-pass visitor: collect module-level assignment targets.

    After visiting a module, ``self.global_names`` holds the AST target
    nodes of every top-level Assign/AugAssign statement.
    """
    def __init__(self):
        self.global_names = []

    def visit_Module(self, node):
        # Only direct children of the module count as globals.
        for stmt in node.body:
            if not isinstance(stmt, (ast.Assign, ast.AugAssign)):
                continue
            self.global_names.extend(assignment_targets(stmt))
class SecondpassInformationGatherer(ast.NodeVisitor):
    """
    second pass visitor class

    Walks the module again and records in ``self.globals_used`` which of
    the globals collected by the first pass appear to be read (by name,
    by attribute access, or textually inside a docstring).
    """
    def __init__(self, fpig):
        self.fpig = fpig
        # Keyed here by the *declaration* AST nodes from the first pass.
        self.globals_used = {name: False for name in fpig.global_names}
    def visit_Name(self, node):
        isname = lambda x: isinstance(x, ast.Name)
        getid = lambda x: x.id
        global_name_iter = map(getid, filter(isname, self.fpig.global_names))
        if getid(node) in global_name_iter and isinstance(node.ctx, ast.Load):
            # NOTE(review): this keys by the *usage* node, whereas __init__
            # keyed by the declaration node -- looks inconsistent with how
            # visit_Module later does globals_used.get(target); confirm.
            self.globals_used[node] = True
    def visit_Attribute(self, node):
        if isinstance(node.value, ast.Name):
            isattribute = lambda x: isinstance(x, ast.Attribute)
            hasloadctx = lambda x: isinstance(x.value, ast.Name) and isinstance(x.value.ctx, ast.Load)
            filt = lambda x: isattribute(x) and hasloadctx(x)
            gettup = lambda x: (x.value.id, x.attr)
            tup_iter = map(gettup, filter(filt, self.fpig.global_names))
            # Match 'obj.attr' usages against attribute-style globals.
            if gettup(node) in tup_iter:
                self.globals_used[node] = True
    def visit_Expr(self, node):
        if isinstance(node.value, ast.Str):
            for global_name_node in self.fpig.global_names:
                # for cyth strings, we don't yet have a good parser, so use
                # substring as a conservative estimate
                global_name = None
                if isinstance(global_name_node, ast.Name):
                    global_name = global_name_node.id
                elif isinstance(global_name_node, ast.Attribute):
                    global_name = global_name_node.value.id + "." + global_name_node.attr
                self.globals_used[global_name_node] = (self.globals_used.get(global_name_node, False) or
                                                       (node.value.s.find(global_name) != -1))
class CallRecorder(ast.NodeVisitor):
    """AST visitor that records every ast.Call node it encounters."""
    def __init__(self):
        self.calls = []

    def visit_Call(self, node):
        self.calls.append(node)
        # Recurse into the call's children so calls nested inside
        # arguments (e.g. f(g(x))) are recorded too; overriding
        # visit_Call without this stops the default traversal at the
        # outermost call.
        self.generic_visit(node)
def get_funcalls_in_node(node):
    """Return the list of ast.Call nodes found by walking *node*."""
    cr = CallRecorder()
    cr.visit(node)
    return cr.calls
def infer_return_type(funcdef_node, typedict):
    """Infer a single return type for *funcdef_node* from *typedict*.

    A type string is collected for every ``return`` statement that returns
    a plain name (looked up in *typedict*, possibly None) or a tuple
    (always ``"tuple"``).  Returns the common type when at least one entry
    was collected and all entries agree; otherwise returns None.
    """
    class _ReturnTypeInferrer(ast.NodeVisitor):
        def __init__(self, node):
            assert isinstance(node, ast.FunctionDef), type(node)
            self.return_type = None
            self.visited_returns = []
            self.visit(node)
            returns = self.visited_returns
            if len(returns) > 0 and utool.list_allsame(returns):
                self.return_type = returns[0]

        def visit_Return(self, node):
            value = node.value
            if not value:
                return
            if isinstance(value, ast.Name):
                self.visited_returns.append(typedict.get(value.id, None))
            elif isinstance(value, ast.Tuple):
                self.visited_returns.append("tuple")

    return _ReturnTypeInferrer(funcdef_node).return_type
| aweinstock314/cyth | cyth/cyth_parser.py | Python | apache-2.0 | 36,709 | [
"VisIt"
] | 4da2d6596d8e61222cd62a14ab77c12ffd0cae6ffb99d5ad23c780453ce7bae4 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
'''
Created on Jun 27, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 27, 2012"
import unittest2 as unittest
import os
import json
from pymatgen.entries.exp_entries import ExpEntry
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ExpEntryTest(unittest.TestCase):
    """Tests for pymatgen's ExpEntry built from the Fe2O3 reference data."""
    def setUp(self):
        # Thermodata was serialized with MontyEncoder; decode symmetrically.
        with open(os.path.join(test_dir, "Fe2O3_exp.json"), "r") as f:
            thermodata = json.load(f, cls=MontyDecoder)
        self.entry = ExpEntry("Fe2O3", thermodata)
    def test_energy(self):
        # -825.5 is the expected energy from the reference data set.
        self.assertAlmostEqual(self.entry.energy, -825.5)
    def test_to_from_dict(self):
        # Round-trip through the as_dict/from_dict serialization protocol.
        d = self.entry.as_dict()
        e = ExpEntry.from_dict(d)
        self.assertAlmostEqual(e.energy, -825.5)
    def test_str(self):
        # The entry must have a usable string representation.
        self.assertIsNotNone(str(self.entry))
if __name__ == "__main__":
unittest.main()
| aykol/pymatgen | pymatgen/entries/tests/test_exp_entries.py | Python | mit | 1,235 | [
"pymatgen"
] | 632d2c14f95eb7449844152ee39d8934cad3524442879b621c44bbbfddb32927 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Dialog to set the price for sellables"""
from decimal import Decimal
import gtk
from kiwi.currency import currency
from kiwi.enums import ListType
from kiwi.ui.objectlist import Column
from kiwi.ui.listdialog import ListSlave
from storm.expr import LeftJoin
from stoqlib.api import api
from stoqlib.database.viewable import Viewable
from stoqlib.domain.sellable import (Sellable, ClientCategoryPrice,
SellableCategory)
from stoqlib.domain.person import ClientCategory
from stoqlib.gui.dialogs.progressdialog import ProgressDialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.lib.message import marker
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class SellableView(Viewable):
    """Sellable left-joined with its (optional) category, plus helpers to
    stage per-client-category prices in memory and persist them later.
    """
    id = Sellable.id
    code = Sellable.code
    barcode = Sellable.barcode
    status = Sellable.status
    cost = Sellable.cost
    description = Sellable.description
    price = Sellable.base_price
    max_discount = Sellable.max_discount
    category_description = SellableCategory.description
    tables = [
        Sellable,
        LeftJoin(SellableCategory, SellableCategory.id == Sellable.category_id)
    ]
    def __init__(self, *args, **kargs):
        # category_id -> staged price; written back by save_changes()
        self._new_prices = {}
        Viewable.__init__(self, *args, **kargs)
    def set_markup(self, category, markup):
        """Stage a price of cost plus *markup* percent for *category*."""
        price = self.cost + self.cost * markup / 100
        # Never stage a non-positive price.
        if price <= 0:
            price = Decimal('0.01')
        self.set_price(category.id, currency(price))
    def set_price(self, category_id, price):
        """Stage *price* for *category_id* and mirror it on the
        dynamically-named ``price_<id>`` attribute shown in the dialog."""
        self._new_prices[category_id] = price
        setattr(self, 'price_%s' % category_id, price)
    def save_changes(self):
        """Persist staged prices, creating or updating ClientCategoryPrice rows."""
        store = self.store
        for cat, value in self._new_prices.items():
            # NOTE(review): 'cat' is a category id here, yet it is passed as
            # the 'category' keyword to both find() and the constructor --
            # confirm the ORM resolves ids for that reference.
            info = store.find(ClientCategoryPrice, sellable_id=self.id,
                              category=cat).one()
            if not info:
                info = ClientCategoryPrice(sellable_id=self.id,
                                           category=cat,
                                           max_discount=self.max_discount,
                                           price=value,
                                           store=store)
            else:
                info.price = value
class SellablePriceDialog(BaseEditor):
    """Dialog to mass-edit sellable prices, one column per client category."""
    gladefile = "SellablePriceDialog"
    model_type = object
    title = _(u"Price Change Dialog")
    size = (850, 450)
    def __init__(self, store):
        self.categories = store.find(ClientCategory)
        self._last_cat = None
        # The model is a dummy: this editor edits many rows, not one object.
        BaseEditor.__init__(self, store, model=object())
        self._setup_widgets()
    def _setup_widgets(self):
        """Fill the category combo and load every sellable with its prices."""
        self.category.prefill(api.for_combo(self.categories))
        prices = self.store.find(ClientCategoryPrice)
        # Index prices as sellable_id -> {category_id: price} for the loop below.
        category_prices = {}
        for p in prices:
            c = category_prices.setdefault(p.sellable_id, {})
            c[p.category_id] = p.price
        marker('SellableView')
        sellables = self.store.find(SellableView).order_by(Sellable.code)
        marker('add_items')
        for s in sellables:
            for category_id, price in category_prices.get(s.id, {}).items():
                s.set_price(category_id, price)
            self.slave.listcontainer.list.append(s)
        marker('Done add_items')
    def _get_columns(self):
        """Build the static columns plus one price column per client category."""
        marker('_get_columns')
        self._price_columns = {}
        columns = [Column("code", title=_(u"Code"), data_type=str,
                          width=100),
                   Column("barcode", title=_(u"Barcode"), data_type=str,
                          width=100, visible=False),
                   Column("category_description", title=_(u"Category"),
                          data_type=str, width=100),
                   Column("description", title=_(u"Description"),
                          data_type=str, width=200),
                   Column("cost", title=_(u"Cost"),
                          data_type=currency, width=90),
                   Column("price", title=_(u"Default Price"),
                          data_type=currency, width=90)
                   ]
        # The None key maps to the default-price column.
        self._price_columns[None] = columns[-1]
        for cat in self.categories:
            columns.append(Column('price_%s' % (cat.id, ),
                           title=cat.get_description(), data_type=currency,
                           width=90, visible=True))
            self._price_columns[cat.id] = columns[-1]
        self._columns = columns
        marker('Done _get_columns')
        return columns
    #
    # BaseEditorSlave
    #
    def setup_slaves(self):
        # Read-only list slave holding one row per sellable.
        self.slave = ListSlave(self._get_columns())
        self.slave.set_list_type(ListType.READONLY)
        self.attach_slave("on_slave_holder", self.slave)
    def on_cancel(self):
        # Call clear on objectlist before destruction. Workaround for a bug
        # when destructing the dialog taking to long
        self.slave.listcontainer.list.clear()
    def on_confirm(self):
        """Persist every staged price while showing a modal progress bar."""
        marker('Saving prices')
        # FIXME: Improve this part. This is just a quick workaround to
        # release the bugfix asap
        self.main_dialog.ok_button.set_sensitive(False)
        self.main_dialog.cancel_button.set_sensitive(False)
        d = ProgressDialog(_('Updating items'), pulse=False)
        d.set_transient_for(self.main_dialog)
        d.start(wait=0)
        d.cancel.hide()
        total = len(self.slave.listcontainer.list)
        for i, s in enumerate(self.slave.listcontainer.list):
            s.save_changes()
            d.progressbar.set_text('%s/%s' % (i + 1, total))
            d.progressbar.set_fraction((i + 1) / float(total))
            # Keep the UI responsive while saving.
            while gtk.events_pending():
                gtk.main_iteration(False)
        d.stop()
        marker('Done saving prices')
        self.slave.listcontainer.list.clear()
    #
    # Callbacks
    #
    def on_apply__clicked(self, button):
        """Apply the entered markup to every row for the selected category."""
        markup = self.markup.read()
        cat = self.category.read()
        marker('Updating prices')
        for i in self.slave.listcontainer.list:
            i.set_markup(cat, markup)
            self.slave.listcontainer.list.refresh(i)
        marker('Done updating prices')
| andrebellafronte/stoq | stoqlib/gui/dialogs/sellablepricedialog.py | Python | gpl-2.0 | 7,152 | [
"VisIt"
] | d3ef3c72fe202b411b070da1ad829b73c655529709829da1fd926c835d645a3b |
#!/usr/bin/env python
# coding: UTF-8
import sys
import os.path
from optparse import OptionParser
# check if the pymol module is installed
try :
import pymol
except :
sys.exit( "error : This script needs 'pymol' module. Please install PyMOL before running." )
def name_check(string, name):
    """Resolve *string* into (pdb_data, object_name).

    If *string* is an existing path, return the file's contents together
    with its basename; otherwise *string* is assumed to already be PDB
    data and is returned unchanged together with *name*.
    """
    if not os.path.exists(string):
        return (string, name)
    with open(string) as handle:
        contents = handle.read()
    return (contents, os.path.basename(string))
def cartoon( filename, name ):
    """Load a PDB (path or raw PDB string) into PyMOL and show it as a cartoon.

    *filename* may be a file path or literal PDB data (see name_check);
    *name* is the object name used when raw data is given.
    """
    pymol.finish_launching()
    pymol.cmd.read_pdbstr( *name_check( filename, name ) )
    pymol.cmd.hide( "everything" )
    pymol.cmd.show( "cartoon" )
    # Color with PyMOL's default spectrum (rainbow) coloring.
    pymol.cmd.spectrum()
if __name__ == "__main__":
# OptionParser
def opt():
# about this script
parser = OptionParser( usage = "%prog [options] PDB_FILE",
description = "Show the PDB_FILE in cartoon model using PyMOL." )
###### set options #######
# parse arguments
( options, args ) = parser.parse_args()
# check arguments
if len( args ) != 1 :
parser.error( "Incorrect number of arguments. "
"Just one PDB_FILE must be given. \n"
"\t\tTo show help message, use '-h or --help' option." )
# check if "file" exists
if not os.path.isfile( args[0] ) :
sys.exit( "error : \"" + args[0] + "\" : no such file" )
return ( options, args )
( options, args ) = opt()
cartoon( args[0], None )
| nishn/pymol-script | pym.py | Python | mit | 1,563 | [
"PyMOL"
] | bfe3f5a512815a751dffde20b95c46e448e4045cc9c3315454f9c13e39498cb1 |
"""
@author: Martin Kuemmel
@organization: LMU / USM
@license: Gnu Public Licence
@contact: mkuemmel@usm.lmu.de
@version: $Revision: $
@date: $Date: $
@changeDate: $LastChangedDate: $
Initialize the test library.
"""
import os.path
import shutil
import unittest
# check tips version
import tips
# vtipslt2: True when the installed tips is older than 2.0; consumed by
# @unittest.skipIf below to disable tests that need tips >= 2.0.
if float(tips.__version__)<2.0:
    vtipslt2 = True
else:
    vtipslt2 = False
class Test_VerifyWaveBasic(unittest.TestCase):
    def setUp(self):
        """Create a scratch run-directory tree, point the aXe environment
        variables at its sub-directories and copy the verification inputs.
        """
        # flag for mop up
        self.doRemove = False
        # global detector-flag
        self.detectorFlag=False
        # global silent-flag
        self.silentFlag=True
        # the (list of) environment variables, names given to them and the files to be copied there
        subDirs = [('AXE_IMAGE_PATH', 'DATA', ['galaxyThumbs.fits', 'input_stars_imgs.fits', 'input_line.spc.fits', 'input_cat_verifyI.dat', 'input_cat_verifyII.dat', 'input_cat_verifyIII.dat', 'input_cat_verifyIV.dat', 'input_cat_verifyII.fits', 'input_cat_verifyIV.fits']), \
                   ('AXE_CONFIG_PATH', 'CONF', ['verificationConfI.conf', 'constSensI.fits', 'mef_c4.00000_x-0.38167_y1.08146.fits', 'mef_c4.00000_x0.28625_y1.17167.fits']), \
                   ('AXE_OUTPUT_PATH', 'OUTPUT'), ('AXE_OUTSIM_PATH', 'OUTSIM'), \
                   #('AXE_SIMDATA_PATH', 'SIMDATA', ['wfc3_ir_f125w_tpass_m01.dat']), \
                   ('AXE_DRIZZLE_PATH', 'DRIZZLE')]
        # define the directory with the input data and make sure it exists
        self.dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'verifyData'))
        if not os.path.isdir(self.dataDir):
            errMsg = 'File does not exist: %s!' % self.dataDir
            raise Exception(errMsg)
        # define a name for the run directory;
        # destroy any old version;
        # create a new one
        #self.runDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'verifyTests'))
        # run test in source directory may cause trouble depending where the code is integrated (not necessery writable)
        # new path is defined relative, assuming the test would be ran in a appropiate directory
        self.runDir = os.path.abspath('./verifyTests')
        #if os.path.isdir(self.runDir):
        #    shutil.rmtree(self.runDir, ignore_errors=True, onerror=None)
        if not os.path.isdir(self.runDir):
            os.mkdir(self.runDir)
        # create the various sub-dirs
        # and point the environment variables on it
        for aSub in subDirs:
            subDir = os.path.join(self.runDir, aSub[1])
            if not os.path.isdir(subDir):
                os.mkdir(subDir)
            os.environ[aSub[0]] = subDir
            # copy files in this sub-dir
            if len(aSub) > 2:
                # extract the file list
                fileList = aSub[2]
                # copy files in the sub-dir
                for aFile in fileList:
                    # put together file names
                    inFile = os.path.join(self.dataDir, aFile)
                    outFile = os.path.join(subDir, aFile)
                    # make sure the file exists
                    if not os.path.isfile(inFile):
                        errMsg = 'File does not exist: %s!' % inFile
                        raise Exception(errMsg)
                    # copy the file
                    shutil.copy(inFile, outFile)
        # create a subdir for tips tests
        if not os.path.isdir(os.path.join(self.runDir,'tips')):
            os.mkdir(os.path.join(self.runDir,'tips'))
        # create a subdir for tips tests
        if not os.path.isdir(os.path.join(self.runDir,'tips','wave')):
            os.mkdir(os.path.join(self.runDir,'tips','wave'))
        self.tipsDir = os.path.join(self.runDir,'tips','wave')
def tearDown(self):
# check the mop up flag
if self.doRemove:
# tear down the run directory
if os.path.isdir(self.runDir):
shutil.rmtree(self.runDir, ignore_errors=True, onerror=None)
    def testGaussModSpec(self):
        """
        Gaussian objects with model spectra

        Simulates a dispersed image with aXeSIM and checks that, for every
        object, the fitted and center-of-gravity line positions agree with
        the simulated positions to within half a pixel.
        """
        import math
        import axesim
        import verify
        # make the simulation
        axesim.simdispim(incat='input_cat_verifyII.dat', config='verificationConfI.conf',
                         dispim_name='test_verify_Flux_GaussModspec.fits', model_spectra='input_line.spc.fits',
                         exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
        # check that the output image exists
        resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_GaussModspec.fits')
        self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
        # compute the simulated flux and extract the flux values from the simulated image
        simVals, fitVals, cogVals = verify.verifyII(os.path.join(os.environ['AXE_OUTSIM_PATH'],'test_verify_Flux_GaussModspec.fits'),
                                                    os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyII.dat'),
                                                    os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.conf'),
                                                    inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'],'input_line.spc.fits'))
        # go over all objects
        for index in range(len(simVals)):
            # make sure the difference is small
            #self.assertLess(relDiff1, 1.0E-03)
            #print '%.3f <-> %.3f : %.3f   %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'],simVals[index]['fwhm'], fitVals[index][2]*5.0/2.35482, fitVals[index][2]*5.0/2.35482-simVals[index]['fwhm'])
            #print '%.3f <-> %.3f : %.3f   %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'], simVals[index]['center'], cogVals[index], cogVals[index]-simVals[index]['center'])
            # both the Gaussian-fit and center-of-gravity positions must be
            # within 0.5 pixel of the simulated line center
            self.assertLess(math.fabs(fitVals[index][1]-simVals[index]['center']), 0.5)
            self.assertLess(math.fabs(cogVals[index]-simVals[index]['center']), 0.5)
    def testGaussModSpec_tips(self):
        """
        Gaussian objects with model spectra at TIPS level

        Runs the same verification as testGaussModSpec but driving the
        simulation through the TIPS Observation interface instead of
        calling aXeSIM directly.
        """
        import math
        import tips
        import verify
        inCat = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyII.fits')
        inSpc = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_line.spc.fits')
        obs = tips.Observation(inCat, inSpc, inCatForm='TIPS', inSpcForm='aXeSIM', norm=True)
        obs.loadFromFile(os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.fits'))
        obs.runSimulation(workDir=self.tipsDir)
        # check that the output image exists
        resultFile = os.path.join(self.tipsDir,'OUTSIM', 'input_cat_verifyII_WFC3_IR_00_v1_verify_d300914_IMG.fits')
        self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
        # map TIPS object indices back onto the aXeSIM catalog ordering
        axesimCat = os.path.join(self.tipsDir,'DATA','input_cat_verifyII_WFC3_IR_00.cat')
        verify.getInitialModIndex(inCat, axesimCat)
        # compute the simulated flux and extract the flux values from the simulated image
        simVals, fitVals, cogVals = verify.verifyII(os.path.join(self.tipsDir,'OUTSIM','input_cat_verifyII_WFC3_IR_00_v1_verify_d300914_IMG.fits'),
                                                    axesimCat, os.path.join(self.tipsDir,'CONF','WFC3_IR_00_v1_verify_d300914.conf'), inSpec=inSpc)
        # go over all objects
        for index in range(len(simVals)):
            # make sure the difference is small
            #self.assertLess(relDiff1, 1.0E-03)
            #print '%.3f <-> %.3f : %.3f   %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'],simVals[index]['fwhm'], fitVals[index][2]*5.0/2.35482, fitVals[index][2]*5.0/2.35482-simVals[index]['fwhm'])
            #print '%.3f <-> %.3f : %.3f   %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'], simVals[index]['center'], cogVals[index], cogVals[index]-simVals[index]['center'])
            # both position estimates must be within 0.5 pixel of the truth
            self.assertLess(math.fabs(fitVals[index][1]-simVals[index]['center']), 0.5)
            self.assertLess(math.fabs(cogVals[index]-simVals[index]['center']), 0.5)
    @unittest.skipIf(vtipslt2, "not supported in with tips < 2.0")
    def testStarsModSpec(self):
        """
        Stellar objects with model spectra.

        Simulates point-like objects through a PSF using thumbnail model
        images plus a model spectrum, then verifies the recovered
        emission-line centers against the simulated ones.
        """
        import math
        import axesim
        import verify
        # make the simulation
        axesim.simdispim(incat='input_cat_verifyIII.dat', config='verificationConfI.conf',
                         dispim_name='test_verify_Flux_starsModspec.fits', model_images='input_stars_imgs.fits', model_spectra='input_line.spc.fits',
                         psf_file='mef_c4.00000_x0.28625_y1.17167.fits', exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
        # check that the output image exists
        resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_starsModspec.fits')
        self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
        # compute the simulated flux and extract the flux values from the simulated image
        # NOTE(review): the PSF used for the simulation
        # (x0.28625_y1.17167) differs from the one passed to verifyII
        # below (x-0.38167_y1.08146) -- confirm this is intentional.
        simVals, fitVals, cogVals = verify.verifyII(os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_starsModspec.fits'),
                                                    os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_cat_verifyIII.dat'),
                                                    os.path.join(os.environ['AXE_CONFIG_PATH'], 'verificationConfI.conf'),
                                                    inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_line.spc.fits'),
                                                    inModel=os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_stars_imgs.fits'),
                                                    inPSF=os.path.join(os.environ['AXE_CONFIG_PATH'], 'mef_c4.00000_x-0.38167_y1.08146.fits'))
        # go over all objects
        for index in range(len(simVals)):
            # fit center within 0.5 pixel; the center-of-gravity check
            # uses a looser 0.7 pixel tolerance for stellar objects
            self.assertLess(math.fabs(fitVals[index][1] - simVals[index]['center']), 0.5)
            self.assertLess(math.fabs(cogVals[index] - simVals[index]['center']), 0.7)
    def testModImgSpec(self):
        """
        Model images with input spectra.

        Simulates a dispersed image for galaxy thumbnail model images
        combined with a model spectrum, then verifies the recovered
        emission-line centers against the simulated ones.
        """
        import math
        import axesim
        import verify
        # make the simulation
        axesim.simdispim(incat='input_cat_verifyIV.dat', config='verificationConfI.conf',
                         dispim_name='test_verify_Flux_ModimgSpec.fits', model_images='galaxyThumbs.fits', model_spectra='input_line.spc.fits',
                         exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
        # check that the output image exists
        resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_ModimgSpec.fits')
        self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
        # compute the simulated flux and extract the flux values from the simulated image
        simVals, fitVals, cogVals = verify.verifyII(os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_ModimgSpec.fits'),
                                                    os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_cat_verifyIV.dat'),
                                                    os.path.join(os.environ['AXE_CONFIG_PATH'], 'verificationConfI.conf'),
                                                    inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_line.spc.fits'),
                                                    inModel=os.path.join(os.environ['AXE_IMAGE_PATH'], 'galaxyThumbs.fits'))
        # go over all objects
        for index in range(len(simVals)):
            # measured line centers (Gaussian fit and center of gravity)
            # must agree with the simulated ones to within half a pixel
            self.assertLess(math.fabs(fitVals[index][1] - simVals[index]['center']), 0.5)
            self.assertLess(math.fabs(cogVals[index] - simVals[index]['center']), 0.5)
def testModImgSpec_tips(self):
"""
Model images with input spectra at TIPS level
"""
import math
import axesim
import verify
inCat = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_cat_verifyIV.fits')
inSpc = os.path.join(os.environ['AXE_IMAGE_PATH'],'input_line.spc.fits')
inThm = os.path.join(os.environ['AXE_IMAGE_PATH'],'galaxyThumbs.fits')
obs = tips.Observation(inCat, inSpc, inCatForm='TIPS', inSpcForm='aXeSIM', norm=True, inThmDir=inThm)
obs.loadFromFile(os.path.join(os.environ['AXE_CONFIG_PATH'],'verificationConfI.fits'))
obs.runSimulation(workDir=self.tipsDir)
# check that the output image exists
resultFile = os.path.join(self.tipsDir,'OUTSIM', 'input_cat_verifyIV_WFC3_IR_00_v1_verify_d300914_IMG.fits')
self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
axesimCat = os.path.join(self.tipsDir,'DATA','input_cat_verifyIV_WFC3_IR_00.cat')
verify.getInitialModIndex(inCat, axesimCat)
# compute the simulated flux and extract the flux values from the simulated image
simVals, fitVals, cogVals = verify.verifyII(os.path.join(self.tipsDir,'OUTSIM','input_cat_verifyIV_WFC3_IR_00_v1_verify_d300914_IMG.fits'),
axesimCat, os.path.join(self.tipsDir,'CONF','WFC3_IR_00_v1_verify_d300914.conf'),
inSpec=inSpc, inModel=inThm)
# go over all objects
for index in range(len(simVals)):
# make sure the difference is small
#self.assertLess(relDiff1, 1.0E-03)
#print '%.3f <-> %.3f : %.3f %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'],simVals[index]['fwhm'], fitVals[index][2]*5.0/2.35482, fitVals[index][2]*5.0/2.35482-simVals[index]['fwhm'])
#print '%.3f <-> %.3f : %.3f %.2f <-> %.2f : %.2f' % (simVals[index]['center'], fitVals[index][1], fitVals[index][1]-simVals[index]['center'], simVals[index]['center'], cogVals[index], cogVals[index]-simVals[index]['center'])
self.assertLess(math.fabs(fitVals[index][1]-simVals[index]['center']), 0.5)
self.assertLess(math.fabs(cogVals[index]-simVals[index]['center']), 0.5)
    @unittest.skipIf(vtipslt2, "not supported in with tips < 2.0")
    def testModImgSpecPSF(self):
        """
        Model images with input spectra, convolved with a PSF.

        Same setup as testModImgSpec but the simulation additionally
        applies a PSF file; the same PSF is passed to the verification.
        """
        import math
        import axesim
        import verify
        # make the simulation
        axesim.simdispim(incat='input_cat_verifyIV.dat', config='verificationConfI.conf',
                         dispim_name='test_verify_Flux_ModimgSpecPSF.fits', model_images='galaxyThumbs.fits', model_spectra='input_line.spc.fits',
                         psf_file='mef_c4.00000_x-0.38167_y1.08146.fits', exptime=10., bck_flux=0.0, detector=self.detectorFlag, silent=self.silentFlag)
        # check that the output image exists
        resultFile = os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_ModimgSpecPSF.fits')
        self.assertTrue(os.path.isfile(resultFile), 'Output file does not exist: %s!' % resultFile)
        # compute the simulated flux and extract the flux values from the simulated image
        simVals, fitVals, cogVals = verify.verifyII(os.path.join(os.environ['AXE_OUTSIM_PATH'], 'test_verify_Flux_ModimgSpecPSF.fits'),
                                                    os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_cat_verifyIV.dat'),
                                                    os.path.join(os.environ['AXE_CONFIG_PATH'], 'verificationConfI.conf'),
                                                    inSpec=os.path.join(os.environ['AXE_IMAGE_PATH'], 'input_line.spc.fits'),
                                                    inModel=os.path.join(os.environ['AXE_IMAGE_PATH'], 'galaxyThumbs.fits'),
                                                    inPSF=os.path.join(os.environ['AXE_CONFIG_PATH'], 'mef_c4.00000_x-0.38167_y1.08146.fits'))
        # go over all objects
        for index in range(len(simVals)):
            # measured line centers (Gaussian fit and center of gravity)
            # must agree with the simulated ones to within half a pixel
            self.assertLess(math.fabs(fitVals[index][1] - simVals[index]['center']), 0.5)
            self.assertLess(math.fabs(cogVals[index] - simVals[index]['center']), 0.5)
| nfourmanoit/TIPS | test/test_tips/testverify/testVerifyWave.py | Python | gpl-3.0 | 19,263 | [
"Gaussian"
] | b7f054e2dc24787c3dc6c02e536d8683ff2034eb5b838379c0584ec887f2af66 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
import inspect
import warnings
from skbio.util import classproperty, overrides
from skbio.util._decorator import (stable, experimental, deprecated,
_state_decorator)
from skbio.util._exception import OverrideError
class TestOverrides(unittest.TestCase):
    """Tests for the ``overrides`` decorator."""

    def test_raises_when_missing(self):
        # Decorating a method that does not exist on the referenced
        # parent class must raise OverrideError at class-creation time.
        class Parent(object):
            pass

        with self.assertRaises(OverrideError):
            class Child(Parent):
                @overrides(Parent)
                def test(self):
                    pass

    def test_doc_inherited(self):
        # An undocumented override inherits the parent's docstring.
        class Parent(object):
            def test(self):
                """Docstring"""
                pass

        class Child(Parent):
            @overrides(Parent)
            def test(self):
                pass

        self.assertEqual(Child.test.__doc__, "Docstring")

    def test_doc_not_inherited(self):
        # An override that supplies its own docstring keeps it.
        class Parent(object):
            def test(self):
                """Docstring"""
                pass

        class Child(Parent):
            @overrides(Parent)
            def test(self):
                """Different"""
                pass

        self.assertEqual(Child.test.__doc__, "Different")
class TestClassProperty(unittest.TestCase):
    """Tests for the ``classproperty`` descriptor."""

    def test_getter_only(self):
        class Foo(object):
            _foo = 42

            @classproperty
            def foo(cls):
                return cls._foo

        # Reading works both through the class and through an instance.
        self.assertEqual(Foo.foo, 42)
        instance = Foo()
        self.assertEqual(instance.foo, 42)
        # A getter-only classproperty rejects assignment via an instance.
        with self.assertRaises(AttributeError):
            instance.foo = 4242
class TestStabilityState(unittest.TestCase):
    """Shared fixture for the stability-state decorator tests.

    Provides a canonical input docstring that the decorators under test
    (``stable``, ``experimental``, ``deprecated``) are expected to
    rewrite.
    """

    # the indentation spacing gets weird, so I'm defining the
    # input doc string explicitly and adding it after function
    # definition
    _test_docstring = (" Add 42, or something else, to x.\n"
                       "\n"
                       " Parameters\n"
                       " ----------\n"
                       " x : int, x\n"
                       " y : int, optional\n")
class TestBase(TestStabilityState):
    """Tests for the private helpers on ``_state_decorator``."""

    def test_get_indentation_level(self):
        # _get_indentation_level inspects a list of docstring lines and
        # returns the indentation (in spaces) to use for inserted text.
        c = _state_decorator()
        # no docstring at all: defaults to 0 unless overridden
        self.assertEqual(c._get_indentation_level([]), 0)
        self.assertEqual(
            c._get_indentation_level([], default_no_existing_docstring=3), 3)
        # an existing (even empty) docstring defaults to 4
        self.assertEqual(c._get_indentation_level([""]), 4)
        self.assertEqual(
            c._get_indentation_level([""], default_existing_docstring=3), 3)
        in_ = (["summary"])
        self.assertEqual(c._get_indentation_level(in_), 4)
        # blank / whitespace-only lines are ignored
        in_ = (["summary", "", "", " ", "", " ", ""])
        self.assertEqual(c._get_indentation_level(in_), 4)
        # the minimum indentation of the non-summary lines wins
        in_ = (["summary", " More indentation", " Less indentation"])
        self.assertEqual(c._get_indentation_level(in_), 5)

    def test_update_docstring(self):
        # _update_docstring splices a state note into an existing
        # docstring (or creates one), wrapping long notes.
        c = _state_decorator()
        # no docstring: the note becomes the whole docstring
        in_ = None
        exp = ("""State: Test!!""")
        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
        # empty docstring: note appended after a blank line
        in_ = """"""
        exp = ("""\n\n    State: Test!!""")
        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
        # note is inserted between the summary and the Parameters block
        in_ = ("""Short summary\n\n Parameters\n\n----------\n """
               """x : int\n""")
        exp = ("""Short summary\n\n State: Test!!\n\n"""
               """ Parameters\n\n----------\n x : int\n""")
        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
        in_ = ("""Short summary\n\n Parameters\n\n----------\n """
               """x : int\n""")
        exp = ("""Short summary\n\n State: Test!!\n\n"""
               """ Parameters\n\n----------\n x : int\n""")
        self.assertEqual(c._update_docstring(in_, "Test!!"), exp)
        # long notes are wrapped onto continuation lines
        in_ = ("""Short summary\n\n Parameters\n\n----------\n """
               """x : int\n""")
        exp = ("""Short summary\n\n State: Test!!Test!!Test!!Test!!Test!!"""
               """Test!!Test!!Test!!Test!!Test!!Test!!Te\n st!!T"""
               """est!!Test!!Test!!Test!!Test!!Test!!Test!!Test!!\n\n"""
               """ Parameters\n\n----------\n x : int\n""")
        self.assertEqual(c._update_docstring(in_, "Test!!"*20), exp)
class TestStable(TestStabilityState):
    """Tests for the ``stable`` state decorator."""

    def _get_f(self, as_of):
        # Build a small documented function and decorate it as stable.
        def f(x, y=42):
            return x + y
        f.__doc__ = self._test_docstring
        f = stable(as_of=as_of)(f)
        return f

    def test_function_output(self):
        # Decoration must not alter the function's behavior.
        f = self._get_f('0.1.0')
        self.assertEqual(f(1), 43)

    def test_function_docstring(self):
        # The docstring gains a "State: Stable as of <version>." note.
        f = self._get_f('0.1.0')
        e1 = (" Add 42, or something else, to x.\n\n"
              " State: Stable as of 0.1.0.\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))
        f = self._get_f('0.1.1')
        e1 = (" Add 42, or something else, to x.\n\n"
              " State: Stable as of 0.1.1.\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))

    def test_function_signature(self):
        # The wrapper must preserve the original signature and name.
        # NOTE(review): inspect.getargspec/ArgSpec were removed in
        # Python 3.11; this code targets an older Python.
        f = self._get_f('0.1.0')
        expected = inspect.ArgSpec(
            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
        self.assertEqual(inspect.getargspec(f), expected)
        self.assertEqual(f.__name__, 'f')

    def test_missing_kwarg(self):
        # as_of must be given, and as a keyword argument only.
        self.assertRaises(ValueError, stable)
        self.assertRaises(ValueError, stable, '0.1.0')
class TestExperimental(TestStabilityState):
    """Tests for the ``experimental`` state decorator."""

    def _get_f(self, as_of):
        # Build a small documented function and decorate it as experimental.
        def f(x, y=42):
            return x + y
        f.__doc__ = self._test_docstring
        f = experimental(as_of=as_of)(f)
        return f

    def test_function_output(self):
        # Decoration must not alter the function's behavior.
        f = self._get_f('0.1.0')
        self.assertEqual(f(1), 43)

    def test_function_docstring(self):
        # The docstring gains a "State: Experimental as of <version>." note.
        f = self._get_f('0.1.0')
        e1 = (" Add 42, or something else, to x.\n\n"
              " State: Experimental as of 0.1.0.\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))
        f = self._get_f('0.1.1')
        e1 = (" Add 42, or something else, to x.\n\n"
              " State: Experimental as of 0.1.1.\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))

    def test_function_signature(self):
        # The wrapper must preserve the original signature and name.
        # NOTE(review): inspect.getargspec/ArgSpec were removed in
        # Python 3.11; this code targets an older Python.
        f = self._get_f('0.1.0')
        expected = inspect.ArgSpec(
            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
        self.assertEqual(inspect.getargspec(f), expected)
        self.assertEqual(f.__name__, 'f')

    def test_missing_kwarg(self):
        # as_of must be given, and as a keyword argument only.
        self.assertRaises(ValueError, experimental)
        self.assertRaises(ValueError, experimental, '0.1.0')
class TestDeprecated(TestStabilityState):
    """Tests for the ``deprecated`` state decorator."""

    def _get_f(self, as_of, until, reason):
        # Build a small documented function and decorate it as deprecated.
        def f(x, y=42):
            return x + y
        f.__doc__ = self._test_docstring
        f = deprecated(as_of=as_of, until=until, reason=reason)(f)
        return f

    def test_function_output(self):
        # Decoration must not alter the function's behavior.
        f = self._get_f('0.1.0', until='0.1.4',
                        reason='You should now use skbio.g().')
        self.assertEqual(f(1), 43)

    def test_deprecation_warning(self):
        # Calling a deprecated function emits a DeprecationWarning that
        # names the version the deprecation started in.
        f = self._get_f('0.1.0', until='0.1.4',
                        reason='You should now use skbio.g().')
        # adapted from SO example here: http://stackoverflow.com/a/3892301
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            f(1)
            self.assertTrue(issubclass(w[0].category, DeprecationWarning))
            expected_str = "is deprecated as of scikit-bio version 0.1.0"
            self.assertTrue(expected_str in str(w[0].message))

    def test_function_docstring(self):
        # The docstring gains a ".. note:: Deprecated ..." block that
        # includes the removal version and the migration reason.
        f = self._get_f('0.1.0', until='0.1.4',
                        reason='You should now use skbio.g().')
        e1 = (" Add 42, or something else, to x.\n\n"
              " .. note:: Deprecated as of 0.1.0 for "
              "removal in 0.1.4. You should now use\n"
              " skbio.g().\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))
        f = self._get_f('0.1.1', until='0.1.5',
                        reason='You should now use skbio.h().')
        e1 = (" Add 42, or something else, to x.\n\n"
              " .. note:: Deprecated as of 0.1.1 for "
              "removal in 0.1.5. You should now use\n"
              " skbio.h().\n\n"
              " Parameters")
        self.assertTrue(f.__doc__.startswith(e1))

    def test_function_signature(self):
        # The wrapper must preserve the original signature and name.
        # NOTE(review): inspect.getargspec/ArgSpec were removed in
        # Python 3.11; this code targets an older Python.
        f = self._get_f('0.1.0', until='0.1.4',
                        reason='You should now use skbio.g().')
        expected = inspect.ArgSpec(
            args=['x', 'y'], varargs=None, keywords=None, defaults=(42,))
        self.assertEqual(inspect.getargspec(f), expected)
        self.assertEqual(f.__name__, 'f')

    def test_missing_kwarg(self):
        # as_of, until and reason are all required keyword arguments.
        self.assertRaises(ValueError, deprecated)
        self.assertRaises(ValueError, deprecated, '0.1.0')
        self.assertRaises(ValueError, deprecated, as_of='0.1.0')
        self.assertRaises(ValueError, deprecated, as_of='0.1.0', until='0.1.4')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Achuth17/scikit-bio | skbio/util/tests/test_decorator.py | Python | bsd-3-clause | 9,683 | [
"scikit-bio"
] | 3ad9d71176fda96bc19616569a9d2b62b7d04cbf728439099cf8b18fde607486 |
# proxy module
from __future__ import absolute_import
from mayavi.preferences.api import *
| enthought/etsproxy | enthought/mayavi/preferences/api.py | Python | bsd-3-clause | 91 | [
"Mayavi"
] | c9a5ad06bace1b13ff12b89d319c8881a63ed9233c6df88f055400fa7745ff54 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from flask import Blueprint
from flask_appbuilder import BaseView as AppBuilderBaseView, expose
from airflow.executors.base_executor import BaseExecutor
# Importing base classes that we need to derive
from airflow.hooks.base import BaseHook
from airflow.models.baseoperator import BaseOperator
# This is the class you derive to create a plugin
from airflow.plugins_manager import AirflowPlugin
from airflow.sensors.base import BaseSensorOperator
from airflow.timetables.interval import CronDataIntervalTimetable
from tests.test_utils.mock_operators import (
AirflowLink,
AirflowLink2,
CustomBaseIndexOpLink,
CustomOpLink,
GithubLink,
GoogleLink,
)
# Will show up under airflow.hooks.test_plugin.PluginHook
class PluginHook(BaseHook):
    # Minimal hook subclass; exists only to exercise plugin registration.
    pass
# Will show up under airflow.operators.test_plugin.PluginOperator
class PluginOperator(BaseOperator):
    # Minimal operator subclass; exists only to exercise plugin registration.
    pass
# Will show up under airflow.sensors.test_plugin.PluginSensorOperator
class PluginSensorOperator(BaseSensorOperator):
    # Minimal sensor subclass; exists only to exercise plugin registration.
    pass
# Will show up under airflow.executors.test_plugin.PluginExecutor
class PluginExecutor(BaseExecutor):
    # Minimal executor subclass; exists only to exercise plugin registration.
    pass
# Will show up under airflow.macros.test_plugin.plugin_macro
def plugin_macro():
    """Placeholder macro registered by the test plugin; does nothing."""
    return None
# Creating a flask appbuilder BaseView
class PluginTestAppBuilderBaseView(AppBuilderBaseView):
    """A trivial Flask-AppBuilder view that renders a single test page."""

    # View shown when no explicit endpoint is requested.
    default_view = "test"

    @expose("/")
    def test(self):
        # Renders the plugin-provided template with a fixed payload.
        return self.render_template("test_plugin/test.html", content="Hello galaxy!")
# Instantiate the view and describe how it should be registered with
# Flask-AppBuilder, both with and without a menu entry.
v_appbuilder_view = PluginTestAppBuilderBaseView()
v_appbuilder_package = {"name": "Test View", "category": "Test Plugin", "view": v_appbuilder_view}
v_nomenu_appbuilder_package = {"view": v_appbuilder_view}

# Creating flask appbuilder Menu Items
appbuilder_mitem = {
    "name": "Google",
    "href": "https://www.google.com",
    "category": "Search",
}
# A top-level menu item (uses "label" instead of a category).
appbuilder_mitem_toplevel = {
    "name": "apache",
    "href": "https://www.apache.org/",
    "label": "The Apache Software Foundation",
}

# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
    "test_plugin",
    __name__,
    template_folder='templates',  # registers airflow/plugins/templates as a Jinja template folder
    static_folder='static',
    static_url_path='/static/test_plugin',
)
# Extend an existing class to avoid the need to implement the full interface
class CustomCronDataIntervalTimetable(CronDataIntervalTimetable):
    # No behavior changes; only used to test custom timetable registration.
    pass
# Defining the plugin class
class AirflowTestPlugin(AirflowPlugin):
    """Test plugin wiring together every supported extension point."""

    name = "test_plugin"
    operators = [PluginOperator]
    sensors = [PluginSensorOperator]
    hooks = [PluginHook]
    executors = [PluginExecutor]
    macros = [plugin_macro]
    flask_blueprints = [bp]
    appbuilder_views = [v_appbuilder_package]
    appbuilder_menu_items = [appbuilder_mitem, appbuilder_mitem_toplevel]
    # Extra links attached to every operator...
    global_operator_extra_links = [
        AirflowLink(),
        GithubLink(),
    ]
    # ...and extra links attached only to operators that declare them.
    operator_extra_links = [GoogleLink(), AirflowLink2(), CustomOpLink(), CustomBaseIndexOpLink(1)]
    timetables = [CustomCronDataIntervalTimetable]
# Named-only mock plugin used to test plugin discovery/ordering.
class MockPluginA(AirflowPlugin):
    name = 'plugin-a'
# Named-only mock plugin used to test plugin discovery/ordering.
class MockPluginB(AirflowPlugin):
    name = 'plugin-b'
# Named-only mock plugin used to test plugin discovery/ordering.
class MockPluginC(AirflowPlugin):
    name = 'plugin-c'
class AirflowTestOnLoadPlugin(AirflowPlugin):
    """Plugin whose on_load hook mutates its own name, proving the hook ran."""

    name = 'preload'

    def on_load(self, *args, **kwargs):
        # Visible side effect asserted by the plugin-manager tests.
        self.name = 'postload'
| apache/incubator-airflow | tests/plugins/test_plugin.py | Python | apache-2.0 | 4,156 | [
"Galaxy"
] | f3ce582a7a2f3f05604bf23844b4c288ff687c6fef5f0941597deb63849a7e43 |
# coding: utf8
"""
Tests for the HTTP response section of the ASGI spec
"""
from __future__ import unicode_literals
from unittest import TestCase
from asgiref.inmemory import ChannelLayer
from hypothesis import given
from twisted.test import proto_helpers
from daphne.http_protocol import HTTPFactory
from . import factories, http_strategies, testcases
class TestHTTPResponseSpec(testcases.ASGITestCase):
    """Checks Daphne's HTTP responses against the ASGI response spec."""

    def test_minimal_response(self):
        """
        Smallest viable example. Mostly verifies that our response building works.
        """
        message = {'status': 200}
        response = factories.response_for_message(message)
        self.assert_valid_http_response_message(message, response)
        self.assertIn(b'200 OK', response)
        # Assert that the response is the last of the chunks.
        # N.b. at the time of writing, Daphne did not support multiple response chunks,
        # but still sends with Transfer-Encoding: chunked if no Content-Length header
        # is specified (and maybe even if specified).
        self.assertTrue(response.endswith(b'0\r\n\r\n'))

    def test_status_code_required(self):
        """
        Asserts that passing in the 'status' key is required.
        Previous versions of Daphne did not enforce this, so this test is here
        to make sure it stays required.
        """
        with self.assertRaises(ValueError):
            factories.response_for_message({})

    def test_status_code_is_transmitted(self):
        """
        Tests that a custom status code is present in the response.
        We can't really use hypothesis to test all sorts of status codes, because a lot
        of them have meaning that is respected by Twisted. E.g. setting 204 (No Content)
        as a status code results in Twisted discarding the body.
        """
        message = {'status': 201}  # 'Created'
        response = factories.response_for_message(message)
        self.assert_valid_http_response_message(message, response)
        self.assertIn(b'201 Created', response)

    @given(body=http_strategies.http_body())
    def test_body_is_transmitted(self, body):
        # Hypothesis generates arbitrary ASCII bodies; the assert_ helper
        # checks the body survives the round trip intact.
        message = {'status': 200, 'content': body.encode('ascii')}
        response = factories.response_for_message(message)
        self.assert_valid_http_response_message(message, response)

    @given(headers=http_strategies.headers())
    def test_headers(self, headers):
        # The ASGI spec requires us to lowercase our header names
        message = {'status': 200, 'headers': [(name.lower(), value) for name, value in headers]}
        response = factories.response_for_message(message)
        # The assert_ method does the heavy lifting of checking that headers are
        # as expected.
        self.assert_valid_http_response_message(message, response)

    @given(
        headers=http_strategies.headers(),
        body=http_strategies.http_body()
    )
    def test_kitchen_sink(self, headers, body):
        """
        This test tries to let Hypothesis find combinations of variables that result
        in breaking our assumptions. But responses are less exciting than requests,
        so there's not a lot going on here.
        """
        message = {
            'status': 202,  # 'Accepted'
            'headers': [(name.lower(), value) for name, value in headers],
            'content': body.encode('ascii')
        }
        response = factories.response_for_message(message)
        self.assert_valid_http_response_message(message, response)
class TestHTTPResponse(TestCase):
    """
    Tests that the HTTP protocol class correctly generates and parses messages.
    """

    def setUp(self):
        # Wire a protocol instance to an in-memory channel layer and a
        # Twisted string transport, so no real network I/O happens.
        self.channel_layer = ChannelLayer()
        self.factory = HTTPFactory(self.channel_layer, send_channel="test!")
        self.proto = self.factory.buildProtocol(('127.0.0.1', 0))
        self.tr = proto_helpers.StringTransport()
        self.proto.makeConnection(self.tr)

    def test_http_disconnect_sets_path_key(self):
        """
        Tests http disconnect has the path key set, see https://channels.readthedocs.io/en/latest/asgi.html#disconnect
        """
        # Send a simple request to the protocol
        self.proto.dataReceived(
            b"GET /te%20st-%C3%A0/?foo=bar HTTP/1.1\r\n" +
            b"Host: anywhere.com\r\n" +
            b"\r\n"
        )
        # Get the request message
        _, message = self.channel_layer.receive(["http.request"])
        # Send back an example response
        self.factory.dispatch_reply(
            message['reply_channel'],
            {
                "status": 200,
                "status_text": b"OK",
                "content": b"DISCO",
            }
        )
        # The disconnect notification must carry the percent-decoded,
        # UTF-8 request path ("/te st-à/").
        _, disconnect_message = self.channel_layer.receive(["http.disconnect"])
        self.assertEqual(disconnect_message['path'], "/te st-à/")
| maikhoepfel/daphne | daphne/tests/test_http_response.py | Python | bsd-3-clause | 4,907 | [
"exciting"
] | 6a7822581dce53c840c13bc5d809346a311aee21fd308dc1e1f4652d3e8db5eb |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Parameters and system data for applications"""
from decimal import Decimal
import logging
from kiwi.datatypes import ValidationError
from kiwi.python import namedAny
from stoqdrivers.enum import TaxType
from stoqlib.database.runtime import get_default_store
from stoqlib.domain.parameter import ParameterData
from stoqlib.enums import (LatePaymentPolicy, ReturnPolicy,
ChangeSalespersonPolicy)
from stoqlib.l10n.l10n import get_l10n_field
from stoqlib.lib.barcode import BarcodeInfo
from stoqlib.lib.countries import get_countries
from stoqlib.lib.defaults import MAX_INT
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.lib.validators import (validate_int,
validate_decimal,
validate_area_code,
validate_percentage)
_ = stoqlib_gettext
log = logging.getLogger(__name__)
def _credit_limit_salary_changed(new_value, store):
    """Change callback for the CREDIT_LIMIT_SALARY_PERCENT parameter.

    Recomputes the credit limit of every client from the new salary
    percentage; a no-op when the value did not actually change.
    """
    from stoqlib.domain.person import Client
    old_value = sysparam.get_decimal('CREDIT_LIMIT_SALARY_PERCENT')
    # NOTE(review): new_value is compared before being converted to
    # Decimal below; this assumes callers pass a Decimal-comparable
    # value -- confirm against the parameter editor.
    if new_value == old_value:
        return

    new_value = Decimal(new_value)
    Client.update_credit_limit(new_value, store)
class ParameterDetails(object):
    """Metadata describing one system parameter: its key, UI grouping and
    descriptions, python type, initial value, and optional validation and
    change-notification hooks.
    """

    def __init__(self, key, group, short_desc, long_desc, type,
                 initial=None, options=None, combo_data=None, range=None,
                 multiline=False, validator=None, onupgrade=None,
                 change_callback=None, editor=None, wrap=True, allow_none=False):
        self.key = key
        self.group = group
        self.short_desc = short_desc
        self.long_desc = long_desc
        self.type = type
        self.initial = initial
        self.options = options
        self.combo_data = combo_data
        self.range = range
        self.multiline = multiline
        self.validator = validator
        # When no explicit upgrade value is given, upgrading reuses the
        # initial value.
        if onupgrade is None:
            onupgrade = initial
        self.onupgrade = onupgrade
        self.change_callback = change_callback
        self.editor = editor
        self.wrap = wrap
        self.allow_none = allow_none

    #
    # Public API
    #

    def get_parameter_type(self):
        """Return the parameter's python type, resolving string type names
        (dotted paths relative to ``stoqlib.domain``) to the actual class.
        """
        # NOTE: basestring -- this module targets Python 2.
        if isinstance(self.type, basestring):
            return namedAny('stoqlib.domain.' + self.type)
        else:
            return self.type

    def get_parameter_validator(self):
        """Return the explicit validator, or a generic one based on type."""
        return self.validator or self._get_generic_parameter_validator()

    def get_change_callback(self):
        """Return the callable invoked when the parameter value changes."""
        return self.change_callback

    #
    # Staticmethods
    #

    # Each validator returns a kiwi ValidationError on failure and
    # (implicitly) None when the value is acceptable.

    @staticmethod
    def validate_int(value):
        if not validate_int(value):
            return ValidationError(_("This parameter only accepts "
                                     "integer values."))

    @staticmethod
    def validate_decimal(value):
        if not validate_decimal(value):
            return ValidationError(_("This parameter only accepts "
                                     "decimal values."))

    @staticmethod
    def validate_area_code(code):
        if not validate_area_code(code):
            return ValidationError(_("'%s' is not a valid area code.\n"
                                     "Valid area codes are on 10-99 range.")
                                   % code)

    @staticmethod
    def validate_percentage(value):
        if not validate_percentage(value):
            return ValidationError(_("'%s' is not a valid percentage.")
                                   % value)

    @staticmethod
    def validate_state(value):
        # State validation is locale-aware (l10n field for 'state').
        state_l10n = get_l10n_field('state')
        if not state_l10n.validate(value):
            return ValidationError(
                _("'%s' is not a valid %s.")
                % (value, state_l10n.label.lower(), ))

    @staticmethod
    def validate_city(value):
        # City validation depends on the currently suggested state/country.
        city_l10n = get_l10n_field('city')
        state = sysparam.get_string('STATE_SUGGESTED')
        country = sysparam.get_string('COUNTRY_SUGGESTED')
        if not city_l10n.validate(value, state=state, country=country):
            return ValidationError(_("'%s' is not a valid %s.") %
                                   (value, city_l10n.label.lower()))

    #
    # Private API
    #

    def _get_generic_parameter_validator(self):
        # Pick a validator from the declared type: ints and Decimals get
        # the matching static validator; other types get none.
        p_type = self.get_parameter_type()
        if issubclass(p_type, int):
            return ParameterDetails.validate_int
        elif issubclass(p_type, Decimal):
            return ParameterDetails.validate_decimal
# Catalog of every system parameter known to Stoq: key, UI group,
# short/long descriptions (translatable), python type and extra
# metadata (initial value, range, options, validator, ...).
# NOTE(review): obvious English typos and missing spaces between
# adjacent string literals were fixed in the user-visible texts below;
# these are gettext msgids, so the translation catalogs need to be
# updated to match.
_details = [
    ParameterDetails(
        u'EDIT_CODE_PRODUCT',
        _(u'Products'),
        _(u'Disable edit code products'),
        _(u'Disable edit code products on purchase application'),
        bool, initial=False),
    ParameterDetails(
        u'MAIN_COMPANY',
        _(u'General'),
        _(u'Primary company'),
        _(u'The primary company which is the owner of all other '
          u'branch companies'),
        u'person.Branch'),
    ParameterDetails(
        u'CUSTOM_LOGO_FOR_REPORTS',
        _(u'General'),
        _(u'Custom logotype for reports'),
        _(u'Defines a custom logo for all the reports generated by Stoq. '
          u'The recommended image dimension is 170x65 (pixels), if needed, '
          u'the image will be resized. In order to use the default logotype '
          u'leave this field blank'),
        u'image.Image'),
    ParameterDetails(
        u'DISABLE_COOKIES',
        _(u'General'),
        _(u'Disable cookies'),
        _(u'Disable the ability to use cookies in order to automatic log in '
          u'the system. If so, all the users will have to provide the password '
          u'everytime they log in. Requires restart to take effect.'),
        bool, initial=False),
    ParameterDetails(
        u'DEFAULT_SALESPERSON_ROLE',
        _(u'Sales'),
        _(u'Default salesperson role'),
        _(u'Defines which of the employee roles existent in the system is the '
          u'salesperson role'),
        u'person.EmployeeRole'),
    # FIXME: s/SUGGESTED/DEFAULT/
    ParameterDetails(
        u'SUGGESTED_SUPPLIER',
        _(u'Purchase'),
        _(u'Suggested supplier'),
        _(u'The supplier suggested when we are adding a new product in the '
          u'system'),
        u'person.Supplier', allow_none=True),
    ParameterDetails(
        u'SUGGESTED_UNIT',
        _(u'Purchase'),
        _(u'Suggested unit'),
        _(u'The unit suggested when we are adding a new product in the '
          u'system'),
        u'sellable.SellableUnit'),
    ParameterDetails(
        u'ALLOW_OUTDATED_OPERATIONS',
        _(u'General'),
        _(u'Allow outdated operations'),
        _(u'Allows the inclusion of purchases and payments done previously than the '
          u'current date.'),
        bool, initial=False),
    ParameterDetails(
        u'DELIVERY_SERVICE',
        _(u'Sales'),
        _(u'Delivery service'),
        _(u'The default delivery service in the system.'),
        u'service.Service'),
    # XXX This parameter is POS-specific. How to deal with that
    # in a better way?
    ParameterDetails(
        u'POS_FULL_SCREEN',
        _(u'Sales'),
        _(u'Show POS application in Fullscreen'),
        # typo fix: "showed" -> "shown"
        _(u'Once this parameter is set the Point of Sale application '
          u'will be shown as full screen'),
        bool, initial=False),
    ParameterDetails(
        u'POS_SEPARATE_CASHIER',
        _(u'Sales'),
        _(u'Exclude cashier operations in Point of Sale'),
        _(u'If you have a computer that will be a Point of Sales and have a '
          u'fiscal printer connected, set this False, so the Till menu will '
          u'appear on POS. If you prefer to separate the Till menu from POS '
          u'set this True.'),
        bool, initial=False),
    ParameterDetails(
        u'ENABLE_DOCUMENT_ON_INVOICE',
        _(u'ECF'),
        _(u'Enable document on invoice'),
        _(u'Once this parameter is set, we will confirm the client document '
          u'when registering a fiscal coupon.'),
        bool, initial=False),
    ParameterDetails(
        u'DEFAULT_PAYMENT_METHOD',
        _(u'Sales'),
        _(u'Default payment method selected'),
        _(u'The default method to select when doing a checkout on POS'),
        u'payment.method.PaymentMethod'),
    ParameterDetails(
        u'ALLOW_CANCEL_CONFIRMED_SALES',
        _(u'Sales'),
        _(u'Allow to cancel confirmed sales'),
        _(u'When this parameter is True, allow the user to cancel confirmed and'
          u' paid sales'),
        bool, initial=False),
    ParameterDetails(
        u'CITY_SUGGESTED',
        _(u'General'),
        _(u'Default city'),
        _(u'When adding a new address for a certain person we will always '
          u'suggest this city.'),
        unicode, initial=u'São Carlos',
        validator=ParameterDetails.validate_city),
    ParameterDetails(
        u'STATE_SUGGESTED',
        _(u'General'),
        _(u'Default state'),
        _(u'When adding a new address for a certain person we will always '
          u'suggest this state.'),
        unicode, initial=u'SP', validator=ParameterDetails.validate_state),
    ParameterDetails(
        u'COUNTRY_SUGGESTED',
        _(u'General'),
        _(u'Default country'),
        _(u'When adding a new address for a certain person we will always '
          u'suggest this country.'),
        # FIXME: When fixing bug 5100, change this to BR
        unicode, initial=u'Brazil', combo_data=get_countries),
    ParameterDetails(
        u'ALLOW_REGISTER_NEW_LOCATIONS',
        _(u'General'),
        _(u'Allow registration of new city locations'),
        # Change the note here when we have more locations to reflect it
        _(u'Allow to register new city locations. A city location is a '
          u'single set of a country + state + city.\n'
          u'NOTE: Right now this will only work for brazilian locations.'),
        bool, initial=False),
    ParameterDetails(
        u'HAS_DELIVERY_MODE',
        _(u'Sales'),
        _(u'Has delivery mode'),
        # typo fix: "disable" -> "disabled"
        _(u'Does this branch work with delivery service? If not, the '
          u'delivery option will be disabled on Point of Sales Application.'),
        bool, initial=True),
    ParameterDetails(
        u'SHOW_COST_COLUMN_IN_SALES',
        _(u'Sales'),
        _(u'Show cost column in sales'),
        # capitalization fix: "should" -> "Should"
        _(u'Should the cost column be displayed when creating a new sale quote.'),
        bool, initial=False),
    ParameterDetails(
        u'MAX_SEARCH_RESULTS',
        _(u'General'),
        _(u'Max search results'),
        _(u'The maximum number of results we must show after searching '
          u'in any dialog.'),
        int, initial=600, range=(1, MAX_INT)),
    ParameterDetails(
        u'CONFIRM_SALES_ON_TILL',
        _(u'Sales'),
        _(u'Confirm sales in Till'),
        _(u'Once this parameter is set, the sales confirmation are only made '
          u'on till application and the fiscal coupon will be printed on '
          u'that application instead of Point of Sales'),
        bool, initial=False),
    ParameterDetails(
        u'CONFIRM_QTY_ON_BARCODE_ACTIVATE',
        _(u'Sales'),
        _(u'Requires confirmation of quantity after barcode activation'),
        _(u'The system will always require the quantity of products '
          u'before adding a sale item on Point of Sale'),
        bool, initial=False),
    ParameterDetails(
        u'ACCEPT_CHANGE_SALESPERSON',
        _(u'Sales'),
        _(u'Change salesperson'),
        # typo fix: "weather" -> "whether"
        _(u'Determines whether we are able or not to change the salesperson '
          u'on the sale checkout dialog. Both "Allowed" and "Disallowed" will '
          u'select the current user by default, but only the former will '
          u'allow it to be changed. "Choose" will force the current user to '
          u'indicate the correct salesperson.'),
        int, initial=int(ChangeSalespersonPolicy.DISALLOW),
        options={
            int(ChangeSalespersonPolicy.DISALLOW): _(u'Disallowed'),
            int(ChangeSalespersonPolicy.ALLOW): _(u"Allowed"),
            int(ChangeSalespersonPolicy.FORCE_CHOOSE): _(u'Choose'),
        }),
    ParameterDetails(
        u'RETURN_POLICY_ON_SALES',
        _(u'Sales'),
        _(u'Return policy on sales'),
        _(u'This parameter sets if the salesperson must return money, credit '
          u'or if the client can choose when there is overpaid values in '
          u'sales.'),
        int, initial=int(ReturnPolicy.CLIENT_CHOICE),
        options={
            int(ReturnPolicy.CLIENT_CHOICE): _(u"Client's choice"),
            int(ReturnPolicy.RETURN_MONEY): _(u'Always return money'),
            int(ReturnPolicy.RETURN_CREDIT): _(u'Always create credit for '
                                               u'future sales'),
        }),
    ParameterDetails(
        u'ACCEPT_SALE_RETURN_WITHOUT_DOCUMENT',
        _(u'Sales'),
        _(u'Allow sale return from clients without document'),
        _(u'If this parameter is set it will not be possible to accept '
          u'returned sales from clients without document.'),
        bool, initial=True),
    ParameterDetails(
        u'MAX_SALE_DISCOUNT',
        _(u'Sales'),
        _(u'Max discount for sales'),
        _(u'The max discount for salesperson in a sale'),
        Decimal, initial=5, range=(0, 100),
        validator=ParameterDetails.validate_percentage),
    ParameterDetails(
        u'REUTILIZE_DISCOUNT',
        _(u'Sales'),
        _(u'Reutilize not used discounts on sale quotes'),
        # typo fix: "it's not used discount" -> "its not used discount"
        _(u'Whether we should reutilize the discount not used on some '
          u'products to other products. For instance, if two products with '
          u'a price of 100,00 are on a sale, and they both have a max '
          u'discount of 10%, that means we could sell each one for 90,00. '
          u'If this parameter is true, we could still sell one of those '
          u'items for 100,00 and reutilize its not used discount on the '
          u'other product, selling it for 80,00'),
        bool, initial=False),
    ParameterDetails(
        u'SALE_PAY_COMMISSION_WHEN_CONFIRMED',
        _(u'Sales'),
        _(u'Commission Payment At Sale Confirmation'),
        _(u'Define whether the commission is paid when a sale is confirmed. '
          u'If True pay the commission when a sale is confirmed, '
          u'if False, pay a relative commission for each commission when '
          u'the sales payment is paid.'),
        bool, initial=False),
    ParameterDetails(
        u'ALLOW_TRADE_NOT_REGISTERED_SALES',
        _(u"Sales"),
        _(u"Allow trade not registered sales"),
        # typo fix: "itens" -> "items"
        _(u"If this is set to True, you will be able to trade products "
          u"from sales not registered on Stoq. Use this option only if "
          u"you need to trade items sold on other stores."),
        bool, initial=False),
    ParameterDetails(
        u'USE_TRADE_AS_DISCOUNT',
        _(u'Sales'),
        _(u'Use trade value as a discount'),
        # typo fixes: "when confirm" -> "when confirming",
        # "registred" -> "registered"
        _(u'The traded value will be used as discount '
          u'when confirming a new sale. Otherwise, the trade '
          u'will be registered as a new payment for that new sale.'),
        bool, initial=False),
    ParameterDetails(
        u'DEFAULT_OPERATION_NATURE',
        _(u'Sales'),
        _(u'Default operation nature'),
        _(u'When adding a new sale quote, we will always suggest '
          u'this operation nature'),
        unicode, initial=_(u'Sale')),
    ParameterDetails(
        u'ASK_SALES_CFOP',
        _(u'Sales'),
        _(u'Ask for Sale Order C.F.O.P.'),
        _(u'Once this parameter is set to True we will ask for the C.F.O.P. '
          u'when creating new sale orders'),
        bool, initial=False),
    ParameterDetails(
        u'ALLOW_HIGHER_SALE_PRICE',
        _(u'Sales'),
        _(u'Allow product sale with a higher price'),
        _(u'When this parameter is set, we will allow the sales person to add '
          u'items to a quote with a price higher than the default price for '
          u'the product.'),
        bool, initial=True),
    ParameterDetails(
        u'DEFAULT_SALES_CFOP',
        _(u'Sales'),
        _(u'Default Sales C.F.O.P.'),
        _(u'Default C.F.O.P. (Fiscal Code of Operations) used when generating '
          u'fiscal book entries.'),
        u'fiscal.CfopData'),
    ParameterDetails(
        u'DEFAULT_RETURN_SALES_CFOP',
        _(u'Sales'),
        _(u'Default Return Sales C.F.O.P.'),
        _(u'Default C.F.O.P. (Fiscal Code of Operations) used when returning '
          u'sale orders '),
        u'fiscal.CfopData'),
    ParameterDetails(
        u'TOLERANCE_FOR_LATE_PAYMENTS',
        _(u'Sales'),
        _(u'Tolerance for a payment to be considered as a late payment.'),
        _(u'How many days Stoq should allow a client to not pay a late '
          u'payment without considering it late.'),
        int, initial=0, range=(0, 365)),
    ParameterDetails(
        u'EXPIRATION_SALE_QUOTE_DATE',
        _(u'Sales'),
        _(u'Period of time in days to calculate expiration date of a sale quote'),
        _(u'How many days Stoq should consider to calculate the default '
          u'expiration day of a sale quote'),
        int, initial=0, range=(0, 365)),
    ParameterDetails(
        u'LATE_PAYMENTS_POLICY',
        _(u'Sales'),
        _(u'Policy for customers with late payments.'),
        _(u'How should Stoq behave when creating a new sale for a client with '
          u'late payments'),
        int, initial=int(LatePaymentPolicy.ALLOW_SALES),
        options={int(LatePaymentPolicy.ALLOW_SALES): _(u'Allow sales'),
                 int(LatePaymentPolicy.DISALLOW_STORE_CREDIT):
                 _(u'Allow sales except with store credit'),
                 int(LatePaymentPolicy.DISALLOW_SALES): _(u'Disallow sales')}),
    ParameterDetails(
        u'CHANGE_CLIENT_AFTER_CONFIRMED',
        _(u'Sales'),
        _(u'Allow client change after sale\'s confirmation'),
        _(u'This parameter allows to change the client after a sale\'s confirmation.'),
        bool, initial=False),
    ParameterDetails(
        u'CHANGE_SALESPERSON_AFTER_CONFIRMED',
        _(u'Sales'),
        _(u'Allow salesperson change after sale\'s confirmation'),
        _(u'This parameter allows to change the salesperson after a sale\'s confirmation.'),
        bool, initial=False),
    ParameterDetails(
        u'DEFAULT_RECEIVING_CFOP',
        _(u'Purchase'),
        _(u'Default Receiving C.F.O.P.'),
        _(u'Default C.F.O.P. (Fiscal Code of Operations) used when receiving '
          u'products in the stock application.'),
        u'fiscal.CfopData'),
    ParameterDetails(
        u'DEFAULT_STOCK_DECREASE_CFOP',
        _(u'Stock'),
        _(u'Default C.F.O.P. for Stock Decreases'),
        _(u'Default C.F.O.P. (Fiscal Code of Operations) used when performing a '
          u'manual stock decrease.'),
        u'fiscal.CfopData'),
    ParameterDetails(
        u'ICMS_TAX',
        _(u'Sales'),
        _(u'Default ICMS tax'),
        _(u'Default ICMS to be applied on all the products of a sale. ') + u' ' +
        _(u'This is a percentage value and must be between 0 and 100.') + u' ' +
        _(u'E.g: 18, which means 18% of tax.'),
        Decimal, initial=18, range=(0, 100),
        validator=ParameterDetails.validate_percentage),
    ParameterDetails(
        u'ISS_TAX',
        _(u'Sales'),
        _(u'Default ISS tax'),
        _(u'Default ISS to be applied on all the services of a sale. ') + u' ' +
        _(u'This is a percentage value and must be between 0 and 100.') + u' ' +
        _(u'E.g: 12, which means 12% of tax.'),
        Decimal, initial=18, range=(0, 100),
        validator=ParameterDetails.validate_percentage),
    ParameterDetails(
        u'SUBSTITUTION_TAX',
        _(u'Sales'),
        _(u'Default Substitution tax'),
        _(u'The tax applied on all sale products with substitution tax type.') +
        u' ' +
        _(u'This is a percentage value and must be between 0 and 100.') + u' ' +
        _(u'E.g: 16, which means 16% of tax.'),
        Decimal, initial=18, range=(0, 100),
        validator=ParameterDetails.validate_percentage),
    ParameterDetails(
        u'DEFAULT_AREA_CODE',
        _(u'General'),
        _(u'Default area code'),
        _(u'This is the default area code which will be used when '
          u'registering new clients, users and more to the system'),
        int, initial=16,
        validator=ParameterDetails.validate_area_code),
    ParameterDetails(
        u'CREDIT_LIMIT_SALARY_PERCENT',
        _(u'General'),
        _(u"Client's credit limit automatic calculation"),
        # BUGFIX: adjacent literals produced "accordingto"; space added
        _(u"This is used to calculate the client's credit limit according "
          u"to the client's salary. If this percent is changed it will "
          u"automatically recalculate the credit limit for all clients."),
        Decimal, initial=0, range=(0, 100),
        validator=ParameterDetails.validate_percentage,
        change_callback=_credit_limit_salary_changed),
    ParameterDetails(
        u'DEFAULT_PRODUCT_TAX_CONSTANT',
        _(u'Sales'),
        _(u'Default tax constant for products'),
        _(u'This is the default tax constant which will be used '
          u'when adding new products to the system'),
        u'sellable.SellableTaxConstant'),
    ParameterDetails(
        u'SUGGEST_BATCH_NUMBER',
        _(u'General'),
        _(u'Suggest batch number'),
        # typo fix: "sequencial" -> "sequential"
        _(u"If false, you should enter the batch number by hand. That's "
          u"useful if the batch number is already present on the barcode "
          u"of the products for instance. If true a sequential number will "
          u"be used for suggestion when registering new batches. That's "
          u"useful if you generate your own batches."),
        bool, initial=False),
    ParameterDetails(
        u'LABEL_TEMPLATE_PATH',
        _(u'General'),
        _(u'Glabels template file'),
        _(u'The glabels file that will be used to print the labels. Check the '
          u'documentation to see how to setup this file.'),
        unicode, initial=u"", editor='file-chooser'),
    ParameterDetails(
        u'CAT52_DEST_DIR',
        _(u'General'),
        _(u'Cat 52 destination directory'),
        _(u'Where the file generated after a Z-reduction should be saved.'),
        unicode, initial=u'~/.stoq/cat52', editor='directory-chooser'),
    ParameterDetails(
        u'COST_PRECISION_DIGITS',
        _(u'General'),
        _(u'Number of digits to use for product cost'),
        _(u'Set this parameter accordingly to the number of digits of the '
          u'products you purchase'),
        int, initial=2, range=(2, 8)),
    ParameterDetails(
        u'SCALE_BARCODE_FORMAT',
        _(u'Sales'),
        _(u'Scale barcode format'),
        # typo fix: "Check or scale" -> "Check your scale"
        _(u'Format used by the barcode printed by the scale. This format always'
          u' starts with 2 followed by 4,5 or 6 digits product code and by a 5'
          u' digit weight or a 6 digit price. Check your scale documentation and'
          u' configuration to see the best option.'),
        int, initial=0,
        options=BarcodeInfo.options),
    ParameterDetails(
        u'NFE_SERIAL_NUMBER',
        _(u'NF-e'),
        _(u'Fiscal document serial number'),
        # grammar fix: "have no" -> "has no"
        _(u'Fiscal document serial number. Fill with 0 if the NF-e has no '
          u'series. This parameter only has effect if the nfe plugin is enabled.'),
        int, initial=1),
    ParameterDetails(
        u'NFE_DANFE_ORIENTATION',
        _(u'NF-e'),
        _(u'Danfe printing orientation'),
        _(u'Orientation to use for printing danfe. Portrait or Landscape'),
        int, initial=0,
        options={0: _(u'Portrait'),
                 1: _(u'Landscape')}),
    ParameterDetails(
        u'NFE_FISCO_INFORMATION',
        _(u'NF-e'),
        _(u'Additional Information for the Fisco'),
        _(u'Additional information to add to the NF-e for the Fisco'), unicode,
        initial=(u'Documento emitido por ME ou EPP optante pelo SIMPLES '
                 u'NACIONAL. Não gera Direito a Crédito Fiscal de ICMS e de '
                 u'ISS. Conforme Lei Complementar 123 de 14/12/2006.'),
        multiline=True),
    ParameterDetails(
        u'BANKS_ACCOUNT',
        _(u'Accounts'),
        _(u'Parent bank account'),
        _(u'Newly created bank accounts will be placed under this account.'),
        u'account.Account'),
    ParameterDetails(
        u'TILLS_ACCOUNT',
        _(u'Accounts'),
        _(u'Parent till account'),
        _(u'Till account transfers will be placed under this account'),
        u'account.Account'),
    ParameterDetails(
        u'IMBALANCE_ACCOUNT',
        _(u'Accounts'),
        _(u'Imbalance account'),
        _(u'Account used for unbalanced transactions'),
        u'account.Account'),
    ParameterDetails(
        u'DEMO_MODE',
        _(u'General'),
        _(u'Demonstration mode'),
        _(u'If Stoq is used in a demonstration mode'),
        bool, initial=False),
    ParameterDetails(
        u'BLOCK_INCOMPLETE_PURCHASE_PAYMENTS',
        _(u'Payments'),
        _(u'Block incomplete purchase payments'),
        # grammar fix: "a account" -> "an account"
        _(u'Do not allow confirming an account payable if the purchase is not '
          u'completely received.'),
        bool, initial=False),
    ParameterDetails(
        u'CREATE_PAYMENTS_ON_STOCK_DECREASE',
        _(u'Payments'),
        _(u'Create payments for a stock decrease'),
        # typo fix: "paramater" -> "parameter"
        _(u'When this parameter is True, Stoq will allow to create payments for '
          u'stock decreases.'),
        bool, initial=False),
    ParameterDetails(
        u'SHOW_TOTAL_PAYMENTS_ON_TILL',
        _(u'Till'),
        _(u'Show total received payments of the day on till'),
        # typo fix: "paramater" -> "parameter"
        _(u'When this parameter is True, show total of received payments.'),
        bool, initial=False),
    # This parameter is tricky, we want to ask the user to fill it in when
    # upgrading from a previous version, but not if the user installed Stoq
    # from scratch. Some of the hacks involved with having 3 boolean values
    # ("", True, False) can be removed if we always allow None and treat it like
    # an unset value in the database.
    ParameterDetails(
        u'ONLINE_SERVICES',
        _(u'General'),
        _(u'Online services'),
        _(u'If online services such as upgrade notifications, automatic crash reports '
          u'should be enabled.'),
        bool, initial=True, onupgrade=u''),
    ParameterDetails(
        u'BILL_INSTRUCTIONS',
        _(u'Sales'),
        _(u'Bill instructions '),
        # Translators: do not translate $DATE
        # BUGFIX: adjacent literals produced "bereplaced"; space added
        _(u'When printing bills, include the first 3 lines of these on '
          u'the bill itself. This usually includes instructions on how '
          u'to pay the bill and the validity and the terms. $DATE will be '
          u'replaced with the due date of the bill'),
        unicode, multiline=True, initial=u""),
    ParameterDetails(
        u'BOOKLET_INSTRUCTIONS',
        _(u'Sales'),
        _(u'Booklet instructions '),
        _(u'When printing booklets, include the first 4 lines of these on it. '
          u'This usually includes instructions on how to pay the booklet and '
          u'the validity and the terms.'),
        unicode, multiline=True,
        initial=_(u"Payable at any branch on presentation of this booklet")),
    ParameterDetails(
        u'SMART_LIST_LOADING',
        _(u'Smart lists'),
        _(u'Load items intelligently from the database'),
        _(u'This is useful when you have several thousand items, but it may cause '
          u'some problems as it\'s new and untested. If you want to preserve the old '
          u'list behavior in the payable and receivable applications, '
          u'disable this parameter.'),
        bool,
        initial=True),
    ParameterDetails(
        u'LOCAL_BRANCH',
        _(u'General'),
        _(u'Current branch for this database'),
        _(u'When operating with synchronized databases, this parameter will be '
          u'used to restrict the data that will be sent to this database.'),
        u'person.Branch'),
    ParameterDetails(
        u'SYNCHRONIZED_MODE',
        _(u'General'),
        _(u'Synchronized mode operation'),
        # typo fix: "restriced" -> "restricted"
        _(u'This parameter indicates if Stoq is operating with synchronized '
          u'databases. When using synchronized databases, some operations with '
          u'branches different than the current one will be restricted.'),
        bool,
        initial=False),
    ParameterDetails(
        u'PRINT_PROMISSORY_NOTES_ON_BOOKLETS',
        _(u'Payments'),
        _(u'Printing of promissory notes on booklets'),
        _(u'This parameter indicates if Stoq should print promissory notes when'
          u' printing booklets for payments.'),
        bool,
        initial=True),
    ParameterDetails(
        u'PRINT_PROMISSORY_NOTE_ON_LOAN',
        _(u'Sales'),
        _(u'Printing of promissory notes on loans'),
        _(u'This parameter indicates if Stoq should print a promissory note '
          u'when printing a loan receipt.'),
        bool, initial=False),
    ParameterDetails(
        u'PRINT_SALE_DETAILS_ON_POS',
        _(u'Sales'),
        _(u'Printing of sale details on point of sales'),
        # BUGFIX: adjacent literals produced "detailswhen"; space added
        _(u'This parameter indicates if Stoq should print the sale details '
          u'when finishing a sale on point of sales.'),
        bool, initial=False),
    ParameterDetails(
        u'MANDATORY_CHECK_NUMBER',
        _(u'Payments'),
        _(u'Mandatory check number'),
        _(u'This parameter indicates if the check number on check payments is '
          u'mandatory.'),
        bool, initial=False),
    ParameterDetails(
        u'MANDATORY_CARD_AUTH_NUMBER',
        _(u'Sales'),
        _(u'Set authorization number mandatory'),
        _(u'Set the authorization number on card payments as mandatory or not.'),
        bool, initial=True),
    ParameterDetails(
        u'DEFECT_DETECTED_TEMPLATE',
        _(u'Work order'),
        _(u'Defect detected template for work orders'),
        _(u'A template to be used to fill the "Defect detected" field when '
          u'creating a new work order.'),
        unicode, multiline=True, initial=u""),
    ParameterDetails(
        u'AUTOMATIC_LOGOUT',
        _(u'General'),
        _(u'Automatic logout after inactivity period'),
        # grammar/typo fixes: "automatically logout" -> "automatically
        # logged out", "funcionality" -> "functionality"
        _(u'Set the maximum time in minutes for the user to remain idle, before being '
          u'automatically logged out. \nSet to zero to disable the functionality. '
          u'Requires restart to take effect.'),
        int, initial=0),
    ParameterDetails(
        u'ALLOW_CANCEL_LAST_COUPON',
        _(u'ECF'),
        _(u'Allow to cancel the last fiscal coupon'),
        _(u'When set to false, the user will not be able to cancel the last coupon, '
          u'only return it.'),
        bool, initial=True),
    ParameterDetails(
        u'UPDATE_PRODUCTS_COST_ON_PURCHASE',
        _(u'Purchase'),
        _(u'Automatic update of products cost when making a new purchase.'),
        # typo fix: "it's products cost" -> "its products cost"
        _(u'When a new purchase is made, its products cost are set to '
          u'the purchase\'s items cost.'),
        bool, initial=False),
    # Some fiscal printers can print up to 8 rows and 70 characters each row.
    # But we want to write a documentation to make sure it will work
    # on every fiscal printer
    ParameterDetails(
        u'ADDITIONAL_INFORMATION_ON_COUPON',
        _(u'ECF'),
        _(u'Additional information on fiscal coupon'),
        # grammar fix: "2 line" -> "2 lines"
        _(u'This will be printed in the promotional message area of the fiscal coupon\n'
          u'IMPORTANT NOTE:\n'
          u'This template cannot have more than 2 lines, and each line more '
          u'than 50 characters, and you have to break it manually using the characters '
          u'"\\n" or (enter key) or the fiscal printer may not print it correctly.'),
        unicode, multiline=True, initial=u'', wrap=False)
]
class ParameterAccess(object):
"""
API for accessing and updating system parameters
"""
def __init__(self):
# Mapping of details, name -> ParameterDetail
self._details = dict((detail.key, detail) for detail in _details)
self._values_cache = None
# Lazy Mapping of database raw database values, name -> database value
@property
def _values(self):
if self._values_cache is None:
self._values_cache = dict(
(p.field_name, p.field_value)
for p in get_default_store().find(ParameterData))
return self._values_cache
    def _create_default_values(self, store):
        """Populate defaults for the object-valued parameters.

        Called from :meth:`ensure_system_parameters`; each helper below
        is a no-op when the parameter already has an object set.
        """
        # Create default values for parameters that take objects
        self.set_object_default(store, "CUSTOM_LOGO_FOR_REPORTS", None)
        self.set_object_default(store, "LOCAL_BRANCH", None, is_editable=False)
        self.set_object_default(store, "MAIN_COMPANY", None)
        self.set_object_default(store, "SUGGESTED_SUPPLIER", None)
        self.set_object_default(store, "SUGGESTED_UNIT", None)
        self._set_default_method_default(store)
        # Brazilian fiscal codes (C.F.O.P.) for the default operations
        self._set_cfop_default(store,
                               u"DEFAULT_SALES_CFOP",
                               u"Venda de Mercadoria Adquirida",
                               u"5.102")
        self._set_cfop_default(store,
                               u"DEFAULT_RETURN_SALES_CFOP",
                               u"Devolucao",
                               u"5.202")
        self._set_cfop_default(store,
                               u"DEFAULT_RECEIVING_CFOP",
                               u"Compra para Comercializacao",
                               u"1.102")
        self._set_cfop_default(store,
                               u"DEFAULT_STOCK_DECREASE_CFOP",
                               u"Outra saída de mercadoria ou "
                               u"prestação de serviço não especificado",
                               u"5.949")
        self._set_delivery_default(store)
        self._set_sales_person_role_default(store)
        self._set_product_tax_constant_default(store)
def _set_default_method_default(self, store):
from stoqlib.domain.payment.method import PaymentMethod
if self.has_object("DEFAULT_PAYMENT_METHOD"):
return
method = PaymentMethod.get_by_name(store, u'money')
self.set_object(store, u"DEFAULT_PAYMENT_METHOD", method)
def _set_cfop_default(self, store, param_name, description, code):
from stoqlib.domain.fiscal import CfopData
if self.has_object(param_name):
return
data = self.get_object(store, param_name)
if not data:
data = CfopData(code=code, description=description,
store=store)
self.set_object(store, param_name, data)
def _set_sales_person_role_default(self, store):
if self.has_object("DEFAULT_SALESPERSON_ROLE"):
return
from stoqlib.domain.person import EmployeeRole
role = EmployeeRole(name=_(u'Salesperson'),
store=store)
self.set_object(store, "DEFAULT_SALESPERSON_ROLE", role,
is_editable=False)
def _set_product_tax_constant_default(self, store):
if self.has_object("DEFAULT_PRODUCT_TAX_CONSTANT"):
return
from stoqlib.domain.sellable import SellableTaxConstant
tax_constant = SellableTaxConstant.get_by_type(TaxType.NONE, store)
self.set_object(store, "DEFAULT_PRODUCT_TAX_CONSTANT", tax_constant)
def _set_delivery_default(self, store):
if self.has_object("DELIVERY_SERVICE"):
return
from stoqlib.domain.sellable import (Sellable,
SellableTaxConstant)
from stoqlib.domain.service import Service
tax_constant = SellableTaxConstant.get_by_type(TaxType.SERVICE, store)
sellable = Sellable(store=store,
description=_(u'Delivery'))
sellable.tax_constant = tax_constant
service = Service(sellable=sellable, store=store)
self.set_object(store, "DELIVERY_SERVICE", service)
def _verify_detail(self, field_name, expected_type=None):
detail = self._details.get(field_name)
if detail is None:
raise ValueError("%s is not a valid parameter" % (field_name, ))
if expected_type is not None and detail.type != expected_type:
raise ValueError("%s is not a %s parameter" % (
field_name,
expected_type.__name__))
return detail
def _set_param_internal(self, store, param_name, value, expected_type):
param = store.find(ParameterData, field_name=unicode(param_name)).one()
if param is None:
raise ValueError("param_name %s is not a valid parameter" % (
param_name, ))
if value is not None and not type(value) is expected_type:
raise TypeError("%s must be a decimal, not %r" % (
param_name, type(value).__name__))
# bool are represented as 1/0
if expected_type is bool:
value = int(value)
self._values[param_name] = param.field_value = unicode(value)
    def _set_default_value(self, store, detail, value):
        """Persist *value* as the default for *detail*'s parameter,
        creating the ParameterData row when missing.  No-op for None."""
        if value is None:
            return
        if detail.type is bool:
            # bools are persisted in their int form (1/0)
            value = int(value)
        # NOTE: this check is redundant after the early return above
        if value is not None:
            # values are always stored as their unicode representation
            value = unicode(value)
        param_name = detail.key
        data = self._values.get(param_name)
        if data is None:
            data = ParameterData(store=store,
                                 field_name=param_name,
                                 field_value=value,
                                 is_editable=True)
            self._values[param_name] = data.field_value
        # NOTE(review): when the parameter already exists in the cache,
        # ``data`` is the raw *string* value (not a ParameterData), so
        # this attribute assignment looks like it would raise
        # AttributeError -- confirm whether this path is ever reached
        # with an existing value (callers skip existing keys on upgrade).
        data.field_value = value
    def _remove_unused_parameters(self, store):
        """
        Remove any parameter found in ParameterData table which is not
        used any longer.
        """
        # Keys present in the database but absent from _details are
        # leftovers from older versions; drop their rows.
        # NOTE(review): the stale entry is not removed from the
        # self._values cache here -- presumably callers clear/rebuild
        # the cache afterwards; confirm against ensure_system_parameters.
        for param_name in self._values:
            if param_name not in self._details:
                param = store.find(ParameterData,
                                   field_name=param_name).one()
                store.remove(param)
#
# Public API
#
    def clear_cache(self):
        """Clears the internal cache so it can be rebuilt on next access"""
        # The _values property reloads from the database when the cache
        # is None.
        self._values_cache = None
def check_parameter_presence(self):
"""
Check so the number of installed parameters are equal to
the number of available ones
:returns: ``True`` if they're up to date, ``False`` otherwise
"""
return len(self._values) == len(self._details)
    def ensure_system_parameters(self, store, update=False):
        """Make sure every registered parameter has a stored value,
        writing defaults where needed.

        Called when creating a new database (``update=False``, uses each
        detail's ``initial``) or when upgrading an existing one
        (``update=True``, keeps existing values and uses ``onupgrade``
        for new parameters).

        :param update: ``True`` if we're upgrading a database,
          otherwise ``False``
        """
        # This is called when creating a new database or
        # updating an existing one
        # Clear cached values to ensure the parameters updates
        # will be used correctly. If there any change in name, these values
        # will differ from database.
        if update:
            self.clear_cache()
            self._remove_unused_parameters(store)
        for detail in self._details.values():
            # on upgrade, never clobber a value the user already has
            if update and detail.key in self._values:
                continue
            if update:
                default = detail.onupgrade
            else:
                default = detail.initial
            self._set_default_value(store, detail, default)
        # object-valued parameters need domain rows, handled separately
        self._create_default_values(store)
def set_bool(self, store, param_name, value):
"""
Updates a database bool value for a given parameter.
:param store: a database store
:param param_name: the parameter name
:param value: the value to set
:type value: bool
"""
self._verify_detail(param_name, bool)
self._set_param_internal(store, param_name, value, bool)
def get_bool(self, param_name):
"""
Fetches a bool database value.
:param param_name: the parameter name
:returns: the database value
:rtype: bool
"""
detail = self._verify_detail(param_name, bool)
value = self._values.get(param_name)
if value is None:
return detail.initial
return value == u'1'
def set_decimal(self, store, param_name, value):
"""
Updates a database decimal value for a given parameter.
:param store: a database store
:param param_name: the parameter name
:param value: the value to set
:type value: decimal.Decimal
"""
self._verify_detail(param_name, Decimal)
self._set_param_internal(store, param_name, value, Decimal)
def get_decimal(self, param_name):
"""
Fetches a decimal database value.
:param param_name: the parameter name
:returns: the database value
:rtype: decimal.Decimal
"""
detail = self._verify_detail(param_name, Decimal)
value = self._values.get(param_name)
if value is None:
return detail.initial
try:
return Decimal(value)
except ValueError:
return detail.initial
def set_int(self, store, param_name, value):
"""
Updates a database int value for a given parameter.
:param store: a database store
:param param_name: the parameter name
:param value: the value to set
:type value: int
"""
self._verify_detail(param_name, int)
self._set_param_internal(store, param_name, value, int)
def get_int(self, param_name):
"""
Fetches an int database value.
:param param_name: the parameter name
:returns: the database value
:rtype: int
"""
detail = self._verify_detail(param_name, int)
value = self._values.get(param_name)
if value is None:
return detail.initial
try:
return int(value)
except ValueError:
return detail.initial
def set_string(self, store, param_name, value):
"""
Updates a database unicode value for a given parameter.
:param store: a database store
:param param_name: the parameter name
:param value: the value to set
:type value: unicode
"""
self._verify_detail(param_name, unicode)
self._set_param_internal(store, param_name, value, unicode)
def get_string(self, param_name):
"""
Fetches a unicode database value.
:param param_name: the parameter name
:returns: the database value
:rtype: unicode
"""
detail = self._verify_detail(param_name, unicode)
value = self._values.get(param_name)
if value is None:
return detail.initial
return value
def set_object(self, store, param_name, value, is_editable=True):
"""
Updates a database object.
:param store: a database store
:param param_name: the parameter name
:param value: the value to set
:type value: a domain object
:param is_editable: if the parameter can be modified interactivly
"""
detail = self._details.get(param_name)
if detail is None:
raise ValueError("%s is not a valid parameter" % (param_name, ))
field_type = detail.get_parameter_type()
if (value is not None and
not isinstance(value, field_type)):
raise TypeError("%s must be a %s instance, not %r" % (
param_name, field_type.__name__,
type(value).__name__))
param = ParameterData.get_or_create(store, field_name=unicode(param_name))
if value is not None:
value = unicode(value.id)
param.field_value = value
param.is_editable = is_editable
self._values[param_name] = value
def set_object_default(self, store, param_name, value, is_editable=True):
    """
    Updates the default value for a database object. This works like
    .set_object() but only updates if it doesn't have a value set.

    :param store: a database store
    :param param_name: the parameter name
    :param value: the value to set
    :type value: a domain object
    :param is_editable: if the parameter can be modified interactively
    """
    # Only write a default when nothing has been stored yet.
    if not self.has_object(param_name):
        self.set_object(store, param_name, value, is_editable=is_editable)
def get_object(self, store, param_name):
    """
    Fetches an object from the database.

    ..note..:: This has to query the database to build an object and
        it is slower than other getters, avoid it if you can.

    :param store: a database store
    :param param_name: the parameter name
    :returns: the object, or the parameter's initial value when unset
    """
    detail = self._verify_detail(param_name)
    value = self._values.get(param_name)
    if value is None:
        return detail.initial
    field_type = detail.get_parameter_type()
    # The cache holds the object's id as text; fetch the row by id.
    return store.get(field_type, unicode(value))
def get_object_id(self, param_name):
    """
    Fetches the database object id

    :param param_name: the parameter name
    :returns: the object id, or None when the parameter is unset
    """
    self._verify_detail(param_name)
    return self._values.get(param_name)
def has_object(self, param_name):
    """
    Check if an object is set.

    :param param_name: the parameter name
    :returns: True when a value is cached for the parameter
    """
    self._verify_detail(param_name)
    return self._values.get(param_name) is not None
def compare_object(self, param_name, other_object):
    """
    Compare the currently set value of a parameter with
    a specified object.

    :param param_name: the parameter name
    :param other_object: object to compare, may be None
    :returns: True when both sides are unset or the stored id matches
    """
    self._verify_detail(param_name)
    stored_id = self._values.get(param_name)
    if other_object is None:
        # Two unset values count as equal; a stored id does not.
        return stored_id is None
    # FIXME: Enable this type checking in the future
    # if type(other_object) != detail.get_parameter_type():
    #     raise TypeError("Expected an object of type %s, but got a %s" % (
    #         detail.get_parameter_type().__name__,
    #         type(other_object).__name__))
    return stored_id == other_object.id
def set_value_generic(self, param_name, value):
    """Update the internal cache for a parameter

    Does not touch the database; only the in-memory mirror is changed.

    :param param_name: the parameter name
    :param value: value
    :type value: unicode
    """
    self._values[param_name] = value
def get_detail_by_name(self, param_name):
    """
    Returns a ParameterDetails class for the given parameter name

    :param param_name: the parameter name
    :returns: the detail
    :raises KeyError: when no parameter is registered under that name
    """
    entry = self._details.get(param_name)
    if entry is None:
        raise KeyError("Unknown parameter: %r" % (param_name, ))
    return entry
# Module-level singleton through which the rest of the code base reads
# and writes database parameters.
sysparam = ParameterAccess()
| tiagocardosos/stoq | stoqlib/lib/parameters.py | Python | gpl-2.0 | 48,323 | [
"VisIt"
] | 1fbfaf64b100097102fef06ac5715ff2da6ec46cdba84a5723bfe63da50fa04a |
#!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2011 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"] ])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'GPL'
__version__ = '0.8.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
"""
import sys
import string

# textwrap moved around in very old Python releases; try the modern
# location first, then the copies bundled with optparse (2.2) / optik.
try:
    if sys.version >= '2.3':
        import textwrap
    elif sys.version >= '2.2':
        from optparse import textwrap
    else:
        from optik import textwrap
except ImportError:
    sys.stderr.write("Can't import textwrap module!\n")
    raise
# Pre-2.3 compatibility: define the boolean constants when absent.
try:
    True, False
except NameError:
    (True, False) = (1, 0)
def len(iterable):
    """Redefining len here so it will be able to work with non-ASCII characters

    For byte strings this counts decoded UTF-8 characters rather than raw
    bytes, so multi-byte text reports its visible width; everything else
    falls back to the object's own __len__.
    """
    if not isinstance(iterable, str):
        return iterable.__len__()
    try:
        return len(unicode(iterable, 'utf'))
    except Exception:
        # Was a bare ``except:``. Exception still covers both failure
        # modes -- invalid UTF-8 (UnicodeDecodeError) and running where
        # the ``unicode`` builtin does not exist (NameError) -- without
        # also swallowing KeyboardInterrupt/SystemExit.
        return iterable.__len__()
class ArraySizeError(Exception):
    """Exception raised when specified rows don't fit the required size
    """

    def __init__(self, msg):
        self.msg = msg
        # NOTE(review): the extra '' argument makes Exception.args the
        # tuple (msg, ''); looks like a legacy quirk -- confirm before
        # removing.
        Exception.__init__(self, msg, '')

    def __str__(self):
        return self.msg
class Texttable:
    """Simple ASCII table renderer.

    Rows and an optional header are accumulated with header()/add_row()/
    add_rows(), then rendered as a single string by draw().  The class
    attributes below are decoration bit flags, OR-ed together and passed
    to set_deco() to select which separator lines are drawn.
    """

    # Decoration bit flags (combine with |, see set_deco()).
    BORDER = 1
    HEADER = 1 << 1
    HLINES = 1 << 2
    VLINES = 1 << 3

    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        if max_width <= 0:
            # False disables wrapping entirely (checked in
            # _compute_cols_width).
            max_width = False
        self._max_width = max_width
        # Digits printed for 'f'/'e' formatted floats, see set_precision().
        self._precision = 3
        # All decorations are enabled by default.
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()

    def reset(self):
        """Reset the instance

        - reset rows and header
        """
        self._hline_string = None
        self._row_size = None
        self._header = []
        self._rows = []

    def set_chars(self, array):
        """Set the characters used to draw lines between rows and columns

        - the array should contain 4 fields:

            [horizontal, vertical, corner, header]

        - default is set to:

            ['-', '|', '+', '=']
        """
        if len(array) != 4:
            raise ArraySizeError, "array should contain 4 characters"
        # Keep only the first character of each entry.
        array = [ x[:1] for x in [ str(s) for s in array ] ]
        (self._char_horiz, self._char_vert,
            self._char_corner, self._char_header) = array

    def set_deco(self, deco):
        """Set the table decoration

        - 'deco' can be a combination of:

            Texttable.BORDER: Border around the table
            Texttable.HEADER: Horizontal line below the header
            Texttable.HLINES: Horizontal lines between rows
            Texttable.VLINES: Vertical lines between columns

           All of them are enabled by default

        - example:

            Texttable.BORDER | Texttable.HEADER
        """
        self._deco = deco

    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":

            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._align = array

    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment

        - the elements of the array should be either "t", "m" or "b":

            * "t": column aligned on the top of the cell
            * "m": column aligned on the middle of the cell
            * "b": column aligned on the bottom of the cell
        """
        self._check_row_size(array)
        self._valign = array

    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either "a", "t", "f", "e" or "i":

            * "a": automatic (try to use the most appropriate datatype)
            * "t": treat as text
            * "f": treat as float in decimal format
            * "e": treat as float in exponential format
            * "i": treat as int

        - by default, automatic datatyping is used for each column
        """
        self._check_row_size(array)
        self._dtype = array

    def set_cols_width(self, array):
        """Set the desired columns width

        - the elements of the array should be integers, specifying the
          width of each column. For example:

            [10, 20, 5]
        """
        self._check_row_size(array)
        try:
            array = map(int, array)
            # Every width must be strictly positive.
            if reduce(min, array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array

    def set_precision(self, width):
        """Set the desired precision for float/exponential formats

        - width must be an integer >= 0

        - default value is set to 3
        """
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater then 0')
        self._precision = width

    def header(self, array):
        """Specify the header of the table
        """
        self._check_row_size(array)
        self._header = map(str, array)

    def add_row(self, array):
        """Add a row in the rows stack

        - cells can contain newlines and tabs
        """
        self._check_row_size(array)

        # Default every column to automatic datatyping if set_cols_dtype
        # was never called.
        if not hasattr(self, "_dtype"):
            self._dtype = ["a"] * self._row_size

        cells = []
        for i,x in enumerate(array):
            cells.append(self._str(i,x))
        self._rows.append(cells)

    def add_rows(self, rows, header=True):
        """Add several rows in the rows stack

        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        # nb: don't use 'iter' on two-dimensional arrays, to get a
        #     usable code for python 2.1
        if header:
            if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
                self.header(rows.next())
            else:
                self.header(rows[0])
                rows = rows[1:]
        for row in rows:
            self.add_row(row)

    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            # Separator between rows, but not after the last one.
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # Drop the trailing newline added by the last drawn line.
        return out[:-1]

    def _str(self, i, x):
        """Handles string formatting of cell data

            i - index of the cell datatype in self._dtype
            x - cell data to format
        """
        try:
            f = float(x)
        except:
            # Not numeric at all: render verbatim.
            return str(x)

        n = self._precision
        dtype = self._dtype[i]

        if dtype == 'i':
            return str(int(round(f)))
        elif dtype == 'f':
            return '%.*f' % (n, f)
        elif dtype == 'e':
            return '%.*e' % (n, f)
        elif dtype == 't':
            return str(x)
        else:
            # 'a' (automatic): integers print plainly, huge magnitudes
            # switch to exponential notation.
            if f - round(f) == 0:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return str(int(round(f)))
            else:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return '%.*f' % (n, f)

    def _check_row_size(self, array):
        """Check that the specified array fits the previous rows size
        """
        if not self._row_size:
            # First row seen fixes the column count for the table.
            self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError, "array should contain %d elements" \
                % self._row_size

    def _has_vlines(self):
        """Return a boolean, if vlines are required or not
        """
        return self._deco & Texttable.VLINES > 0

    def _has_hlines(self):
        """Return a boolean, if hlines are required or not
        """
        return self._deco & Texttable.HLINES > 0

    def _has_border(self):
        """Return a boolean, if border is required or not
        """
        return self._deco & Texttable.BORDER > 0

    def _has_header(self):
        """Return a boolean, if header line is required or not
        """
        return self._deco & Texttable.HEADER > 0

    def _hline_header(self):
        """Print header's horizontal line
        """
        return self._build_hline(True)

    def _hline(self):
        """Print an horizontal line
        """
        # The row separator never changes, so build it once and cache it.
        if not self._hline_string:
            self._hline_string = self._build_hline()
        return self._hline_string

    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows
        """
        horiz = self._char_horiz
        if (is_header):
            horiz = self._char_header
        # compute cell separator
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
            horiz)
        # build the line
        l = string.join([horiz * n for n in self._width], s)
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                self._char_corner)
        else:
            l += "\n"
        return l

    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, range(1, len(parts) + 1)):
                length = length + len(part)
                if i < len(parts):
                    # Advance to the next 8-column tab stop.
                    length = (length/8 + 1) * 8
            maxi = max(maxi, length)
        return maxi

    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, range(len(row))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    maxi.append(self._len_cell(cell))
        items = len(maxi)
        length = reduce(lambda x,y: x+y, maxi)
        # items * 3 + 1 accounts for the separators/padding around cells.
        if self._max_width and length + items * 3 + 1 > self._max_width:
            maxi = [(self._max_width - items * 3 -1) / items \
                for n in range(items)]
        self._width = maxi

    def _check_align(self):
        """Check if alignment has been specified, set default one if not
        """
        if not hasattr(self, "_align"):
            self._align = ["l"] * self._row_size
        if not hasattr(self, "_valign"):
            self._valign = ["t"] * self._row_size

    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        # One pass per physical output line of the (wrapped) row.
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    # Header cells are always centered.
                    align = "c"
                if align == "r":
                    out += "%s " % (fill * space + cell_line)
                elif align == "c":
                    out += "%s " % (fill/2 * space + cell_line \
                        + (fill/2 + fill%2) * space)
                else:
                    out += "%s " % (cell_line + fill * space)
                if length < len(line):
                    out += "%s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', self._char_vert][self._has_border()]
        return out

    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                try:
                    c = unicode(c, 'utf')
                except UnicodeDecodeError, strerror:
                    sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
                    # Re-decode with replacement characters so drawing
                    # still succeeds.
                    c = unicode(c, 'utf', 'replace')
                array.extend(textwrap.wrap(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, map(len, line_wrapped))
        # Pad every cell to the same number of lines, honoring the
        # requested vertical alignment.
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * (missing / 2)
                cell.extend([""] * (missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
if __name__ == '__main__':
    # Demo 1: multi-line cells with per-column alignment, full decoration.
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([ ["Name", "Age", "Nickname"],
                     ["Mr\nXavier\nHuon", 32, "Xav'"],
                     ["Mr\nBaptiste\nClement", 1, "Baby"] ])
    print table.draw() + "\n"

    # Demo 2: per-column datatypes, drawing only the header rule.
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',  # text
                          'f',  # float (decimal)
                          'e',  # float (exponent)
                          'i',  # integer
                          'a']) # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text",    "float", "exp", "int", "auto"],
                    ["abcd",    "67",    654,   89,    128.001],
                    ["efghijk", 67.5434, .654,  89.6,  12800000000000000000000.00023],
                    ["lmn",     5e-78,   5e-78, 89.4,  .000000000000128],
                    ["opqrstu", .023,    5e+78, 92.,   12800000000000000000000]])
    print table.draw()
| jabbalaci/jabbapylib | jabbapylib/lib/texttable.py | Python | gpl-3.0 | 18,494 | [
"Brian"
] | ea5cd136d78283eed18abc52f5d37f88d6a8b1d3b67429db64e21d458e37d5c5 |
# Copyright (C) 2012-2018
# Max Planck Institute for Polymer Research
# Copyright (C) 2008-2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************************
espressopp.integrator.FreeEnergyCompensation
********************************************
Free Energy Compensation used in Hamiltonian Adaptive Resolution Simulations (H-AdResS) or Path Integral Adaptive Resolution Simulations (PI-AdResS). This works for spherical or slab adaptive resolution geometries. However, it only works for fixed, non-moving atomistic region (otherwise, H-AdResS is not properly defined).
Example:
>>> fec = espressopp.integrator.FreeEnergyCompensation(system, center=[Lx/2, Ly/2, Lz/2])
>>> # set up the fec module with the center in the center of the box
>>> fec.addForce(itype=3,filename="tablefec.xvg",type=typeCG)
>>> # set up the actual force
>>> integrator.addExtension(fec)
>>> # add to previously defined integrator
.. function:: espressopp.integrator.FreeEnergyCompensation(system, center, sphereAdr, ntrotter, slow)
:param system: system object
:param center: (default: [], corresponds to (0.0, 0.0, 0.0) position) center of high resolution region
:param sphereAdr: (default: False) Spherical AdResS region (True) vs. slab geometry with resolution change in x-direction (False)
:param ntrotter: (default: 1) Trotter number when used in Path Integral AdResS. Default leads to normal non-PI-AdResS behaviour.
:param slow: (default: False) When used with RESPA Velocity Verlet, this flag decides whether the Free Energy Compensation is applied together with the slow, less frequently updated forces (slow=True) or with the fast, more frequently updated (slow=False) forces.
:type system: shared_ptr<System>
:type center: list of reals
:type sphereAdr: bool
:type ntrotter: int
:type slow: bool
.. function:: espressopp.integrator.FreeEnergyCompensation.addForce(itype, filename, type)
:param itype: interpolation type 1: linear, 2: Akima, 3: Cubic
:param filename: filename for TD force file
:param type: particle type on which the TD force needs to be applied
:type itype: int
:type filename: string
:type type: int
.. function:: espressopp.integrator.FreeEnergyCompensation.computeCompEnergy()
:rtype: real
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_FreeEnergyCompensation
class FreeEnergyCompensationLocal(ExtensionLocal, integrator_FreeEnergyCompensation):
    """Worker-side wrapper around the C++ FreeEnergyCompensation extension."""

    def __init__(self, system, center=[], sphereAdr = False, ntrotter=1, slow=False):
        # NOTE(review): mutable default argument for ``center``; harmless
        # here since it is only compared, never mutated.
        # Construct the C++ object only on ranks that belong to the PMI
        # CPU group (or everywhere when PMI is not active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_FreeEnergyCompensation, system, sphereAdr, ntrotter, slow)

            # set center of FreeEnergyCompensation force
            if (center != []):
                self.cxxclass.setCenter(self, center[0], center[1], center[2])

    def addForce(self, itype, filename, type):
        """
        Each processor takes the broadcasted interpolation type,
        filename and particle type

        :param itype: interpolation type (1: linear, 2: Akima, 3: cubic)
        :param filename: table file with the free energy correction
        :param type: particle type the force is applied to
        """
        if pmi.workerIsActive():
            self.cxxclass.addForce(self, itype, filename, type)

    def computeCompEnergy(self):
        # Forward to the C++ implementation; only active PMI ranks compute.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.computeCompEnergy(self)
# On the controller rank, expose a PMI proxy class that forwards property
# access and the listed method calls to the per-worker Local instances.
if pmi.isController :
    class FreeEnergyCompensation(Extension):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.FreeEnergyCompensationLocal',
            pmiproperty = [ 'itype', 'filename'],
            pmicall = ['addForce' , 'computeCompEnergy']
        )
| govarguz/espressopp | src/integrator/FreeEnergyCompensation.py | Python | gpl-3.0 | 4,641 | [
"ESPResSo"
] | 16d089a02cbb5b8c03fab9e74af0bcdb9f7b259f58252399085bbf981004dceb |
#!/usr/bin/python
#
# Written by Gavin Heverly-Coulson
# Email: gavin <at> quantumgeranium.com
#
# Takes an input cell and uses it to build a potential energy surface following
# the system sheared along the a and b lattice vectors.
# Creates the Quantum ESPRESSO input files for each point on the PES.
#
#
# This work is licensed under a Simplified BSD License
# Copyright (c) 2014, Gavin Heverly-Coulson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import math
from decimal import Decimal
import os
#####################################################
# Calculates the transpose of a matrix
# Works for arbitrary NxM size
def transpose(mat):
    """Return the transpose of an arbitrary NxM matrix.

    The result is a new list of lists; the input is left untouched.
    """
    rows = len(mat[0])   # rows of the result = columns of the input
    cols = len(mat)      # columns of the result = rows of the input
    return [[mat[c][r] for c in range(cols)] for r in range(rows)]
#####################################################
# Calculates the determinant of a 3x3 matrix, using the 2x2 sub-matrices method
def det3(mat):
return ( ( mat[0][0]*det2([[mat[1][1], mat[1][2]], [mat[2][1], mat[2][2]]]) ) - ( mat[0][1]*det2([[mat[1][0], mat[1][2]], [mat[2][0], mat[2][2]]]) ) + (mat[0][2]*det2([[mat[1][0], mat[1][1]], [mat[2][0], mat[2][1]]])) )
#####################################################
# Calculates the determinant of a 2x2 matrix
def det2(mat):
return ((mat[0][0]*mat[1][1]) - (mat[0][1]*mat[1][0]))
#####################################################
# a function that takes the cell parameters, in angstrom, and a list of fractional coordinates
# and returns the structure in Cartesian coordinates
#
# Assumes the cellParam matrix is of the form:
# | ax ay az |
# | bx by bz |
# | cx cy cz |
#
def frac2cart(cellParam, fracCoords):
    """Convert fractional coordinates to Cartesian coordinates.

    cellParam rows are the a, b, c lattice vectors (angstrom); each
    fracCoords entry is [symbol, fa, fb, fc].  Returns entries of the
    form [symbol, x, y, z].
    """
    cartCoords = []
    for atom in fracCoords:
        converted = [atom[0]]
        for axis in range(3):
            # Cartesian component = sum over lattice vectors of
            # (fractional coefficient * vector component along this axis).
            converted.append(atom[1] * cellParam[0][axis]
                             + atom[2] * cellParam[1][axis]
                             + atom[3] * cellParam[2][axis])
        cartCoords.append(converted)
    return cartCoords
#####################################################
# a function that takes the cell parameters, in angstrom, and a list of Cartesian coordinates
# and returns the structure in fractional coordinates
#
# Uses Cramer's Rule to solve for the fractional coordinates
#
# Assumes the cellParam matrix is of the form:
# | ax ay az |
# | bx by bz |
# | cx cy cz |
#
# Need to use the transpose of this matrix in calculation, so call transpose function first
#
# Assumes cartCoords are of the form:
# | X x0 y0 z0 |
# | X x1 y1 z1 |
# | X x2 y2 z2 |
# | ............. |
# where X is the element symbol
def cart2frac(cellParam, cartCoords):
    """Convert Cartesian coordinates to fractional coordinates.

    Solves latCnt^T * frac = cart for every atom with Cramer's rule,
    replacing one column of the transposed lattice matrix at a time.
    cellParam rows are the a, b, c lattice vectors; cartCoords entries
    are [symbol, x, y, z].
    """
    latCnt = transpose(cellParam)
    fracCoords = []
    # Shared Cramer's-rule denominator: determinant of the full matrix.
    detLatCnt = det3(latCnt)
    for i in cartCoords:
        # Numerators: determinant with the a-, b-, or c-column replaced
        # by the atom's Cartesian position vector.
        aPos = (det3([[i[1], latCnt[0][1], latCnt[0][2]], [i[2], latCnt[1][1], latCnt[1][2]], [i[3], latCnt[2][1], latCnt[2][2]]])) / detLatCnt
        bPos = (det3([[latCnt[0][0], i[1], latCnt[0][2]], [latCnt[1][0], i[2], latCnt[1][2]], [latCnt[2][0], i[3], latCnt[2][2]]])) / detLatCnt
        cPos = (det3([[latCnt[0][0], latCnt[0][1], i[1]], [latCnt[1][0], latCnt[1][1], i[2]], [latCnt[2][0], latCnt[2][1], i[3]]])) / detLatCnt
        fracCoords.append([i[0], aPos, bPos, cPos])
    return fracCoords
#####################################################
# Take a list of strings representing the atomic positions in a system and set it up
# for use in further calculations. Returns a list of lists with each sublist formatted
# as: [AtomicSymbol(str), a/x(float), b/y(float), c/z(float)]
def formatCoords(rawCoords):
    """Parse raw coordinate lines into [symbol, x, y, z] records.

    Each input string holds an atomic symbol followed by three numbers;
    the symbol stays a string while fields 1-3 become floats.  Any extra
    whitespace-separated fields are preserved untouched.
    """
    parsed = []
    for line in rawCoords:
        fields = line.split()
        for idx in (1, 2, 3):
            fields[idx] = float(fields[idx])
        parsed.append(fields)
    return parsed
#####################################################
# Take a list of strings for the atomic positions
# [0] - atom type, keep as string
# [1-3] - fractional coordinates, cast to float
# [4] - freeze 0 = freeze, 1 = move, cast to int
def formatCoordsFrozen(rawCoords):
    """Parse coordinate lines that carry a trailing freeze flag.

    Fields per line: symbol (str), three fractional coordinates (float),
    and an int flag where 0 = frozen and 1 = free to move.
    """
    parsed = []
    for line in rawCoords:
        fields = line.split()
        for idx in (1, 2, 3):
            fields[idx] = float(fields[idx])
        fields[4] = int(fields[4])
        parsed.append(fields)
    return parsed
#####################################################
# Location of the Quantum ESPRESSO pseudopotential files on the cluster.
pseudodir = "/home/n/nmosey/nmosey/Espresso/PPs/Database"
# Bohr per angstrom: multiplying an angstrom length converts it to bohr
# (see celldmBohr below).  NOTE(review): the name reads like the inverse
# conversion -- confirm the intended direction.
bohr2angstrom = 1.8897626

# First read in from input file
reader = open(sys.argv[1], 'r')
sourceFile = reader.readlines()
reader.close()

# Find the positions for the pertinent sections of the file.
# Every *Pos starts at -1 so missing optional sections stay detectable.
counter = 0
atomNumPos = -1
coordPos = -1
latVectPos = -1
cutoffPos = -1
stepSizePos = -1
minAPos = -1
maxAPos = -1
minBPos = -1
maxBPos = -1
ecutwfcPos = -1
ecfixedPos = -1
ecutrhoPos = -1
qcutzPos = -1
q2sigmaPos = -1
londons6Pos = -1
londonrcutPos = -1
pseudoPos = -1
# Single pass over the input, recording the line index of each keyword.
for line in sourceFile:
    if "Number of Atoms" in line:
        atomNumPos = counter
    elif "Atomic Positions" in line:
        coordPos = counter + 1
    elif "Lattice Vectors" in line:
        latVectPos = counter + 1
    elif "Cutoff" in line:
        cutoffPos = counter
    elif "Step Size" in line:
        stepSizePos = counter
    elif "Max A" in line:
        maxAPos = counter
    elif "Max B" in line:
        maxBPos = counter
    elif "Min A" in line:
        minAPos = counter
    elif "Min B" in line:
        minBPos = counter
    elif "ecutwfc" in line:
        ecutwfcPos = counter
    elif "ecfixed" in line:
        ecfixedPos = counter
    elif "ecutrho" in line:
        ecutrhoPos = counter
    elif "qcutz" in line:
        qcutzPos = counter
    elif "q2sigma" in line:
        q2sigmaPos = counter
    elif "Pseudopotentials" in line:
        pseudoPos = counter + 1
    elif "london_s6" in line:
        londons6Pos = counter
    elif "london_rcut" in line:
        londonrcutPos = counter
    counter += 1

# Pull the scalar settings out of their "key : value" lines.  Decimal is
# used for the grid bounds/step so the loop arithmetic stays exact.
numAtoms = int(sourceFile[atomNumPos][sourceFile[atomNumPos].index(':')+1:].strip())
stepSize = Decimal(sourceFile[stepSizePos][sourceFile[stepSizePos].index(':')+1:].strip())
maxA = Decimal(sourceFile[maxAPos][sourceFile[maxAPos].index(':')+1:].strip())
maxB = Decimal(sourceFile[maxBPos][sourceFile[maxBPos].index(':')+1:].strip())
minA = Decimal(sourceFile[minAPos][sourceFile[minAPos].index(':')+1:].strip())
minB = Decimal(sourceFile[minBPos][sourceFile[minBPos].index(':')+1:].strip())

if cutoffPos == -1:
    # Freeze flags are listed explicitly per atom.
    coords = formatCoordsFrozen(sourceFile[coordPos:coordPos+numAtoms])
else:
    # Freeze flags derived from a cutoff: atoms whose third coordinate is
    # at or above it are marked mobile (1), the rest frozen (0).
    cutoff = float(sourceFile[cutoffPos][sourceFile[cutoffPos].index(':')+1:].strip())
    coords = formatCoords(sourceFile[coordPos:coordPos+numAtoms])
    for i in range(numAtoms):
        if coords[i][3] >= cutoff:
            coords[i].append(1)
        else:
            coords[i].append(0)

# Parse the three lattice vectors (rows: a, b, c).
latVectsRaw = sourceFile[latVectPos:latVectPos+3] # Note that this is correct, list slice second number is exclusive
latVects = []
temp1 = latVectsRaw[0].split()
latVects.append([float(temp1[0]), float(temp1[1]), float(temp1[2])])
temp2 = latVectsRaw[1].split()
latVects.append([float(temp2[0]), float(temp2[1]), float(temp2[2])])
temp3 = latVectsRaw[2].split()
latVects.append([float(temp3[0]), float(temp3[1]), float(temp3[2])])

# Use a_x component as celldm(1)
celldmAng = latVects[0][0]
celldmBohr = celldmAng * bohr2angstrom

# determine number of atom types in system
atomTypes = []
for i in coords:
    if i[0] not in atomTypes:
        atomTypes.append(i[0])

# Create a string containing all the non-coordinate & lattice vectors part of the input file
fileStart = "&control\n calculation=\'scf\',\n restart_mode=\'from_scratch\',\n tstress=.false.,\n pseudo_dir=\'{0}\',\n wf_collect=.false.\n/\n\n".format(pseudodir)
fileStart += "&system\n ibrav=0,\n nosym=.true.,\n"
fileStart += " celldm(1)={0:.6f}, ! Lattice constant of {1:.6f} angstrom in bohr\n".format(celldmBohr, celldmAng)
fileStart += " nat={0},\n ntyp={1},\n {2},\n {3},\n {4},\n {5},\n".format( numAtoms, len(atomTypes), sourceFile[ecutwfcPos].strip(), sourceFile[ecfixedPos].strip(), sourceFile[qcutzPos].strip(), sourceFile[q2sigmaPos].strip() )
# Optional settings are only emitted when present in the input file.
if ecutrhoPos >= 0:
    fileStart += " " + sourceFile[ecutrhoPos].strip() + ",\n"
if londons6Pos >= 0:
    fileStart += " london=.true.,\n " + sourceFile[londons6Pos].strip() + ",\n " + sourceFile[londonrcutPos].strip() + ",\n"
fileStart += "/\n\n&electrons\n electron_maxstep=50,\n/\n\n&ions\n/\n\n&cell\n/\n\n"
fileStart += "ATOMIC_SPECIES\n"
for i in range(len(atomTypes)):
    fileStart += sourceFile[pseudoPos + i]
fileStart += "ATOMIC_POSITIONS crystal\n"

# Convert input fractional coordinates to Cartesian coordinates
cartCoords = frac2cart(latVects, coords)

lengthA = math.sqrt((latVects[0][0]**2) + (latVects[0][1]**2) + (latVects[0][2]**2))
lengthB = math.sqrt((latVects[1][0]**2) + (latVects[1][1]**2) + (latVects[1][2]**2))

# Normalize the a and b vectors for use in determining how far along x and y directions atoms are moving
# This will be used for tilting c vector by same amount atoms have moved
unitA = [(latVects[0][0]/lengthA), (latVects[0][1]/lengthA), (latVects[0][2]/lengthA)]
unitB = [(latVects[1][0]/lengthB), (latVects[1][1]/lengthB), (latVects[1][2]/lengthB)]

# Do the meat of the operation
xyzName = sys.argv[1] + ".xyz"
xyzWriter = open(xyzName, 'w')
dirsWriter = open("folders", 'w')
# + 1 so both endpoints of the shear grid are included.
numStepsA = math.ceil((maxA-minA)/stepSize) + 1
numStepsB = math.ceil((maxB-minB)/stepSize) + 1
for a in range(int(numStepsA)):
    stepA = minA + (a * stepSize)
    aDir = "a_" + str(stepA)
    for b in range(int(numStepsB)):
        stepB = minB + (b * stepSize)
        bDir = "b_" + str(stepB)
        # Determine movement in x and y from stepA and stepB
        stepX = (float(stepA)*lengthA*unitA[0]) + (float(stepB)*lengthB*unitB[0])
        stepY = (float(stepA)*lengthA*unitA[1]) + (float(stepB)*lengthB*unitB[1])
        print "stepA={0:f} stepB={1:f}\nstepX={2:f} stepY={3:f}\n".format(stepA, stepB, stepX, stepY)
        # move atoms: only atoms flagged mobile (coords[i][4]) are shifted.
        cartSlide = []
        for i in range(len(cartCoords)):
            if coords[i][4]:
                newX = cartCoords[i][1] + stepX
                newY = cartCoords[i][2] + stepY
                cartSlide.append([cartCoords[i][0], newX, newY, cartCoords[i][3]])
            else:
                cartSlide.append(cartCoords[i])
        # Tilt cell in same direction and magnitude atoms moved
        latVectsTilt = [latVects[0], latVects[1], [(latVects[2][0] + stepX), (latVects[2][1] + stepY), latVects[2][2]]]
        # Convert slide coordinates to cartesian, using tilted lattice vectors
        slide = cart2frac(latVectsTilt, cartSlide)
        # Build CELL_PARAMETERS section using longest component from above
        # (entries are expressed in units of celldm(1)).
        cellParam = []
        for i in latVectsTilt:
            temp0 = i[0] / celldmAng
            temp1 = i[1] / celldmAng
            temp2 = i[2] / celldmAng
            cellParam.append([temp0, temp1, temp2])
        # print output
        # Write to the xyz movie file
        xyzWriter.write( "{0}\ndelta a={1:f}, delta b={2:f}\n".format(numAtoms, stepA, stepB) )
        for q in cartSlide:
            xyzWriter.write( "{0[0]:<2} {0[1]:> 10.6f} {0[2]:> 10.6f} {0[3]:> 10.6f}\n".format(q) )
        # Write the PWSCF input file into its own a_*/b_* directory.
        path = "{0}/{1}".format(aDir, bDir)
        os.makedirs(path)
        dirsWriter.write(path)
        dirsWriter.write("\n")
        pwscfFile = "{0}/{1}.in".format(path, sys.argv[1])
        pwscfWriter = open(pwscfFile, 'w')
        pwscfWriter.write(fileStart)
        for i in slide:
            pwscfWriter.write( "{0[0]:<2} {0[1]:> 10.6f} {0[2]:> 10.6f} {0[3]:> 10.6f}\n".format(i) )
        pwscfWriter.write("CELL_PARAMETERS\n")
        pwscfWriter.write(" {0[0][0]:> 10.6f} {0[0][1]:> 10.6f} {0[0][2]:>10.6f}\n {0[1][0]:> 10.6f} {0[1][1]:> 10.6f} {0[1][2]:> 10.6f}\n {0[2][0]:> 10.6f} {0[2][1]:> 10.6f} {0[2][2]:> 10.6f}\n".format(cellParam))
        pwscfWriter.close()
xyzWriter.close()
dirsWriter.close()
#eof
| ghevcoul/MaterialsPES | pes_builder.py | Python | bsd-2-clause | 13,150 | [
"CRYSTAL",
"ESPResSo",
"Quantum ESPRESSO"
] | 36c072017fa6205d934af4e0e3bb232edd5db205c7b7308eeacdbeef5e7e849c |
'''
Hom family of models based on: [Drukker2013]_
Following: [Anselin2011]_
'''
__author__ = "Luc Anselin luc.anselin@asu.edu, Daniel Arribas-Bel darribas@asu.edu"
from scipy import sparse as SP
import numpy as np
from numpy import linalg as la
from . import ols as OLS
from pysal.lib.weights.spatial_lag import lag_spatial
from .utils import power_expansion, set_endog, iter_msg, sp_att
from .utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments
from .utils import get_spFilter, get_lags, _moments2eqs
from .utils import spdot, RegressionPropsY, set_warn
from . import twosls as TSLS
from . import user_output as USER
from . import summary_output as SUMMARY
__all__ = ["GM_Error_Hom", "GM_Endog_Error_Hom", "GM_Combo_Hom"]
class BaseGM_Error_Hom(RegressionPropsY):
    '''
    GMM method for a spatial error model with homoskedasticity (note: no
    consistency checks, diagnostics or constant added); based on
    Drukker et al. (2013) [Drukker2013]_, following Anselin (2011)
    [Anselin2011]_.

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    w            : Sparse matrix
                   Spatial weights sparse matrix
    max_iter     : int
                   Maximum number of iterations of steps 2a and 2b from
                   Arraiz et al. Note: epsilon provides an additional stop
                   condition.
    epsilon      : float
                   Minimum change in lambda required to stop iterations of
                   steps 2a and 2b from Arraiz et al. Note: max_iter
                   provides an additional stop condition.
    A1           : string
                   If A1='het', then the matrix A1 is defined as in Arraiz
                   et al. If A1='hom', then as in Anselin (2011). If
                   A1='hom_sc' (default), then as in Drukker, Egger and
                   Prucha (2010) and Drukker, Prucha and Raciborski (2010).

    Raises
    ------
    ValueError
        If ``A1`` is not one of ``'het'``, ``'hom'`` or ``'hom_sc'``.

    Attributes
    ----------
    betas        : array
                   kx1 array of estimated coefficients (lambda last)
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   nx1 array of spatially filtered residuals
    predy        : array
                   nx1 array of predicted y values
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    iter_stop    : string
                   Stop criterion reached during iteration of steps 2a and
                   2b from Arraiz et al.
    iteration    : integer
                   Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    vm           : array
                   Variance covariance matrix (kxk)
    sig2         : float
                   Sigma squared used in computations
    xtx          : float
                   X'X

    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("CRIME"))
    >>> X = np.array(X).T
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'

    Model commands

    >>> reg = BaseGM_Error_Hom(y, X, w=w.sparse, A1='hom_sc')
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
    [[ 47.9479  12.3021]
     [  0.7063   0.4967]
     [ -0.556    0.179 ]
     [  0.4129   0.1835]]
    '''

    def __init__(self, y, x, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # Build the requested variant of the A1 moment matrix. Fail fast on
        # an unknown keyword instead of raising a confusing NameError on
        # wA1 further down (previous behavior).
        if A1 == 'hom':
            wA1 = get_A1_hom(w)
        elif A1 == 'hom_sc':
            wA1 = get_A1_hom(w, scalarKP=True)
        elif A1 == 'het':
            wA1 = get_A1_het(w)
        else:
            raise ValueError(
                "A1 must be one of 'het', 'hom' or 'hom_sc'; got %r" % (A1,))
        wA2 = get_A2_hom(w)

        # 1a. OLS --> \tilde{\delta}
        ols = OLS.BaseOLS(y=y, x=x)
        self.x, self.y, self.n, self.k, self.xtx = ols.x, ols.y, ols.n, ols.k, ols.xtx

        # 1b. GM --> \tilde{\rho}: initial lambda from unweighted moments
        moments = moments_hom(w, wA1, wA2, ols.u)
        lambda1 = optim_moments(moments)

        # Iterate steps 2a/2b until lambda converges (change < epsilon) or
        # max_iter is reached, whichever comes first.
        lambda_old = lambda1
        self.iteration, eps = 0, 1
        while self.iteration < max_iter and eps > epsilon:
            # 2a. SWLS --> \hat{\delta}: OLS on spatially filtered data
            x_s = get_spFilter(w, lambda_old, self.x)
            y_s = get_spFilter(w, lambda_old, self.y)
            ols_s = OLS.BaseOLS(y=y_s, x=x_s)
            # Predictions/residuals are computed on the *unfiltered* data
            self.predy = spdot(self.x, ols_s.betas)
            self.u = self.y - self.predy
            # 2b. GM 2nd iteration --> \hat{\rho}: moments weighted by psi
            moments = moments_hom(w, wA1, wA2, self.u)
            psi = get_vc_hom(w, wA1, wA2, self, lambda_old)[0]
            lambda2 = optim_moments(moments, psi)
            eps = abs(lambda2 - lambda_old)
            lambda_old = lambda2
            self.iteration += 1

        self.iter_stop = iter_msg(self.iteration, max_iter)

        # Output: stack lambda under the regression coefficients and build
        # the joint variance-covariance matrix.
        self.betas = np.vstack((ols_s.betas, lambda2))
        self.vm, self.sig2 = get_omega_hom_ols(
            w, wA1, wA2, self, lambda2, moments[0])
        self.e_filtered = self.u - lambda2 * w * self.u
        self._cache = {}
class GM_Error_Hom(BaseGM_Error_Hom):
    '''
    User-facing GMM estimator for a spatial error model under
    homoskedasticity, with input checking, variable-name handling and a
    printable summary; based on Drukker et al. (2013) [Drukker2013]_,
    following Anselin (2011) [Anselin2011]_.

    Unlike the error models in ``spreg.error_sp``, this class allows
    inference on the spatial parameter lambda.

    Parameters
    ----------
    y        : array
               nx1 array for the dependent variable
    x        : array
               Two dimensional array, one column per exogenous variable,
               excluding the constant (a constant is added internally)
    w        : pysal W object
               Spatial weights object
    max_iter : int
               Maximum number of iterations of steps 2a and 2b from Arraiz
               et al.; epsilon provides an additional stop condition
    epsilon  : float
               Minimum change in lambda required to stop the iterations;
               max_iter provides an additional stop condition
    A1       : string
               Variant of the A1 matrix: 'het' (Arraiz et al.), 'hom'
               (Anselin, 2011) or 'hom_sc' (default; Drukker, Egger and
               Prucha, 2010; Drukker, Prucha and Raciborski, 2010)
    vm       : boolean
               If True, include the variance-covariance matrix in the
               summary results
    name_y   : string
               Name of the dependent variable for use in output
    name_x   : list of strings
               Names of the independent variables for use in output
    name_w   : string
               Name of the weights matrix for use in output
    name_ds  : string
               Name of the dataset for use in output

    Attributes
    ----------
    Everything exposed by ``BaseGM_Error_Hom`` (betas, u, e_filtered,
    predy, n, k, y, x, iter_stop, iteration, mean_y, std_y, vm, sig2,
    xtx) plus: summary (printable results, use with ``print``), pr2
    (pseudo R squared), std_err, z_stat (statistic/p-value pairs), the
    ``name_*`` labels and ``title`` consumed by the summary writer.
    '''

    def __init__(self, y, x, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc',
                 vm=False, name_y=None, name_x=None,
                 name_w=None, name_ds=None):
        # Validate user input before any estimation work happens.
        n_obs = USER.check_arrays(y, x)
        USER.check_y(y, n_obs)
        USER.check_weights(w, y, w_required=True)
        x_with_const = USER.check_constant(x)

        # Delegate estimation to the computational class, handing over the
        # sparse representation of the weights.
        BaseGM_Error_Hom.__init__(self, y=y, x=x_with_const, w=w.sparse,
                                  A1=A1, max_iter=max_iter, epsilon=epsilon)

        # Attach the labels and title used by the summary writer; lambda is
        # listed after the exogenous variables.
        self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HOM)"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_x.append('lambda')
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Endog_Error_Hom(RegressionPropsY):
    '''
    GMM method for a spatial error model with homoskedasticity and
    endogenous variables (note: no consistency checks, diagnostics or
    constant added); based on Drukker et al. (2013) [Drukker2013]_,
    following Anselin (2011) [Anselin2011]_.

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable to use as instruments (note:
                   this should not contain any variables from x)
    w            : Sparse matrix
                   Spatial weights sparse matrix
    max_iter     : int
                   Maximum number of iterations of steps 2a and 2b from
                   Arraiz et al. Note: epsilon provides an additional stop
                   condition.
    epsilon      : float
                   Minimum change in lambda required to stop iterations of
                   steps 2a and 2b from Arraiz et al. Note: max_iter
                   provides an additional stop condition.
    A1           : string
                   If A1='het', then the matrix A1 is defined as in Arraiz
                   et al. If A1='hom', then as in Anselin (2011). If
                   A1='hom_sc' (default), then as in Drukker, Egger and
                   Prucha (2010) and Drukker, Prucha and Raciborski (2010).

    Raises
    ------
    ValueError
        If ``A1`` is not one of ``'het'``, ``'hom'`` or ``'hom_sc'``.

    Attributes
    ----------
    betas        : array
                   kx1 array of estimated coefficients (lambda last)
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   nx1 array of spatially filtered residuals
    predy        : array
                   nx1 array of predicted y values
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    yend         : array
                   Two dimensional array with n rows and one column for each
                   endogenous variable
    q            : array
                   Two dimensional array with n rows and one column for each
                   external exogenous variable used as instruments
    z            : array
                   nxk array of variables (combination of x and yend)
    h            : array
                   nxl array of instruments (combination of x and q)
    iter_stop    : string
                   Stop criterion reached during iteration of steps 2a and
                   2b from Arraiz et al.
    iteration    : integer
                   Number of iterations of steps 2a and 2b from Arraiz et al.
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    vm           : array
                   Variance covariance matrix (kxk)
    sig2         : float
                   Sigma squared used in computations
    hth          : float
                   H'H

    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> y = np.array(db.by_col("HOVAL"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X = np.array(X).T
    >>> X = np.hstack((np.ones(y.shape),X))
    >>> yd = []
    >>> yd.append(db.by_col("CRIME"))
    >>> yd = np.array(yd).T
    >>> q = []
    >>> q.append(db.by_col("DISCBD"))
    >>> q = np.array(q).T
    >>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
    >>> w.transform = 'r'
    >>> reg = BaseGM_Endog_Error_Hom(y, X, yd, q, w=w.sparse, A1='hom_sc')
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
    [[ 55.3658  23.496 ]
     [  0.4643   0.7382]
     [ -0.669    0.3943]
     [  0.4321   0.1927]]
    '''

    def __init__(self, y, x, yend, q, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # Build the requested variant of the A1 moment matrix. Fail fast on
        # an unknown keyword instead of raising a confusing NameError on
        # wA1 further down (previous behavior).
        if A1 == 'hom':
            wA1 = get_A1_hom(w)
        elif A1 == 'hom_sc':
            wA1 = get_A1_hom(w, scalarKP=True)
        elif A1 == 'het':
            wA1 = get_A1_het(w)
        else:
            raise ValueError(
                "A1 must be one of 'het', 'hom' or 'hom_sc'; got %r" % (A1,))
        wA2 = get_A2_hom(w)

        # 1a. S2SLS --> \tilde{\delta}
        tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
        self.x, self.z, self.h, self.y, self.hth = tsls.x, tsls.z, tsls.h, tsls.y, tsls.hth
        self.yend, self.q, self.n, self.k = tsls.yend, tsls.q, tsls.n, tsls.k

        # 1b. GM --> \tilde{\rho}: initial lambda from unweighted moments
        moments = moments_hom(w, wA1, wA2, tsls.u)
        lambda1 = optim_moments(moments)

        # Iterate steps 2a/2b until lambda converges (change < epsilon) or
        # max_iter is reached, whichever comes first.
        lambda_old = lambda1
        self.iteration, eps = 0, 1
        while self.iteration < max_iter and eps > epsilon:
            # 2a. GS2SLS --> \hat{\delta}: 2SLS on spatially filtered data,
            # keeping the original (unfiltered) instrument matrix h
            x_s = get_spFilter(w, lambda_old, self.x)
            y_s = get_spFilter(w, lambda_old, self.y)
            yend_s = get_spFilter(w, lambda_old, self.yend)
            tsls_s = TSLS.BaseTSLS(y=y_s, x=x_s, yend=yend_s, h=self.h)
            # Predictions/residuals are computed on the *unfiltered* data
            self.predy = spdot(self.z, tsls_s.betas)
            self.u = self.y - self.predy
            # 2b. GM 2nd iteration --> \hat{\rho}: moments weighted by psi
            moments = moments_hom(w, wA1, wA2, self.u)
            psi = get_vc_hom(w, wA1, wA2, self, lambda_old, tsls_s.z)[0]
            lambda2 = optim_moments(moments, psi)
            eps = abs(lambda2 - lambda_old)
            lambda_old = lambda2
            self.iteration += 1

        self.iter_stop = iter_msg(self.iteration, max_iter)

        # Output: stack lambda under the regression coefficients and build
        # the joint variance-covariance matrix.
        self.betas = np.vstack((tsls_s.betas, lambda2))
        self.vm, self.sig2 = get_omega_hom(
            w, wA1, wA2, self, lambda2, moments[0])
        self.e_filtered = self.u - lambda2 * w * self.u
        self._cache = {}
class GM_Endog_Error_Hom(BaseGM_Endog_Error_Hom):
    '''
    User-facing GMM estimator for a spatial error model with
    homoskedasticity and endogenous variables, with input checking,
    variable-name handling and a printable summary; based on Drukker et
    al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.

    Unlike the error models in ``spreg.error_sp``, this class allows
    inference on the spatial parameter lambda.

    Parameters
    ----------
    y         : array
                nx1 array for the dependent variable
    x         : array
                Two dimensional array, one column per exogenous variable,
                excluding the constant (a constant is added internally)
    yend      : array
                Two dimensional array, one column per endogenous variable
    q         : array
                Two dimensional array, one column per external exogenous
                variable to use as instruments (should not contain any
                variables from x)
    w         : pysal W object
                Spatial weights object
    max_iter  : int
                Maximum number of iterations of steps 2a and 2b from
                Arraiz et al.; epsilon provides an additional stop condition
    epsilon   : float
                Minimum change in lambda required to stop the iterations;
                max_iter provides an additional stop condition
    A1        : string
                Variant of the A1 matrix: 'het' (Arraiz et al.), 'hom'
                (Anselin, 2011) or 'hom_sc' (default; Drukker et al.)
    vm        : boolean
                If True, include the variance-covariance matrix in the
                summary results
    name_y    : string
                Name of the dependent variable for use in output
    name_x    : list of strings
                Names of the independent variables for use in output
    name_yend : list of strings
                Names of the endogenous variables for use in output
    name_q    : list of strings
                Names of the instruments for use in output
    name_w    : string
                Name of the weights matrix for use in output
    name_ds   : string
                Name of the dataset for use in output

    Attributes
    ----------
    Everything exposed by ``BaseGM_Endog_Error_Hom`` (betas, u,
    e_filtered, predy, n, k, y, x, yend, q, z, h, iter_stop, iteration,
    mean_y, std_y, vm, sig2, hth) plus: summary (printable results, use
    with ``print``), pr2 (pseudo R squared), std_err, z_stat
    (statistic/p-value pairs), the ``name_*`` labels (including name_z,
    name_h) and ``title`` consumed by the summary writer.
    '''

    def __init__(self, y, x, yend, q, w,
                 max_iter=1, epsilon=0.00001, A1='hom_sc',
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None):
        # Validate user input before any estimation work happens.
        n_obs = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n_obs)
        USER.check_weights(w, y, w_required=True)
        x_with_const = USER.check_constant(x)

        # Delegate estimation to the computational class, handing over the
        # sparse representation of the weights.
        BaseGM_Endog_Error_Hom.__init__(
            self, y=y, x=x_with_const, w=w.sparse, yend=yend, q=q,
            A1=A1, max_iter=max_iter, epsilon=epsilon)

        # Attach the labels and title used by the summary writer; z gathers
        # exogenous then endogenous variables, with lambda listed last, and
        # h gathers the full instrument set.
        self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_yend = USER.set_name_yend(name_yend, yend)
        self.name_z = self.name_x + self.name_yend
        self.name_z.append('lambda')  # listing lambda last
        self.name_q = USER.set_name_q(name_q, q)
        self.name_h = USER.set_name_h(self.name_x, self.name_q)
        self.name_w = USER.set_name_w(name_w, w)
        SUMMARY.GM_Endog_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Combo_Hom(BaseGM_Endog_Error_Hom):
    '''
    GMM method for a spatial lag and error model with homoskedasticity and
    endogenous variables (note: no consistency checks, diagnostics or
    constant added); based on Drukker et al. (2013) [Drukker2013]_,
    following Anselin (2011) [Anselin2011]_.

    This computational class is a thin specialization of
    ``BaseGM_Endog_Error_Hom``: callers are expected to have already
    appended the spatially lagged dependent variable to ``yend`` and the
    corresponding instruments to ``q`` (e.g. via ``utils.set_endog``, as
    in the examples below and in the user class ``GM_Combo_Hom``).

    Parameters
    ----------
    y        : array
               nx1 array for the dependent variable
    x        : array
               Two dimensional array, one column per exogenous variable,
               excluding the constant
    yend     : array
               Endogenous variables (including the spatial lag of y), one
               column each
    q        : array
               External exogenous variables used as instruments, one
               column each
    w        : Sparse matrix
               Spatial weights sparse matrix
    w_lags   : integer
               Orders of W used as instruments for the spatially lagged
               dependent variable (accepted here for interface
               compatibility; the instrument construction it controls is
               performed by the caller)
    lag_q    : boolean
               If True, spatial lags of q are included as instruments
               (accepted here for interface compatibility, as above)
    max_iter : int
               Maximum number of iterations of steps 2a and 2b from
               Arraiz et al.; epsilon provides an additional stop condition
    epsilon  : float
               Minimum change in lambda required to stop the iterations;
               max_iter provides an additional stop condition
    A1       : string
               Variant of the A1 matrix: 'het' (Arraiz et al.), 'hom'
               (Anselin, 2011) or 'hom_sc' (default; Drukker et al.)

    Attributes
    ----------
    Identical to ``BaseGM_Endog_Error_Hom``: betas, u, e_filtered, predy,
    n, k, y, x, yend, q, z, h, iter_stop, iteration, mean_y, std_y, vm,
    sig2, hth.
    '''

    def __init__(self, y, x, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True,
                 max_iter=1, epsilon=0.00001, A1='hom_sc'):
        # All estimation work happens in the endogenous-error base class;
        # w_lags and lag_q are part of the public signature only.
        super(BaseGM_Combo_Hom, self).__init__(
            y=y, x=x, w=w, yend=yend, q=q, A1=A1,
            max_iter=max_iter, epsilon=epsilon)
class GM_Combo_Hom(BaseGM_Combo_Hom):
'''
GMM method for a spatial lag and error model with homoskedasticity and
endogenous variables, with results and diagnostics; based on Drukker et
al. (2013) [Drukker2013]_, following Anselin (2011) [Anselin2011]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always necessary)
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on Columbus neighborhood crime (49 areas) using pysal.lib.io.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows one to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
Example only with spatial lag
The Combo class runs an SARAR model, that is a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Combo_Hom(y, X, w=w, A1='hom_sc', name_x=['inc'],\
name_y='hoval', name_yend=['crime'], name_q=['discbd'],\
name_ds='columbus')
>>> print np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4)
[[ 10.1254 15.2871]
[ 1.5683 0.4407]
[ 0.1513 0.4048]
[ 0.2103 0.4226]]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. As an example, we will include CRIME (crime rates) as
endogenous and will instrument with DISCBD (distance to the CSB). We first
need to read in the variables:
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
And then we can run and explore the model analogously to the previous combo:
>>> reg = GM_Combo_Hom(y, X, yd, q, w=w, A1='hom_sc', \
name_ds='columbus')
>>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
>>> print np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5)))
[['CONSTANT' '111.7705' '67.75191']
['inc' '-0.30974' '1.16656']
['crime' '-1.36043' '0.6841']
['W_hoval' '-0.52908' '0.84428']
['lambda' '0.60116' '0.18605']]
'''
def __init__(self, y, x, yend=None, q=None,
             w=None, w_lags=1, lag_q=True,
             max_iter=1, epsilon=0.00001, A1='hom_sc',
             vm=False, name_y=None, name_x=None,
             name_yend=None, name_q=None,
             name_w=None, name_ds=None):
    """Validate user inputs, run the base HOM combo (SARAR) estimation
    and attach the presentation metadata (variable names, summary).

    Parameters are documented in the class docstring above.
    """
    # Dimension/consistency checks; a weights object is mandatory here.
    n = USER.check_arrays(y, x, yend, q)
    USER.check_y(y, n)
    USER.check_weights(w, y, w_required=True)
    # Augment endogenous variables and instruments with the spatial
    # lag terms implied by w_lags/lag_q.
    yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
    x_constant = USER.check_constant(x)
    # Run the actual estimation on the augmented system.
    BaseGM_Combo_Hom.__init__(
        self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
        w_lags=w_lags, A1=A1, lag_q=lag_q,
        max_iter=max_iter, epsilon=epsilon)
    # betas is ordered [..., rho, lambda]: rho is second to last.
    self.rho = self.betas[-2]
    # Reduced-form predictions/residuals via the spatial multiplier.
    self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy,
                                             yend2[:, -1].reshape(self.n, 1), self.rho)
    set_warn(self, warn)
    self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
    # Cosmetic metadata consumed by the summary printer.
    self.name_ds = USER.set_name_ds(name_ds)
    self.name_y = USER.set_name_y(name_y)
    self.name_x = USER.set_name_x(name_x, x)
    self.name_yend = USER.set_name_yend(name_yend, yend)
    self.name_yend.append(USER.set_name_yend_sp(self.name_y))
    self.name_z = self.name_x + self.name_yend
    self.name_z.append('lambda')  # listing lambda last
    self.name_q = USER.set_name_q(name_q, q)
    self.name_q.extend(
        USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
    self.name_h = USER.set_name_h(self.name_x, self.name_q)
    self.name_w = USER.set_name_w(name_w, w)
    SUMMARY.GM_Combo_Hom(reg=self, w=w, vm=vm)
# Functions
def moments_hom(w, wA1, wA2, u):
    """
    Compute the G matrix and g vector of the moment conditions for the
    spatial error model with homoscedasticity, as in Anselin (2011).

    Parameters
    ----------
    w      : Sparse matrix
             Spatial weights sparse matrix
    wA1    : Sparse matrix
             A1 matrix built from w (flavor depends on the A1 option)
    wA2    : Sparse matrix
             A2 matrix built from w
    u      : array
             nx1 array of residuals, aligned with w

    Returns
    -------
    list   : [G, g] with G a 2x2 array and g a 2x1 array of sample
             moments, both divided by n
    """
    n = w.shape[0]
    # Residuals filtered through each transformation of w.
    filt_a1 = wA1 * u
    filt_a2 = wA2 * u
    lag_u = w * u
    # g vector: quadratic forms of the residuals.
    g_first = np.dot(u.T, filt_a1)
    g_second = np.dot(u.T, filt_a2)
    g = np.array([g_first[0], g_second[0]]) / n
    # G matrix: cross-products between the spatially lagged residuals
    # and the filtered residuals.
    g11 = 2 * np.dot(lag_u.T * wA1, u)
    g12 = -np.dot(lag_u.T * wA1, lag_u)
    g21 = 2 * np.dot(lag_u.T * wA2, u)
    g22 = -np.dot(lag_u.T * wA2, lag_u)
    G = np.array([[g11[0][0], g12[0][0]], [g21[0][0], g22[0][0]]]) / n
    return [G, g]
def get_vc_hom(w, wA1, wA2, reg, lambdapar, z_s=None, for_omegaOLS=False):
    '''
    VC matrix \psi of Spatial error with homoscedasticity. As in
    Anselin (2011) [Anselin2011]_ (p. 20)
    ...

    Parameters
    ----------
    w            : Sparse matrix
                   Spatial weights sparse matrix
    wA1          : Sparse matrix
                   A1 matrix built from w
    wA2          : Sparse matrix
                   A2 matrix built from w
    reg          : reg
                   Regression object
    lambdapar    : float
                   Spatial parameter estimated in previous step of the
                   procedure
    z_s          : array
                   optional argument for spatially filtered Z (to be
                   passed only if endogenous variables are present)
    for_omegaOLS : boolean
                   If True (default=False), it also returns P, needed
                   only in the computation of Omega

    Returns
    -------
    psi          : array
                   2x2 VC matrix
    a1           : array
                   nx1 vector a1. If z_s=None, a1 = 0.
    a2           : array
                   nx1 vector a2. If z_s=None, a2 = 0.
    p            : array
                   P matrix. If z_s=None or for_omegaOLS=False, p=0.
    '''
    # Spatially filtered residuals and their 2nd/3rd/4th sample moments.
    u_s = get_spFilter(w, lambdapar, reg.u)
    n = float(w.shape[0])
    sig2 = np.dot(u_s.T, u_s) / n
    mu3 = np.sum(u_s ** 3) / n
    mu4 = np.sum(u_s ** 4) / n
    # Traces of A1*A1, A1*(2*A2) and 2*A2*A2 used in the psi entries.
    tr11 = wA1 * wA1
    tr11 = np.sum(tr11.diagonal())
    tr12 = wA1 * (wA2 * 2)
    tr12 = np.sum(tr12.diagonal())
    tr22 = wA2 * wA2 * 2
    tr22 = np.sum(tr22.diagonal())
    vecd1 = np.array([wA1.diagonal()]).T
    # Base entries; the (mu4 - 3*sig2^2) term corrects for non-normal
    # kurtosis of the filtered residuals.
    psi11 = 2 * sig2 ** 2 * tr11 + \
        (mu4 - 3 * sig2 ** 2) * np.dot(vecd1.T, vecd1)
    psi12 = sig2 ** 2 * tr12
    psi22 = sig2 ** 2 * tr22
    a1, a2, p = 0., 0., 0.
    if for_omegaOLS:
        # P matrix for the OLS (exogenous-only) Omega computation.
        x_s = get_spFilter(w, lambdapar, reg.x)
        p = la.inv(spdot(x_s.T, x_s) / n)
    if issubclass(type(z_s), np.ndarray) or \
            issubclass(type(z_s), SP.csr.csr_matrix) or \
            issubclass(type(z_s), SP.csc.csc_matrix):
        # Endogenous variables present: add the a1/a2 correction terms.
        alpha1 = (-2 / n) * spdot(z_s.T, wA1 * u_s)
        alpha2 = (-2 / n) * spdot(z_s.T, wA2 * u_s)
        hth = spdot(reg.h.T, reg.h)
        hthni = la.inv(hth / n)
        htzsn = spdot(reg.h.T, z_s) / n
        p = spdot(hthni, htzsn)
        p = spdot(p, la.inv(spdot(htzsn.T, p)))
        hp = spdot(reg.h, p)
        a1 = spdot(hp, alpha1)
        a2 = spdot(hp, alpha2)
        psi11 = psi11 + \
            sig2 * spdot(a1.T, a1) + \
            2 * mu3 * spdot(a1.T, vecd1)
        psi12 = psi12 + \
            sig2 * spdot(a1.T, a2) + \
            mu3 * spdot(a2.T, vecd1)  # 3rd term=0
        psi22 = psi22 + \
            sig2 * spdot(a2.T, a2)  # 3rd&4th terms=0 bc vecd2=0
    psi = np.array(
        [[psi11[0][0], psi12[0][0]], [psi12[0][0], psi22[0][0]]]) / n
    return psi, a1, a2, p
def get_omega_hom(w, wA1, wA2, reg, lamb, G):
    '''
    Omega VC matrix for Hom models with endogenous variables computed as in
    Anselin (2011) [Anselin2011]_ (p. 21).
    ...

    Parameters
    ----------
    w     : Sparse matrix
            Spatial weights sparse matrix
    wA1   : Sparse matrix
            A1 matrix built from w
    wA2   : Sparse matrix
            A2 matrix built from w
    reg   : reg
            Regression object
    lamb  : float
            Spatial parameter estimated in previous step of the
            procedure
    G     : array
            Matrix 'G' of the moment equation

    Returns
    -------
    omega : array
            Omega matrix of VC of the model
    sig2  : float
            Sigma squared of the spatially filtered residuals
    '''
    n = float(w.shape[0])
    # Spatially filtered variables and residual moments.
    z_s = get_spFilter(w, lamb, reg.z)
    u_s = get_spFilter(w, lamb, reg.u)
    sig2 = np.dot(u_s.T, u_s) / n
    mu3 = np.sum(u_s ** 3) / n
    vecdA1 = np.array([wA1.diagonal()]).T
    psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, z_s)
    j = np.dot(G, np.array([[1.], [2 * lamb]]))
    psii = la.inv(psi)
    # NOTE: an unused intermediate (t2 = spdot(reg.h.T, hstack((a1, a2))))
    # duplicating part of psiDL was removed.
    psiDL = (mu3 * spdot(reg.h.T, np.hstack((vecdA1, np.zeros((int(n), 1))))) +
             sig2 * spdot(reg.h.T, np.hstack((a1, a2)))) / n
    # Blocks of Omega: delta-delta, lambda-lambda and the cross block.
    oDD = spdot(la.inv(spdot(reg.h.T, reg.h)), spdot(reg.h.T, z_s))
    oDD = sig2 * la.inv(spdot(z_s.T, spdot(reg.h, oDD)))
    oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
    oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
    o_upper = np.hstack((oDD, oDL))
    o_lower = np.hstack((oDL.T, oLL))
    return np.vstack((o_upper, o_lower)), float(sig2)
def get_omega_hom_ols(w, wA1, wA2, reg, lamb, G):
    '''
    Omega VC matrix for Hom models without endogenous variables (OLS) computed
    as in Anselin (2011) [Anselin2011]_.
    ...

    Parameters
    ----------
    w     : Sparse matrix
            Spatial weights sparse matrix
    wA1   : Sparse matrix
            A1 matrix built from w
    wA2   : Sparse matrix
            A2 matrix built from w
    reg   : reg
            Regression object
    lamb  : float
            Spatial parameter estimated in previous step of the
            procedure
    G     : array
            Matrix 'G' of the moment equation

    Returns
    -------
    omega : array
            Omega matrix of VC of the model
    sig2  : float
            Sigma squared of the spatially filtered residuals
    '''
    n = float(w.shape[0])
    # Spatially filtered regressors/residuals and residual moments.
    x_s = get_spFilter(w, lamb, reg.x)
    u_s = get_spFilter(w, lamb, reg.u)
    sig2 = np.dot(u_s.T, u_s) / n
    vecdA1 = np.array([wA1.diagonal()]).T
    # for_omegaOLS=True so that the P matrix is computed from x_s.
    psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, for_omegaOLS=True)
    j = np.dot(G, np.array([[1.], [2 * lamb]]))
    psii = la.inv(psi)
    oDD = sig2 * la.inv(spdot(x_s.T, x_s))
    oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
    #oDL = np.zeros((oDD.shape[0], oLL.shape[1]))
    # Only the skewness (mu3) term survives in psiDL for the OLS case.
    mu3 = np.sum(u_s ** 3) / n
    psiDL = (mu3 * spdot(reg.x.T, np.hstack((vecdA1, np.zeros((int(n), 1)))))) / n
    oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
    o_upper = np.hstack((oDD, oDL))
    o_lower = np.hstack((oDL.T, oLL))
    return np.vstack((o_upper, o_lower)), float(sig2)
def _test():
    """Run the module doctests with float suppression turned on so the
    printed arrays match the expected docstring output, then restore the
    caller's print options."""
    import doctest
    previous_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    doctest.testmod()
    np.set_printoptions(suppress=previous_suppress)


if __name__ == '__main__':
    _test()
| lixun910/pysal | pysal/model/spreg/error_sp_hom.py | Python | bsd-3-clause | 58,029 | [
"COLUMBUS"
] | 959063a34a0a768178c39600fc3b3c4736016bbca279c61f9a27a93431b406c0 |
# Time-stamp: <2015-03-05 13:48:25 Tao Liu>
"""Description: Filter duplicate reads depending on sequencing depth.
Copyright (c) 2011 Tao Liu <taoliu@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included
with the distribution).
@status: release candidate
@version: $Id$
@author: Yong Zhang, Tao Liu
@contact: taoliu@jimmy.harvard.edu
"""
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import logging
from collections import Counter
# ------------------------------------
# own python modules
# ------------------------------------
from MACS2.OptValidator import opt_validate_refinepeak as opt_validate
from MACS2.Prob import binomial_cdf_inv
from MACS2.IO.BedGraphIO import bedGraphIO,genericBedIO
from MACS2.IO.PeakIO import PeakIO
from MACS2.Constants import *
# ------------------------------------
# Main function
# ------------------------------------
def run( o_options ):
    """Main pipeline for peak refinement.

    Reads candidate peaks from a BED file and alignment tags from the
    input file, finds the refined summit of each peak and writes one
    BED-like line per peak (chrom, summit, summit+1, name, score) to the
    output file.
    """
    # Parse options...
    options = opt_validate( o_options )
    info = options.info
    # Resolve the output path once so the final log message reports the
    # file actually written (previously "_refinepeak.bed" was appended to
    # the message even when -o supplied the full file name).
    if options.ofile:
        outputfilename = os.path.join( options.outdir, options.ofile )
        options.oprefix = options.ofile
    else:
        outputfilename = os.path.join( options.outdir, "%s_refinepeak.bed" % options.oprefix )
    outputfile = open( outputfilename, "w" )
    # Load candidate peaks (BED columns: chrom, start, end, name).
    peakio = open( options.bedfile )
    peaks = PeakIO()
    for l in peakio:
        fs = l.rstrip().split()
        peaks.add( fs[0], int(fs[1]), int(fs[2]), name=fs[3] )
    peakio.close()
    peaks.sort()
    #1 Read tag files
    info("read tag files...")
    fwtrack = load_tag_files_options (options)
    retval = fwtrack.compute_region_tags_from_peaks( peaks, find_summit, window_size = options.windowsize, cutoff = options.cutoff )
    outputfile.write( "\n".join( map(lambda x: "%s\t%d\t%d\t%s\t%.2f" % x , retval) ) )
    outputfile.close()
    info("Done!")
    info("Check output file: %s" % outputfilename)
def find_summit(chrom, plus, minus, peak_start, peak_end, name = "peak", window_size=100, cutoff = 5):
    """Locate the best summit position inside [peak_start, peak_end].

    For every candidate position the score 2*sqrt(WL*CR) - WR - CL is
    computed, where WL/WR are watson (plus-strand) tag counts in the
    window_size bp window ending/starting at the position and CL/CR are
    the analogous crick (minus-strand) counts.

    Returns (chrom, summit, summit + 1, label, best_score); label is
    name + "_R" ('R'efined) when best_score > cutoff, else name + "_F"
    ('F'ailed).
    """
    watson = Counter(plus)
    crick = Counter(minus)

    def windowed(strand, lo, hi):
        # Total tag count at positions lo..hi (inclusive).
        return sum(strand[p] for p in strand if lo <= p <= hi)

    def step(strand, pos, offset):
        # Incremental change of a window sum when advancing one base.
        # NOTE(review): mirrors the original update exactly, which uses
        # the count at pos (not pos + 1) when sliding the window.
        edge = pos + offset
        return strand.get(edge, 0) - strand.get(edge - window_size, 0)

    # Seed the four running window sums at peak_start.
    w_left = windowed(watson, peak_start - window_size, peak_start)
    c_left = windowed(crick, peak_start - window_size, peak_start)
    w_right = windowed(watson, peak_start, peak_start + window_size)
    c_right = windowed(crick, peak_start, peak_start + window_size)

    scores = []
    for pos in range(peak_start, peak_end + 1):
        scores.append(2 * (w_left * c_right) ** 0.5 - w_right - c_left)
        w_left += step(watson, pos, 0)
        w_right += step(watson, pos, window_size)
        c_left += step(crick, pos, 0)
        c_right += step(crick, pos, window_size)

    best = max(scores)
    summit = scores.index(best) + peak_start
    if best > cutoff:
        label = name + "_R"   # 'R'efined
    else:
        label = name + "_F"   # 'F'ailed
    return (chrom, summit, summit + 1, label, best)
# def find_summit(bed_file, sam_file, window_size, output_file):
# def count_by_strand(ialign):
# pred = lambda x:x.is_reverse
# watson_5_end = lambda x:x.pos
# crick_5_end = lambda x:x.aend
# ialign1, ialign2 = tee(ialign)
# return (Counter(map(watson_5_end,
# ifilterfalse(pred, ialign1))),
# Counter(map(crick_5_end,
# ifilter(pred, ialign2))))
# left_sum = lambda strand, pos, width = window_size: sum([strand[x] for x in strand if x <= pos and x >= pos - width])
# right_sum = lambda strand, pos, width = window_size: sum([strand[x] for x in strand if x >= pos and x <= pos + width])
# left_forward = lambda strand, pos: strand.get(pos,0) - strand.get(pos-window_size, 0)
# right_forward = lambda strand, pos: strand.get(pos + window_size, 0) - strand.get(pos, 0)
# samfile = pysam.Samfile(sam_file, "rb" )
# cnt = 0
# with open(bed_file) as bfile, open(output_file,"w") as ofile:
# for i in bfile:
# i = i.split("\t")
# chrom = i[0]
# peak_start = int(i[1])
# peak_end = int(i[2])
# watson, crick = count_by_strand(samfile.fetch(chrom, peak_start-window_size, peak_end+window_size))
# watson_left = left_sum(watson, peak_start)
# crick_left = left_sum(crick, peak_start)
# watson_right = right_sum(watson, peak_start)
# crick_right = right_sum(crick, peak_start)
# wtd_list = []
# for j in range(peak_start, peak_end+1):
# wtd_list.append(2 * sqrt(watson_left * crick_right) - watson_right - crick_left)
# watson_left += left_forward(watson, j)
# watson_right += right_forward(watson, j)
# crick_left += left_forward(crick, j)
# crick_right += right_forward(crick,j)
# wtd_max_val = max(wtd_list)
# wtd_max_pos = wtd_list.index(wtd_max_val) + peak_start
# cnt += 1
# ofile.write("{}\t{}\t{}\tSPP_summit_{}\t{:.2f}\n".format(chrom,
# wtd_max_pos,
# wtd_max_pos+1,
# cnt,
# wtd_max_val,))
# samfile.close()
def load_tag_files_options ( options ):
    """Build and sort the forward-track object from the alignment file.

    Reads tags via options.parser/options.ifile, records the detected
    tag size on options.tsize and returns the sorted track.
    """
    options.info("read alignment tags...")
    parser = options.parser(options.ifile)
    # Tag size is detected from the data before building the track.
    options.tsize = parser.tsize()
    treat = parser.build_fwtrack()
    treat.sort()
    options.info("tag size is determined as %d bps" % options.tsize)
    return treat
| ENCODE-DCC/chip-seq-pipeline | dnanexus/shell/resources/usr/local/lib/python2.7/dist-packages/MACS2/refinepeak_cmd.py | Python | mit | 6,879 | [
"pysam"
] | 326dc75e53c44868d8e2e7f6611da23eb3d9b2c4499a63d54c8b27083b55cf59 |
# Copyright (c) 2012, CyberPoint International, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CyberPoint International, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
A module for creating and managing node data. Node data in this library can have many types, dependent on whether the conditional probability distributions are discrete, linear Gaussian, or hybrid, and on whether the Bayesian network is static or dynamic. For example input files, see :doc:`unittestdict`, :doc:`unittesthdict`, :doc:`unittestlgdict`, and :doc:`unittestdyndict`.
'''
import re
import json
from . import graphskeleton
class NodeData:
    '''This class represents the node data for each node in a graph.

    If the Bayesian network is static, it contains the attribute
    *Vdata*.

    If the Bayesian network is dynamic, it contains two attributes,
    *initial_Vdata* and *twotbn_Vdata*.

    If the Bayesian network has hybrid CPDs, it contains the
    additional attribute *nodes*.
    '''

    @classmethod
    def load(cls, path):
        '''
        Load node data from an input file located at *path*.

        The file must be a plaintext JSON dict with either the top-level
        key ``Vdata`` (static network) or both ``initial_Vdata`` and
        ``twotbn_Vdata`` (dynamic network), each mapping vertex names to
        their data dicts.

        Returns a new instance with the corresponding attribute(s) set
        to the parsed dictionaries.
        '''
        # Close the file deterministically (the original leaked the handle).
        with open(path) as infile:
            alldata = json.load(infile)
        self = cls()
        # Try the static layout first; fall back to the dynamic one.
        try:
            self.Vdata = self.parse_cprob(alldata["Vdata"])
        except KeyError:
            self.initial_Vdata = self.parse_cprob(
                alldata["initial_Vdata"])
            self.twotbn_Vdata = self.parse_cprob(
                alldata["twotbn_Vdata"])
        return self

    def parse_cprob(self, vdata):
        """Parse a cprob dictionary.

        Convert each node's JSON-style conditional probability keys
        ("['v1', 'v2']") into tuples of parent outcomes (('v1', 'v2')),
        indexing the table by parent-value tuples. Modifies *vdata* in
        place and returns it.
        """
        inlist = re.compile(r"^\w*\['(.*)'\]\w*$")
        comma = re.compile(r"', *'")
        for node, props in vdata.items():
            new_cprob = {}
            if "cprob" in props:
                # It is a discrete node
                cprob = props["cprob"]
                try:
                    for key, ps in cprob.items():
                        content = inlist.match(key).group(1)
                        new_cprob[tuple(comma.split(content))] = ps
                    props["cprob"] = new_cprob
                except AttributeError:
                    # Either cprob is a plain list (root node without
                    # parents, so .items() is missing) or a key is not in
                    # list syntax (.match() returned None); leave the
                    # original table untouched in both cases.
                    pass
        return vdata

    @property
    def V(self):
        """Topologically ordered list of vertex names (built lazily)."""
        try:
            return self._graphskeleton.V
        except AttributeError:
            try:
                V = list(self.Vdata)
            except (TypeError, AttributeError):
                # Dynamic network: fall back to the initial time slice.
                # BUG FIX: attribute is initial_Vdata (was initialVdata,
                # which always raised AttributeError here).
                V = list(self.initial_Vdata)
            # NOTE(review): self.E below reads self.Vdata only, so the
            # dynamic fallback still fails there -- confirm intended use.
            self._graphskeleton = graphskeleton.GraphSkeleton(V, self.E)
            self._graphskeleton.toporder()
            return self._graphskeleton.V

    @property
    def E(self):
        """List of directed edges (parent, node) derived from Vdata
        (cached on first access)."""
        try:
            return self._E
        except AttributeError:
            self._E = []
            for (node, properties) in self.Vdata.items():
                for parent in properties["parents"]:
                    self._E.append((parent, node))
            return self._E
class StaticNodeData(NodeData):
    """Node data for a static Bayesian network (a single Vdata dict)."""

    def __init__(self, Vdata=None):
        # A dictionary of node data. Defaults to a fresh dict per
        # instance: the original used a mutable default argument ({}),
        # which would be shared by every instance created without one.
        self.Vdata = {} if Vdata is None else Vdata
class DynamicNodeData(NodeData):
    """Node data for a dynamic Bayesian network (two time-slice dicts)."""

    def __init__(self, initial_Vdata, twotbn_Vdata):
        # BUG FIX: the original assigned {} to both attributes, silently
        # discarding the constructor arguments, and misspelled the second
        # parameter as 'dwotbn_Vdata'.
        # Node data for the initial time interval.
        self.initial_Vdata = initial_Vdata
        # Node data for every time-step interval after the first one.
        self.twotbn_Vdata = twotbn_Vdata
class HybridNodeData(NodeData):
    """Node data for hybrid Bayesian networks (heterogeneous CPD types)."""

    def __init__(self, nodes=None):
        # {node name: CPD class instance}; default to a fresh dict per
        # instance (the original used a shared mutable default {}).
        self.nodes = {} if nodes is None else nodes

    def entriestoinstances(self):
        '''
        For each node, convert its dictionary entry to a class instance.

        The node's 'type' field names a module in libpgm/CPDtypes/ (e.g.
        "discrete" or "lg"); that module's class (capitalized module
        name) is instantiated with the node's Vdata entry, giving each
        node its own sampling function while keeping all dictionary data.

        Populates the *nodes* attribute with {name: instance} pairs.
        '''
        import importlib  # local import: only needed for this conversion
        instances = {}
        for entry in self.Vdata:
            typename = str(self.Vdata[entry]["type"])
            # Replaces the original exec()-string dispatch with explicit
            # dynamic import + attribute lookup (same module/class names).
            module = importlib.import_module("libpgm.CPDtypes." + typename)
            cpd_class = getattr(module, typename.capitalize())
            instances[entry] = cpd_class(self.Vdata[entry])
        self.nodes = instances
| Anaphory/libpgm | libpgm/nodedata.py | Python | bsd-3-clause | 8,840 | [
"Gaussian"
] | bc05442e78cabdb40ea87cac6fb6d858aa574ead78f7634e4f4df406f926df49 |
""" UserProfileDB class is a front-end to the User Profile Database
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import types
import os
import sys
import hashlib
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Core.Base.DB import DB
class UserProfileDB( DB ):
""" UserProfileDB class is a front-end to the User Profile Database
"""
# Schema of the five tables backing user web profiles:
#  - up_Users / up_Groups / up_VOs: name <-> numeric Id lookup tables,
#    each with a LastAccess timestamp touched on every resolution.
#  - up_ProfilesData: one row per (user, group, profile, variable)
#    holding the serialized value plus Read/Publish permission levels.
#  - up_HashTags: per-user/group tags addressed by a hash.
tableDict = { 'up_Users' : { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
                                          'UserName' : 'VARCHAR(32) NOT NULL',
                                          'LastAccess' : 'DATETIME',
                                        },
                             'PrimaryKey' : 'Id',
                             'UniqueIndexes' : { 'U' : [ 'UserName' ] },
                             'Engine': 'InnoDB',
                           },
              'up_Groups': { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
                                          'UserGroup' : 'VARCHAR(32) NOT NULL',
                                          'LastAccess' : 'DATETIME',
                                        },
                             'PrimaryKey' : 'Id',
                             'UniqueIndexes' : { 'G' : [ 'UserGroup' ] },
                             'Engine': 'InnoDB',
                           },
              'up_VOs': { 'Fields' : { 'Id' : 'INTEGER AUTO_INCREMENT NOT NULL',
                                       'VO' : 'VARCHAR(32) NOT NULL',
                                       'LastAccess' : 'DATETIME',
                                     },
                          'PrimaryKey' : 'Id',
                          'UniqueIndexes' : { 'VO' : [ 'VO' ] },
                          'Engine': 'InnoDB',
                        },
              'up_ProfilesData': { 'Fields' : { 'UserId' : 'INTEGER',
                                                'GroupId' : 'INTEGER',
                                                'VOId' : 'INTEGER',
                                                'Profile' : 'VARCHAR(255) NOT NULL',
                                                'VarName' : 'VARCHAR(255) NOT NULL',
                                                'Data' : 'BLOB',
                                                'ReadAccess' : 'VARCHAR(10) DEFAULT "USER"',
                                                'PublishAccess' : 'VARCHAR(10) DEFAULT "USER"',
                                              },
                                   'PrimaryKey' : [ 'UserId', 'GroupId', 'Profile', 'VarName' ],
                                   'Indexes' : { 'ProfileKey' : [ 'UserId', 'GroupId', 'Profile' ],
                                                 'UserKey' : [ 'UserId' ] ,
                                               },
                                   'Engine': 'InnoDB',
                                 },
              'up_HashTags': { 'Fields' : { 'UserId' : 'INTEGER',
                                            'GroupId' : 'INTEGER',
                                            'VOId' : 'INTEGER',
                                            'HashTag' : 'VARCHAR(32) NOT NULL',
                                            'TagName' : 'VARCHAR(255) NOT NULL',
                                            'LastAccess' : 'DATETIME',
                                          },
                               'PrimaryKey' : [ 'UserId', 'GroupId', 'TagName' ],
                               'Indexes' : { 'HashKey' : [ 'UserId', 'HashTag' ] },
                               'Engine': 'InnoDB',
                             },
            }
def __init__( self ):
    """ Constructor

    Connects to the Framework/UserProfileDB database and makes sure all
    required tables exist; raises Exception if they cannot be created.
    """
    # Valid permission levels, ordered from most restrictive to most open.
    self.__permValues = [ 'USER', 'GROUP', 'VO', 'ALL' ]
    # Permission attributes stored per profile variable.
    self.__permAttrs = [ 'ReadAccess', 'PublishAccess' ]
    DB.__init__(self, 'UserProfileDB', 'Framework/UserProfileDB')
    retVal = self.__initializeDB()
    if not retVal[ 'OK' ]:
        raise Exception( "Can't create tables: %s" % retVal[ 'Message' ] )
def _checkTable( self ):
    """ Make sure the tables are created

    Public wrapper (used by the service framework) around the private
    table-initialization method; returns its S_OK/S_ERROR result.
    """
    return self.__initializeDB()
def __initializeDB( self ):
    """
    Create any of the profile tables that do not yet exist.

    Returns the S_OK/S_ERROR structure from the underlying DB calls.
    """
    retVal = self._query( "show tables" )
    if not retVal[ 'OK' ]:
        return retVal
    tablesInDB = [ t[0] for t in retVal[ 'Value' ] ]
    # Create only the missing tables. Iterating tableDict replaces the
    # original's one hand-written if-block per table, which could drift
    # out of sync when the schema dictionary changes.
    tablesD = {}
    for tableName in self.tableDict:
        if tableName not in tablesInDB:
            tablesD[ tableName ] = self.tableDict[ tableName ]
    return self._createTables( tablesD )
def __getUserId( self, userName, insertIfMissing = True ):
    # Resolve a user name to its numeric Id in up_Users (inserting if allowed).
    return self.__getObjId( userName, 'UserName', 'up_Users', insertIfMissing )

def __getGroupId( self, groupName, insertIfMissing = True ):
    # Resolve a group name to its numeric Id in up_Groups (inserting if allowed).
    return self.__getObjId( groupName, 'UserGroup', 'up_Groups', insertIfMissing )

def __getVOId( self, voName, insertIfMissing = True ):
    # Resolve a VO name to its numeric Id in up_VOs (inserting if allowed).
    return self.__getObjId( voName, 'VO', 'up_VOs', insertIfMissing )
def __getObjId( self, objValue, varName, tableName, insertIfMissing = True ):
    """Return S_OK(Id) of the row in *tableName* whose *varName* column
    equals *objValue*, touching its LastAccess timestamp.

    If no row exists, a new one is inserted when insertIfMissing is
    True; otherwise S_ERROR is returned.
    """
    result = self.getFields( tableName, ['Id'], { varName: objValue } )
    if not result[ 'OK' ]:
        return result
    data = result[ 'Value' ]
    if len( data ) > 0:
        objId = data[0][0]
        # Touch the row so stale entries can be identified later.
        self.updateFields( tableName, ['LastAccess'], ['UTC_TIMESTAMP()'], { 'Id': objId } )
        return S_OK( objId )
    if not insertIfMissing:
        return S_ERROR( "No entry %s for %s defined in the DB" % ( objValue, varName ) )
    # NOTE(review): lookup+insert is not atomic; concurrent calls could
    # race -- presumably the unique index prevents duplicates. Confirm.
    result = self.insertFields( tableName, [ varName, 'LastAccess' ], [ objValue, 'UTC_TIMESTAMP()' ] )
    if not result[ 'OK' ]:
        return result
    return S_OK( result[ 'lastRowId' ] )
def getUserGroupIds( self, userName, userGroup, insertIfMissing = True ):
    """Resolve (userName, userGroup) into the numeric (userId, groupId,
    voId) triplet used as the profile owner key.

    The VO is derived from the group via the CS Registry; if the group
    maps to no VO the placeholder "undefined" is used.
    """
    result = self.__getUserId( userName, insertIfMissing )
    if not result[ 'OK' ]:
        return result
    userId = result[ 'Value' ]
    result = self.__getGroupId( userGroup, insertIfMissing )
    if not result[ 'OK' ]:
        return result
    groupId = result[ 'Value' ]
    userVO = Registry.getVOForGroup( userGroup )
    if not userVO:
        userVO = "undefined"
    result = self.__getVOId( userVO, insertIfMissing )
    if not result[ 'OK' ]:
        return result
    voId = result[ 'Value' ]
    return S_OK( ( userId, groupId, voId ) )
def deleteUserProfile( self, userName, userGroup = False ):
    """
    Delete the profiles for a user

    Removes all up_ProfilesData rows owned by the user; if userGroup is
    given, only that group's rows are removed.
    """
    result = self.__getUserId( userName )
    if not result[ 'OK' ]:
        return result
    userId = result[ 'Value' ]
    condDict = { 'UserId': userId }
    if userGroup:
        result = self.__getGroupId( userGroup )
        if not result[ 'OK' ]:
            return result
        groupId = result[ 'Value' ]
        condDict['GroupId'] = groupId
    result = self.deleteEntries( 'up_ProfilesData', condDict )
    if not result[ 'OK' ] or not userGroup:
        return result
    # NOTE(review): when a group was specified, the user's up_Users row
    # is also dropped even though other groups may still reference it --
    # confirm this is the intended semantics.
    return self.deleteEntries( 'up_Users', { 'Id': userId } )
def __webProfileUserDataCond( self, userIds, sqlProfileName = False, sqlVarName = False ):
    """Build the SQL condition selecting rows owned by the (user, group,
    VO) triplet in *userIds*, optionally narrowed to one profile and one
    variable name (both already SQL-escaped)."""
    conditions = [ '`up_ProfilesData`.UserId=%s' % userIds[0],
                   '`up_ProfilesData`.GroupId=%s' % userIds[1],
                   '`up_ProfilesData`.VOId=%s' % userIds[2] ]
    if sqlProfileName:
        conditions.append( '`up_ProfilesData`.Profile=%s' % sqlProfileName )
    if sqlVarName:
        conditions.append( '`up_ProfilesData`.VarName=%s' % sqlVarName )
    return " AND ".join( conditions )
def __webProfileReadAccessDataCond( self, userIds, ownerIds, sqlProfileName, sqlVarName = False, match = False ):
    """Build the SQL condition granting the requester (*userIds*) read
    access to profile rows owned by *ownerIds*.

    Access is granted for the owner's own rows, or when a row's
    ReadAccess level ('GROUP', 'VO', 'ALL') covers the requester.
    With match=True only exact owner rows are selected.
    """
    permCondSQL = []
    sqlCond = []
    if match:
        # Restrict to rows owned exactly by ownerIds.
        # NOTE(review): in this branch permCondSQL stays empty, so the
        # final clause below renders as "( (  ) )" -- looks malformed;
        # confirm against actual query behavior.
        sqlCond.append( '`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % ( ownerIds[0], ownerIds[1] ) )
    else:
        # The owner can always read its own data...
        permCondSQL.append( '`up_ProfilesData`.UserId = %s AND `up_ProfilesData`.GroupId = %s' % ( ownerIds[0], ownerIds[1] ) )
        # ...others depending on the per-row ReadAccess level.
        permCondSQL.append( '`up_ProfilesData`.GroupId=%s AND `up_ProfilesData`.ReadAccess="GROUP"' % userIds[1] )
        permCondSQL.append( '`up_ProfilesData`.VOId=%s AND `up_ProfilesData`.ReadAccess="VO"' % userIds[2] )
        permCondSQL.append( '`up_ProfilesData`.ReadAccess="ALL"' )
    sqlCond.append( '`up_ProfilesData`.Profile = %s' % sqlProfileName )
    if sqlVarName:
        sqlCond.append( "`up_ProfilesData`.VarName = %s" % ( sqlVarName ) )
    # Perms
    sqlCond.append( "( ( %s ) )" % " ) OR ( ".join( permCondSQL ) )
    return " AND ".join( sqlCond )
  def __parsePerms( self, perms, addMissing = True ):
    """
    Normalize a permissions dictionary.

    Each known permission attribute ( self.__permAttrs ) is upper-cased and
    validated against the allowed values ( self.__permValues ); unknown or
    invalid values are dropped.  When addMissing is True, attributes that
    are absent or invalid default to self.__permValues[0].
    """
    normPerms = {}
    for pName in self.__permAttrs:
      if not perms or pName not in perms:
        if addMissing:
          normPerms[ pName ] = self.__permValues[0]
        continue
      else:
        permVal = perms[ pName ].upper()
        # Accept only values from the allowed list
        for nV in self.__permValues:
          if nV == permVal:
            normPerms[ pName ] = nV
            break
      # Invalid value supplied: fall back to the default when requested
      if pName not in normPerms and addMissing:
        normPerms[ pName ] = self.__permValues[0]
    return normPerms
def retrieveVarById( self, userIds, ownerIds, profileName, varName ):
"""
Get a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileReadAccessDataCond( userIds, ownerIds, sqlProfileName, sqlVarName, True )
#when we retrieve the user profile we have to take into account the user.
selectSQL = "SELECT data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
return S_OK( data[0][0] )
return S_ERROR( "No data for userIds %s profileName %s varName %s" % ( userIds, profileName, varName ) )
def retrieveAllUserVarsById( self, userIds, profileName ):
"""
Get a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
sqlCond = self.__webProfileUserDataCond( userIds, sqlProfileName )
selectSQL = "SELECT varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
return S_OK( dict( data ) )
def retrieveUserProfilesById( self, userIds ):
"""
Get all profiles and data for a user
"""
sqlCond = self.__webProfileUserDataCond( userIds )
selectSQL = "SELECT Profile, varName, data FROM `up_ProfilesData` WHERE %s" % sqlCond
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
dataDict = {}
for row in data:
if row[0] not in dataDict:
dataDict[ row[0] ] = {}
dataDict[ row[0] ][ row[1] ] = row[2 ]
return S_OK( dataDict )
def retrieveVarPermsById( self, userIds, ownerIds, profileName, varName ):
"""
Get a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileReadAccessDataCond( userIds, ownerIds, sqlProfileName, sqlVarName )
selectSQL = "SELECT %s FROM `up_ProfilesData` WHERE %s" % ( ", ".join( self.__permAttrs ), sqlCond )
result = self._query( selectSQL )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
permDict = {}
for i in range( len( self.__permAttrs ) ):
permDict[ self.__permAttrs[ i ] ] = data[0][i]
return S_OK( permDict )
return S_ERROR( "No data for userIds %s profileName %s varName %s" % ( userIds, profileName, varName ) )
def deleteVarByUserId( self, userIds, profileName, varName ):
"""
Remove a data entry for a profile
"""
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
sqlCond = self.__webProfileUserDataCond( userIds, sqlProfileName, sqlVarName )
selectSQL = "DELETE FROM `up_ProfilesData` WHERE %s" % sqlCond
return self._update( selectSQL )
  def storeVarByUserId( self, userIds, profileName, varName, data, perms ):
    """
    Set a data entry for a profile

    Performs an upsert: tries an INSERT first and falls back to an UPDATE
    of the value/permission columns when the row already exists
    ( duplicate key ).

    :param userIds: ( userId, groupId, voId ) triplet
    :param perms: permissions dict, normalised via __parsePerms
    """
    sqlInsertValues = []
    sqlInsertKeys = []
    sqlInsertKeys.append( ( 'UserId', userIds[0] ) )
    sqlInsertKeys.append( ( 'GroupId', userIds[1] ) )
    sqlInsertKeys.append( ( 'VOId', userIds[2] ) )
    result = self._escapeString( profileName )
    if not result[ 'OK' ]:
      return result
    sqlProfileName = result[ 'Value' ]
    sqlInsertKeys.append( ( 'Profile', sqlProfileName ) )
    result = self._escapeString( varName )
    if not result[ 'OK' ]:
      return result
    sqlVarName = result[ 'Value' ]
    sqlInsertKeys.append( ( 'VarName', sqlVarName ) )
    result = self._escapeString( data )
    if not result[ 'OK' ]:
      return result
    sqlInsertValues.append( ( 'Data', result[ 'Value' ] ) )
    normPerms = self.__parsePerms( perms )
    for k in normPerms:
      sqlInsertValues.append( ( k, '"%s"' % normPerms[ k ] ) )
    sqlInsert = sqlInsertKeys + sqlInsertValues
    insertSQL = "INSERT INTO `up_ProfilesData` ( %s ) VALUES ( %s )" % ( ", ".join( [ f[0] for f in sqlInsert ] ),
                                                                         ", ".join( [ str( f[1] ) for f in sqlInsert ] ) )
    result = self._update( insertSQL )
    if result[ 'OK' ]:
      return result
    #If error and not duplicate -> real error
    if result[ 'Message' ].find( "Duplicate entry" ) == -1:
      return result
    # Row exists: update only the non-key columns, keyed on the identifiers
    updateSQL = "UPDATE `up_ProfilesData` SET %s WHERE %s" % ( ", ".join( [ "%s=%s" % f for f in sqlInsertValues ] ),
                                                               self.__webProfileUserDataCond( userIds,
                                                                                              sqlProfileName,
                                                                                              sqlVarName ) )
    return self._update( updateSQL )
def setUserVarPermsById( self, userIds, profileName, varName, perms ):
result = self._escapeString( profileName )
if not result[ 'OK' ]:
return result
sqlProfileName = result[ 'Value' ]
result = self._escapeString( varName )
if not result[ 'OK' ]:
return result
sqlVarName = result[ 'Value' ]
nPerms = self.__parsePerms( perms, False )
if not nPerms:
return S_OK()
sqlPerms = ",".join( [ "%s='%s'" % ( k, nPerms[k] ) for k in nPerms ] )
updateSql = "UPDATE `up_ProfilesData` SET %s WHERE %s" % ( sqlPerms,
self.__webProfileUserDataCond( userIds,
sqlProfileName,
sqlVarName ) )
return self._update( updateSql )
def retrieveVar( self, userName, userGroup, ownerName, ownerGroup, profileName, varName ):
"""
Get a data entry for a profile
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
result = self.getUserGroupIds( ownerName, ownerGroup )
if not result[ 'OK' ]:
return result
ownerIds = result[ 'Value' ]
return self.retrieveVarById( userIds, ownerIds, profileName, varName )
def retrieveUserProfiles( self, userName, userGroup ):
"""
Helper for getting data
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveUserProfilesById( userIds )
def retrieveAllUserVars( self, userName, userGroup, profileName ):
"""
Helper for getting data
"""
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveAllUserVarsById( userIds, profileName )
  def retrieveVarPerms( self, userName, userGroup, ownerName, ownerGroup, profileName, varName ):
    """
    Resolve reader and owner ids, then fetch the permissions of one
    profile variable.
    """
    result = self.getUserGroupIds( userName, userGroup )
    if not result[ 'OK' ]:
      return result
    userIds = result[ 'Value' ]
    # NOTE(review): the extra positional False ( unlike retrieveVar ) is
    # presumably insertIfMissing = False on getUserGroupIds - confirm.
    result = self.getUserGroupIds( ownerName, ownerGroup, False )
    if not result[ 'OK' ]:
      return result
    ownerIds = result[ 'Value' ]
    return self.retrieveVarPermsById( userIds, ownerIds, profileName, varName )
def setUserVarPerms( self, userName, userGroup, profileName, varName, perms ):
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.setUserVarPermsById( userIds, profileName, varName, perms )
def storeVar( self, userName, userGroup, profileName, varName, data, perms = None ):
"""
Helper for setting data
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.storeVarByUserId( userIds, profileName, varName, data, perms = perms )
finally:
pass
def deleteVar( self, userName, userGroup, profileName, varName ):
"""
Helper for deleting data
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.deleteVarByUserId( userIds, profileName, varName )
finally:
pass
  def __profilesCondGenerator( self, value, varType, initialValue = False ):
    """
    Build an SQL "IN ( ... )" condition over user/group/VO ids.

    :param value: a name or list of names to resolve to numeric ids
    :param varType: 'user', 'group' or anything else ( treated as VO )
    :param initialValue: optional id seeded into the list
    """
    # Python 2 idiom: accept both str and unicode as a single value
    if type( value ) in types.StringTypes:
      value = [ value ]
    ids = []
    if initialValue:
      ids.append( initialValue )
    for val in value:
      if varType == 'user':
        result = self.__getUserId( val, insertIfMissing = False )
      elif varType == 'group':
        result = self.__getGroupId( val, insertIfMissing = False )
      else:
        result = self.__getVOId( val, insertIfMissing = False )
      # Names that do not resolve are silently skipped
      if not result[ 'OK' ]:
        continue
      ids.append( result[ 'Value' ] )
    if varType == 'user':
      fieldName = 'UserId'
    elif varType == 'group':
      fieldName = 'GroupId'
    else:
      fieldName = 'VOId'
    # NOTE(review): if nothing resolves this yields "in ( )", which is not
    # valid SQL - confirm callers always pass at least one known name.
    return "`up_ProfilesData`.%s in ( %s )" % ( fieldName, ", ".join( [ str( iD ) for iD in ids ] ) )
  def listVarsById( self, userIds, profileName, filterDict = None ):
    """
    List the variables of a profile that the given user may read.

    Joins the id tables so the result rows contain readable names:
    ( UserName, UserGroup, VO, VarName ).

    :param filterDict: optional { 'user'|'group'|'vo' : name or list }
                       restricting the owners considered
    """
    result = self._escapeString( profileName )
    if not result[ 'OK' ]:
      return result
    sqlProfileName = result[ 'Value' ]
    sqlCond = [ "`up_Users`.Id = `up_ProfilesData`.UserId",
                "`up_Groups`.Id = `up_ProfilesData`.GroupId",
                "`up_VOs`.Id = `up_ProfilesData`.VOId",
                self.__webProfileReadAccessDataCond( userIds, userIds, sqlProfileName ) ]
    if filterDict:
      # Normalise filter keys to lower case before applying them
      fD = {}
      for k in filterDict:
        fD[ k.lower() ] = filterDict[ k ]
      filterDict = fD
      for k in ( 'user', 'group', 'vo' ):
        if k in filterDict:
          sqlCond.append( self.__profilesCondGenerator( filterDict[ k ], k ) )
    sqlVars2Get = [ "`up_Users`.UserName", "`up_Groups`.UserGroup", "`up_VOs`.VO", "`up_ProfilesData`.VarName" ]
    sqlQuery = "SELECT %s FROM `up_Users`, `up_Groups`, `up_VOs`, `up_ProfilesData` WHERE %s" % ( ", ".join( sqlVars2Get ),
                                                                                                  " AND ".join( sqlCond ) )
    return self._query( sqlQuery )
def listVars( self, userName, userGroup, profileName, filterDict = None ):
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.listVarsById( userIds, profileName, filterDict )
  def storeHashTagById( self, userIds, tagName, hashTag = False ):
    """
    Set a data entry for a profile

    Stores ( tagName, hashTag ) for a user; when no hashTag is given an
    md5 digest of timestamp + ids + tag name is generated.  Performs an
    insert-or-update on the ( user, group, vo, tag ) key.

    :return: S_OK( hashTag ) on success
    """
    if not hashTag:
      hashTag = hashlib.md5()
      hashTag.update( "%s;%s;%s" % ( Time.dateTime(), userIds, tagName ) )
      hashTag = hashTag.hexdigest()
    result = self.insertFields( 'up_HashTags', [ 'UserId', 'GroupId', 'VOId', 'TagName', 'HashTag' ],
                                [ userIds[0], userIds[1], userIds[2], tagName, hashTag ] )
    if result[ 'OK' ]:
      return S_OK( hashTag )
    #If error and not duplicate -> real error
    if result[ 'Message' ].find( "Duplicate entry" ) == -1:
      return result
    # Entry exists: refresh the stored hash for this tag
    result = self.updateFields( 'up_HashTags', ['HashTag'], [hashTag], { 'UserId': userIds[0],
                                                                         'GroupId': userIds[1],
                                                                         'VOId': userIds[2],
                                                                         'TagName': tagName } )
    if not result[ 'OK' ]:
      return result
    return S_OK( hashTag )
def retrieveHashTagById( self, userIds, hashTag ):
"""
Get a data entry for a profile
"""
result = self.getFields( 'up_HashTags', ['TagName'], { 'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2],
'HashTag': hashTag } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if len( data ) > 0:
return S_OK( data[0][0] )
return S_ERROR( "No data for combo userId %s hashTag %s" % ( userIds, hashTag ) )
def retrieveAllHashTagsById( self, userIds ):
"""
Get a data entry for a profile
"""
result = self.getFields( 'up_HashTags', ['HashTag', 'TagName'], { 'UserId': userIds[0],
'GroupId': userIds[1],
'VOId': userIds[2] } )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
return S_OK( dict( data ) )
def storeHashTag( self, userName, userGroup, tagName, hashTag = False ):
"""
Helper for storing HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.storeHashTagById( userIds, tagName, hashTag )
finally:
pass
def retrieveHashTag( self, userName, userGroup, hashTag ):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveHashTagById( userIds, hashTag )
finally:
pass
def retrieveAllHashTags( self, userName, userGroup ):
"""
Helper for retrieving HASH
"""
try:
result = self.getUserGroupIds( userName, userGroup )
if not result[ 'OK' ]:
return result
userIds = result[ 'Value' ]
return self.retrieveAllHashTagsById( userIds )
finally:
pass
def getUserProfileNames( self, permission ):
"""
it returns the available profile names by not taking account the permission: ReadAccess and PublishAccess
"""
result = None
permissions = self.__parsePerms( permission, False )
if not permissions:
return S_OK()
condition = ",".join( [ "%s='%s'" % ( k, permissions[k] ) for k in permissions ] )
query = "SELECT distinct Profile from `up_ProfilesData` where %s" % condition
retVal = self._query( query )
if retVal['OK']:
result = S_OK( [i[0] for i in retVal['Value']] )
else:
result = retVal
return result
def testUserProfileDB():
  """ Some test cases

  Exercises UserProfileDB against a local MySQL instance; the assertions
  on ids ( 1, 1, 1 ) assume a freshly created database.
  """
  # building up some fake CS values
  gConfig.setOptionValue( 'DIRAC/Setup', 'Test' )
  gConfig.setOptionValue( '/DIRAC/Setups/Test/Framework', 'Test' )
  host = '127.0.0.1'
  user = 'Dirac'
  pwd = 'Dirac'
  db = 'AccountingDB'
  gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/Host', host )
  gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/DBName', db )
  gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/User', user )
  gConfig.setOptionValue( '/Systems/Framework/Test/Databases/UserProfileDB/Password', pwd )
  db = UserProfileDB()
  assert db._connect()['OK']
  userName = 'testUser'
  userGroup = 'testGroup'
  profileName = 'testProfile'
  varName = 'testVar'
  tagName = 'testTag'
  hashTag = '237cadc4af90277e9524e6386e264630'
  data = 'testData'
  perms = 'USER'
  try:
    # Flip the constant to True to drop and recreate all tables first
    if False:
      for tableName in db.tableDict.keys():
        result = db._update( 'DROP TABLE `%s`' % tableName )
        assert result['OK']
    gLogger.info( '\n Creating Table\n' )
    # Make sure it is there and it has been created for this test
    result = db._checkTable()
    assert result == {'OK': True, 'Value': None }
    result = db._checkTable()
    assert result == {'OK': True, 'Value': 0}
    gLogger.info( '\n Adding some data\n' )
    result = db.storeVar( userName, userGroup, profileName, varName, data, perms )
    assert result['OK']
    assert result['Value'] == 1
    gLogger.info( '\n Some queries\n' )
    result = db.getUserGroupIds( userName, userGroup )
    assert result['OK']
    assert result['Value'] == ( 1, 1, 1 )
    result = db.listVars( userName, userGroup, profileName )
    assert result['OK']
    assert result['Value'][0][3] == varName
    result = db.retrieveUserProfiles( userName, userGroup )
    assert result['OK']
    assert result['Value'] == { profileName: { varName: data } }
    result = db.storeHashTag( userName, userGroup, tagName, hashTag )
    assert result['OK']
    assert result['Value'] == hashTag
    result = db.retrieveAllHashTags( userName, userGroup )
    assert result['OK']
    assert result['Value'] == { hashTag: tagName }
    result = db.retrieveHashTag( userName, userGroup, hashTag )
    assert result['OK']
    assert result['Value'] == tagName
    gLogger.info( '\n OK\n' )
  except AssertionError:
    # Report the last DB result that caused the failure and exit non-zero
    print('ERROR ', end=' ')
    if not result['OK']:
      print(result['Message'])
    else:
      print(result)
    sys.exit( 1 )
if __name__ == '__main__':
  from DIRAC.Core.Base import Script
  Script.parseCommandLine()
  gLogger.setLevel( 'VERBOSE' )
  # The test suite relies on `assert`, which python strips when running
  # optimised - bail out in that case.
  if 'PYTHONOPTIMIZE' in os.environ and os.environ['PYTHONOPTIMIZE']:
    gLogger.info( 'Unset python optimization "PYTHONOPTIMIZE"' )  # fixed typo "pyhthon"
    sys.exit( 0 )
  testUserProfileDB()
| petricm/DIRAC | FrameworkSystem/DB/UserProfileDB.py | Python | gpl-3.0 | 27,595 | [
"DIRAC"
] | 508f9fc1e5fc160915231664f43d969072a429ecb84ac37dc5be6a42a2cf1635 |
# Module:
# Submodules:
# Created:
# Copyright (C) <date> <fullname>
#
# This module is part of the <project name> project and is released under
# the MIT License: http://opensource.org/licenses/MIT
"""
"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from enum import Enum
from functools import partial
# Third-party imports
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from yarl import URL
# Local imports
from .driver import BrowserDriver
# ============================================================================
# Globals
# ============================================================================
class HTMLProperty(Enum):
    """DOM attribute names used to extract an element's HTML markup."""
    inner = 'innerHTML'
    outer = 'outerHTML'
# ============================================================================
# Browser
# ============================================================================
class WaitFor:
    """Descriptor exposing WebDriverWait-based helpers on a Browser.

    Accessing ``Browser.waitfor`` returns a WaitFor bound to that Browser
    instance; each helper blocks until the corresponding selenium expected
    condition holds or the timeout (in seconds) expires.
    """
    __slots__ = ('_parent', )
    def __init__(self, *, parent=None):
        # parent is the Browser instance this WaitFor is bound to
        self._parent = parent
    def __get__(self, obj, type=None):
        """Return a new WaitFor bound to the accessing Browser instance"""
        return self.__class__(parent=obj)
    def __call__(self, condition_func, *, xpath=None, timeout=1):
        """Wait until condition_func holds; return whatever .until() yields"""
        # Some conditions (e.g. alert_is_present) take no locator argument,
        # so the (By.XPATH, xpath) tuple is only built when xpath is given.
        funcarg = [(By.XPATH, xpath)] if xpath is not None else []
        el = WebDriverWait(self._parent.selenium_driver, timeout).until(
            condition_func(*funcarg)
        )
        return el
    def alert(self, *, timeout=1):
        """Wait for an alert to be present"""
        condfunc = ec.alert_is_present
        return self.__call__(condfunc, timeout=timeout)
    def element(self, xpath, *, timeout=1):
        """Wait for an element to be present in the DOM"""
        condfunc = ec.presence_of_element_located
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    def allelements(self, xpath, *, timeout=1):
        """Wait for all elements matching xpath to be present"""
        condfunc = ec.presence_of_all_elements_located
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    def element_text(self, xpath, text, *, timeout=1):
        """Wait for element that has given text"""
        condfunc = partial(ec.text_to_be_present_in_element, (By.XPATH, xpath), text)
        return self.__call__(condfunc, timeout=timeout)
    def element_value(self, xpath, text, *, timeout=1):
        """Wait for element that has given text as the value of its value attribute"""
        condfunc = partial(ec.text_to_be_present_in_element_value, (By.XPATH, xpath), text)
        return self.__call__(condfunc, timeout=timeout)
    def element_clickable(self, xpath, *, timeout=1):
        """Wait for element to be visible and enabled so that it can be clicked"""
        condfunc = ec.element_to_be_clickable
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    def element_invisible(self, xpath, *, timeout=1):
        """Wait for element that is either invisible or not present in the DOM"""
        condfunc = ec.invisibility_of_element_located
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    def element_visible(self, xpath, *, timeout=1):
        """Wait for element to exist and to be visible"""
        condfunc = ec.visibility_of_element_located
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    def allelements_visible(self, xpath, *, timeout=1):
        """Wait for one or more elements to exist and to be visible"""
        condfunc = ec.visibility_of_any_elements_located
        return self.__call__(condfunc, xpath=xpath, timeout=timeout)
    @contextmanager
    def pageload(self, *, timeout=30):
        """Context manager: wait for a navigation triggered inside the body.

        Captures the current <html> element, runs the body, then waits for
        the old element to go stale, i.e. for the new page to replace it.
        """
        old_page = self.element('/html')
        yield
        self.stale_element(old_page, timeout=timeout)
    def stale_element(self, element, *, timeout=1):
        """Wait for the given element to no longer attached to the DOM"""
        condfunc = partial(ec.staleness_of, element)
        return self.__call__(condfunc, timeout=timeout)
    def isvisible(self, element, *, timeout=1):
        """Wait for an existing element to become visible"""
        condfunc = partial(ec.visibility_of, element)
        return self.__call__(condfunc, timeout=timeout)
    def title(self, text, *, timeout=1):
        """Wait for the page title to match the given text"""
        condfunc = partial(ec.title_is, text)
        return self.__call__(condfunc, timeout=timeout)
    def title_substring(self, text, *, timeout=1):
        """Wait for the page title to contain the given text"""
        condfunc = partial(ec.title_contains, text)
        return self.__call__(condfunc, timeout=timeout)
class Browser:
    """Thin convenience wrapper around a BrowserDriver/selenium WebDriver.

    Provides element lookup, navigation and context-switch helpers plus the
    ``waitfor`` descriptor for explicit waits.
    """
    __slots__ = ('_driver', )
    # Data descriptors
    waitfor = WaitFor()
    def __init__(self, driver):
        """Wrap the given BrowserDriver; raise TypeError for anything else"""
        if not isinstance(driver, BrowserDriver):
            msg = ('driver arg expected {} object, got {} instead'.
                   format(BrowserDriver.__name__, type(driver).__name__))
            raise TypeError(msg)
        self._driver = driver
    def __enter__(self):
        # Delegate lifetime management to the underlying driver
        self._driver.__enter__()
        return self
    def __exit__(self, exctype, exc, exctb):
        return self._driver.__exit__(exctype, exc, exctb)
    @contextmanager
    def actionchain(self):
        """Perform a series of browser actions"""
        actions = ActionChains(self.selenium_driver)
        yield actions
        # Actions queued inside the with-block run here, in one batch
        actions.perform()
    def allelements(self, xpath):
        """Return list of selenium elements representing xpath"""
        return self.selenium_driver.find_elements_by_xpath(xpath)
    def element(self, xpath):
        """Return selenium element representing xpath"""
        return self.selenium_driver.find_element_by_xpath(xpath)
    def element_html(self, el_or_xpath, htmlproperty=HTMLProperty.inner):
        """Return either the innerHTML or outerHTML value of an element

        el_or_xpath may be a WebElement or an xpath string to look up.
        """
        errmsg = None
        if not isinstance(el_or_xpath, (str, WebElement)):
            errmsg = ('el_or_xpath expected {} or {} object, got {} object instead'.
                      format('str', WebElement.__name__, type(el_or_xpath).__name__))
        elif not isinstance(htmlproperty, HTMLProperty):
            errmsg = ('htmlproperty expected {} object, got {} object instead'.
                      format(HTMLProperty.__name__, type(htmlproperty).__name__))
        if errmsg:
            raise TypeError(errmsg)
        el = el_or_xpath
        if isinstance(el, str):
            el = self.element(el)
        return el.get_attribute(htmlproperty.value)
    def go(self, url, *, timeout=1):
        """Visit a url (a yarl.URL) and wait for the new page to load"""
        if not isinstance(url, URL):
            msg = ('url arg expected {} object, got {} object instead'.
                   format(URL.__name__, type(url).__name__))
            raise TypeError(msg)
        with self.waitfor.pageload(timeout=timeout):
            self.selenium_driver.get(str(url))
    def maximize(self):
        """Maximize the browser window"""
        self.selenium_driver.maximize_window()
    def switch(self, iframe=None):
        """Switch context

        With no argument, return to the default content; otherwise switch
        into the given iframe page object (uses its .element).
        """
        switch = self.selenium_driver.switch_to
        if iframe is None:
            switch.default_content()
        else:
            switch.frame(iframe.element)
    @property
    def driver(self):
        """Return BrowserDriver object associated with this Browser"""
        return self._driver
    @property
    def location(self):
        """Return browser's current url"""
        return URL(self.selenium_driver.current_url)
    @property
    def selenium_driver(self):
        """Return underlying selenium WebDriver object"""
        return self._driver.driver
    @property
    def title(self):
        """Page title"""
        return self.selenium_driver.title
    @property
    def source(self):
        """Retrieve page source"""
        return self.selenium_driver.page_source
# ============================================================================
# Context
# ============================================================================
class PageObject(metaclass=ABCMeta):
    """Describes a set of web elements

    Abstract interface: concrete page objects implement lookup, validity
    and navigation over a tree of parent/child page objects.
    """
    # --------------------
    # General methods
    # --------------------
    @abstractmethod
    def __bool__(self):
        """Return whether the page object is valid"""
        raise NotImplementedError
    @abstractmethod
    def reload(self):
        """Reload the page object"""
        raise NotImplementedError
    @property
    @abstractmethod
    def name(self):
        """Retrieve object's name

        This is used as a key on the object's parent
        """
        raise NotImplementedError
    @property
    @abstractmethod
    def source(self):
        """Retrieve object's source"""
        raise NotImplementedError
    @property
    @abstractmethod
    def xpath(self):
        """Calculate and return the xpath to the page object"""
        raise NotImplementedError
    @property
    @abstractmethod
    def absxpath(self):
        """Calculate and return the absolute xpath to the page object"""
        raise NotImplementedError
    @property
    @abstractmethod
    def browser(self):
        """Return the browser"""
        raise NotImplementedError
    @property
    @abstractmethod
    def visible(self):
        """Return True if the page object is visible"""
        raise NotImplementedError
    @property
    @abstractmethod
    def page(self):
        """Return the object's page"""
        raise NotImplementedError
    @property
    @abstractmethod
    def parent(self):
        """Return the parent page object"""
        raise NotImplementedError
    @parent.setter
    @abstractmethod
    def parent(self, parent):
        """Set the parent page object"""
        raise NotImplementedError
# ============================================================================
# Container
# ============================================================================
class CompositePageObject(PageObject):
    """PageObject that contains named child page objects (container API)."""
    # --------------------
    # Container methods
    # --------------------
    @abstractmethod
    def __getitem__(self, name):
        """Retrieve a child page object by name"""
        raise NotImplementedError
    @abstractmethod
    def __delitem__(self, name):
        """Remove a child page object by name"""
        raise NotImplementedError
    @abstractmethod
    def __iter__(self):
        """Iterate over names of child page objects"""
        raise NotImplementedError
    @abstractmethod
    def __len__(self):
        """Return the number of child page objects"""
        raise NotImplementedError
    @abstractmethod
    def add(self, obj):
        """Add a new child page object"""
        raise NotImplementedError
    @abstractmethod
    def clear(self):
        """Remove all child page objects"""
        raise NotImplementedError
    @abstractmethod
    def children(self):
        """Iterator over child page objects"""
        raise NotImplementedError
class Page(CompositePageObject):
    """Top-level composite page object representing a whole browser page."""
    @abstractmethod
    def go(self):
        """Retrieve the page"""
        raise NotImplementedError
    @property
    @abstractmethod
    def url(self):
        """Return the current page's location"""
        raise NotImplementedError
    @property
    @abstractmethod
    def parser(self):
        """Return an lxml parser"""
        raise NotImplementedError
# ============================================================================
#
# ============================================================================
| arielmakestuff/selweb | selweb/core.py | Python | mit | 11,856 | [
"VisIt"
] | 5b47417e644bad2551cc2aa0785db2bb06b87084190ca738a6f5cc1c04d76a8b |
import scipy.stats
import numpy
from numpy import logical_and
def binned_transform(u):
    """
    Map unit-cube samples to parameter space.

    First half of the vector becomes the bin locations, uniform in
    [-10, 10]; second half becomes the bin widths, log-uniform in
    [1e-5, 1e5].  The input array is copied, not modified.
    """
    v = numpy.copy(u)
    # BUGFIX/portability: use explicit floor division - `len(v)/2` only
    # works as an index under python 2's integer division.
    half = len(v) // 2
    v[:half] = 20 * v[:half] - 10
    v[half:] = 10**(v[half:] * 10 - 5)
    return v
class BinnedModel(object):
    """
    Each bin in the x variable has a distribution (Gaussian or other).
    The bin borders have to be specified.

    Parameter vector layout (after binned_transform): first half holds the
    per-bin locations, second half the per-bin widths.
    """
    def __init__(self, bins, rv_type = scipy.stats.norm):
        self.bins = bins
        self.parameter_names = ['y%d' % (i+1) for i in range(len(bins))] + ['ys%d' % (i+1) for i in range(len(bins))]
        # chains must be set by the caller: list of (name, samples) pairs;
        # sample columns are x, y and optionally a per-sample weight
        self.chains = None
        def binned_model(v):
            # One (location, width) pair per bin; python 2 integer division
            params = zip(v[:len(v)/2], v[len(v)/2:])
            def model(x):
                yvec = numpy.empty_like(x)
                svec = numpy.empty_like(x)
                for j, (lo, hi) in enumerate(bins):
                    mask = logical_and(x >= lo, x < hi)
                    if mask.any():
                        yvec[mask] = params[j][0]
                        svec[mask] = params[j][1]
                # Vectorised frozen distribution with per-point parameters
                rv = rv_type(yvec, svec)
                return rv
            return model
        def binned_likelihood(v):
            model = binned_model(v)
            like = 0
            for k, c in self.chains:
                x = c[:,0]
                y = c[:,1]
                # Third column, when present, carries per-sample weights
                w = c[:,2] if c.shape[1] > 2 else 1
                rv = model(x)
                prob = (rv.pdf(y) * w).mean()
                if prob == 0:
                    print 'parameters %s ruled out by object %s' % (str(v), k)
                    return -1e100
                like += numpy.log(prob)
            return like
        self.transform = binned_transform
        self.loglikelihood = binned_likelihood
        self.model = binned_model
class RvSwitch(object):
    """
    Two-component mixture distribution: with probability ``p`` the value
    is drawn from ``a``, with probability ``1 - p`` from ``b``.
    """
    def __init__(self, p, a, b):
        self.p = p
        self.a = a
        self.b = b
    def ppf(self, q):
        """Quantile function of the mixture.

        Quantiles below p map into component a, above into component b
        (valid when the components have essentially disjoint support,
        as used here).
        """
        # BUGFIX: the upper branch previously used (1 - q) / (1 - p), which
        # reverses the quantile ordering of component b; the correct affine
        # map sends q = p -> 0 and q = 1 -> 1.
        return numpy.where(q < self.p, self.a.ppf(q / self.p), self.b.ppf((q - self.p) / (1 - self.p)))
    def pdf(self, y):
        """Mixture density, each component weighted by its probability."""
        # BUGFIX: the components were previously summed unweighted, so the
        # density did not integrate to 1.
        return self.p * self.a.pdf(y) + (1 - self.p) * self.b.pdf(y)
class BinnedEnableModel(object):
    """
    The distribution is such that with probability 1-'p', the value is uniformly
    in the 'yzerorange', and with probability 'p', the value is distributed
    at location 'y' with width 'ys'.
    Each bin in the x variable has such a distribution with 3 variables. The bin borders have to be specified.
    """
    def __init__(self, bins, yzerorange, rv_type = scipy.stats.norm):
        self.bins = bins
        self.parameter_names = ['p%d' % (i+1) for i in range(len(bins))] + \
            ['y%d' % (i+1) for i in range(len(bins))] + \
            ['ys%d' % (i+1) for i in range(len(bins))]
        # chains must be set by the caller: list of (name, samples) pairs;
        # sample columns are x, y and optionally a per-sample weight
        self.chains = None
        def binned_model(v):
            # One (p, location, width) triplet per bin; python 2 division
            params = zip(v[:len(v)/3], v[len(v)/3:2*len(v)/3], v[2*len(v)/3:])
            def model(x):
                pvec = numpy.empty_like(x)
                yvec = numpy.empty_like(x)
                svec = numpy.empty_like(x)
                for j, (lo, hi) in enumerate(bins):
                    mask = logical_and(x >= lo, x < hi)
                    if mask.any():
                        pvec[mask] = params[j][0]
                        yvec[mask] = params[j][1]
                        svec[mask] = params[j][2]
                # Mixture of the located distribution and a flat component
                # over yzerorange
                rv = RvSwitch(pvec,
                    rv_type(yvec, svec),
                    scipy.stats.uniform(yzerorange[0], yzerorange[1] - yzerorange[0]))
                return rv
            return model
        def binned_likelihood(v):
            model = binned_model(v)
            like = 0
            for k, c in self.chains:
                x = c[:,0]
                y = c[:,1]
                # Third column, when present, carries per-sample weights
                w = c[:,2] if c.shape[1] > 2 else 1
                rv = model(x)
                prob = (rv.pdf(y) * w).mean()
                if prob == 0:
                    print 'parameters %s ruled out by object %s' % (str(v), k)
                    return -1e100
                like += numpy.log(prob)
            return like
        self.transform = binned_transform
        self.loglikelihood = binned_likelihood
        self.model = binned_model
| JohannesBuchner/syscorr | syscorr/binned.py | Python | agpl-3.0 | 3,309 | [
"Gaussian"
] | aedc59141c2baa423fe4db62f943e70aec755075f6b9d62d7a174428ef4c4ac4 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8
import os
import argparse
from gff3 import genes, get_gff3_id, get_rbs_from, feature_test_true, feature_lambda, feature_test_type
from cpt_gffParser import gffParse, gffWrite
from Bio import SeqIO
from jinja2 import Environment, FileSystemLoader
import logging
from math import floor
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pat")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
def genes_all(feature_list, feature_type=None, sort=False):
    """
    Simple filter to extract gene features from the feature set.

    :param feature_list: iterable of GFF features to filter
    :param feature_type: list of feature type names (defaults to ["gene"])
    :param sort: when True, yield features ordered by start coordinate
    """
    # BUGFIX (idiom): avoid a mutable default argument; None stands in for
    # the previous default ["gene"].
    if feature_type is None:
        feature_type = ["gene"]
    if not sort:
        for x in feature_lambda(
            feature_list, feature_test_type, {"types": feature_type}, subfeatures=True
        ):
            yield x
    else:
        data = list(genes_all(feature_list, feature_type, sort=False))
        data = sorted(data, key=lambda feature: feature.location.start)
        for x in data:
            yield x
def checkSubs(feature, qualName):
    """
    Breadth-first search of a feature's sub-features for the wanted
    qualifier keys.

    Returns the values found at the first (shallowest) level carrying any
    of the keys in qualName, joined with "; "; returns "" when nothing
    matches anywhere in the subtree.
    """
    level = feature.sub_features
    found = ""
    while len(level) > 0:
        for feat in level:
            for key in feat.qualifiers.keys():
                for wanted in qualName:
                    if key == wanted:
                        if found == "":
                            found = feat.qualifiers[key][0]
                        else:
                            found += "; " + feat.qualifiers[key][0]
        if found != "":
            return found
        # Nothing at this depth: descend one level (breadth-first)
        nextLevel = []
        for feat in level:
            for sub in feat.sub_features:
                nextLevel.append(sub)
        level = nextLevel
    return found
def annotation_table_report(record, types, wanted_cols, gaf_data, searchSubs):
    # Build the annotation table for one record: each name in ``wanted_cols``
    # is resolved to one of the nested accessor functions below (via locals())
    # and applied to every feature of the requested ``types``.
    # Returns a tuple (data, cols): the row values and the [name, help]
    # column headers.
    #
    # NOTE: the docstrings of the nested accessor functions are program data,
    # not mere documentation -- the first line becomes the column name and any
    # text after a double newline becomes the column help (see the
    # ``lcl[x].__doc__`` handling near the bottom). Do not reword them.
    getTypes = []
    for x in [y.strip() for y in types.split(",")]:
        getTypes.append(x)
    getTypes.append("gene")
    sorted_features = list(genes_all(record.features, getTypes, sort=True))
    if wanted_cols is None or len(wanted_cols.strip()) == 0:
        # no columns requested -> empty report
        return [], []
    useSubs = searchSubs
    # --- column accessor functions (name of function == column key) ---
    def rid(record, feature):
        """Organism ID
        """
        return record.id
    # deliberately shadows the builtin ``id``: the function name is the
    # column key looked up from ``wanted_cols``
    def id(record, feature):
        """ID
        """
        return feature.id
    def featureType(record, feature):
        """Type
        """
        return feature.type
    def name(record, feature):
        """Name
        """
        for x in ["Name", "name"]:
            for y in feature.qualifiers.keys():
                if x == y:
                    return feature.qualifiers[x][0]
        if useSubs:
            # fall back to sub-features when the qualifier is absent
            res = checkSubs(feature, ["Name", "name"])
            if res != "":
                return res
        return "None"
    def start(record, feature):
        """Boundary
        """
        return str(feature.location.start + 1)
    def end(record, feature):
        """Boundary
        """
        return str(feature.location.end)
    def location(record, feature):
        """Location
        """
        return str(feature.location.start + 1) + "..{0.end}".format(feature.location)
    def length(record, feature):
        """CDS Length (AA)
        """
        if feature.type == "CDS":
            cdss = [feature]
        else:
            cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
        if cdss == []:
            return "None"
        # total CDS length in codons, minus the stop codon
        res = (sum([len(cds) for cds in cdss]) / 3) - 1
        if floor(res) == res:
            res = int(res)
        return str(res)
    def notes(record, feature):
        """User entered Notes"""
        for x in ["Note", "note", "Notes", "notes"]:
            for y in feature.qualifiers.keys():
                if x == y:
                    return feature.qualifiers[x][0]
        if useSubs:
            res = checkSubs(feature, ["Note", "note", "Notes", "notes"])
            if res != "":
                return res
        return "None"
    def date_created(record, feature):
        """Created"""
        return feature.qualifiers.get("date_creation", ["None"])[0]
    def date_last_modified(record, feature):
        """Last Modified"""
        res = feature.qualifiers.get("date_last_modified", ["None"])[0]
        if res != "None":
            return res
        if useSubs:
            res = checkSubs(feature, ["date_last_modified"])
            if res != "":
                return res
        return "None"
    def description(record, feature):
        """Description"""
        res = feature.qualifiers.get("description", ["None"])[0]
        if res != "None":
            return res
        if useSubs:
            res = checkSubs(feature, ["description"])
            if res != "":
                return res
        return "None"
    def owner(record, feature):
        """Owner
        User who created the feature. In a 464 scenario this may be one of
        the TAs."""
        for x in ["Owner", "owner"]:
            for y in feature.qualifiers.keys():
                if x == y:
                    return feature.qualifiers[x][0]
        if useSubs:
            res = checkSubs(feature, ["Owner", "owner"])
            if res != "":
                return res
        return "None"
    def product(record, feature):
        """Product
        User entered product qualifier (collects "Product" and "product"
        entries)"""
        # stray leftover string literal below; it is a no-op expression
        """User entered Notes"""
        for x in ["product", "Product"]:
            for y in feature.qualifiers.keys():
                if x == y:
                    return feature.qualifiers[x][0]
        if useSubs:
            res = checkSubs(feature, ["product", "Product"])
            if res != "":
                return res
        return "None"
    def note(record, feature):
        """Note
        User entered Note qualifier(s)"""
        return feature.qualifiers.get("Note", [])
    def strand(record, feature):
        """Strand
        """
        return "+" if feature.location.strand > 0 else "-"
    def sd_spacing(record, feature):
        """Shine-Dalgarno spacing
        """
        # NOTE(review): ``gene`` here is the loop variable of the report loop
        # at the bottom of annotation_table_report (a closure), not the
        # ``feature`` argument; this only works because the accessor is
        # invoked from inside that loop.
        rbss = get_rbs_from(gene)
        if len(rbss) == 0:
            return "None"
        else:
            resp = []
            for rbs in rbss:
                cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
                if len(cdss) == 0:
                    return "No CDS"
                if rbs.location.strand > 0:
                    # closest CDS start downstream of the RBS end
                    distance = min(
                        cdss, key=lambda x: x.location.start - rbs.location.end
                    )
                    distance_val = str(distance.location.start - rbs.location.end)
                    resp.append(distance_val)
                else:
                    distance = min(
                        cdss, key=lambda x: x.location.end - rbs.location.start
                    )
                    distance_val = str(rbs.location.start - distance.location.end)
                    resp.append(distance_val)
            if len(resp) == 1:
                return str(resp[0])
            return resp
    def sd_seq(record, feature):
        """Shine-Dalgarno sequence
        """
        # NOTE(review): also relies on the enclosing loop variable ``gene``
        # (see note in sd_spacing above)
        rbss = get_rbs_from(gene)
        if len(rbss) == 0:
            return "None"
        else:
            resp = []
            for rbs in rbss:
                resp.append(str(rbs.extract(record).seq))
            if len(resp) == 1:
                return str(resp[0])
            else:
                return resp
    def start_codon(record, feature):
        """Start Codon
        """
        if feature.type == "CDS":
            cdss = [feature]
        else:
            cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
        data = [x for x in cdss]
        if len(data) == 1:
            return str(data[0].extract(record).seq[0:3])
        else:
            # several CDSs: annotate each codon with its location
            return [
                "{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format(
                    x.extract(record).seq[0:3], x
                )
                for x in data
            ]
    def stop_codon(record, feature):
        """Stop Codon
        """
        return str(feature.extract(record).seq[-3:])
    def dbxrefs(record, feature):
        """DBxrefs
        """
        # stray leftover string literal below; it is a no-op expression
        """User entered Notes"""
        for x in ["Dbxref", "db_xref", "DB_xref", "DBxref", "DB_Xref", "DBXref"]:
            for y in feature.qualifiers.keys():
                if x == y:
                    return feature.qualifiers[x][0]
        return "None"
    def upstream_feature(record, feature):
        """Next gene upstream"""
        if feature.strand > 0:
            upstream_features = [
                x for x in sorted_features if (x.location.start < feature.location.start and x.type == "gene" and x.strand == feature.strand)
            ]
            if len(upstream_features) > 0:
                # if the closest candidate actually contains ``feature`` as a
                # sub-feature, step one further upstream
                foundSelf = False
                featCheck = upstream_features[-1].sub_features
                for x in featCheck:
                    if x == feature:
                        foundSelf = True
                        break
                    featCheck = featCheck + x.sub_features
                if foundSelf:
                    if len(upstream_features) > 1:
                        return upstream_features[-2]
                    return None
                return upstream_features[-1]
            else:
                return None
        else:
            upstream_features = [
                x for x in sorted_features if (x.location.end > feature.location.end and x.type == "gene" and x.strand == feature.strand)
            ]
            if len(upstream_features) > 0:
                foundSelf = False
                featCheck = upstream_features[0].sub_features
                for x in featCheck:
                    if x == feature:
                        foundSelf = True
                        break
                    featCheck = featCheck + x.sub_features
                if foundSelf:
                    if len(upstream_features) > 1:
                        return upstream_features[1]
                    return None
                return upstream_features[0]
            else:
                return None
    def upstream_feature__name(record, feature):
        """Next gene upstream"""
        up = upstream_feature(record, feature)
        if up:
            return str(up.id)
        return "None"
    def ig_dist(record, feature):
        """Distance to next upstream gene on same strand"""
        up = upstream_feature(record, feature)
        if up:
            dist = None
            if feature.strand > 0:
                dist = feature.location.start - up.location.end
            else:
                dist = up.location.start - feature.location.end
            return str(dist)
        else:
            return "None"
    # shared implementation for all gaf_* columns: collect ``attr`` from every
    # GAF row attached to this feature's id
    def _main_gaf_func(record, feature, gaf_data, attr):
        if feature.id in gaf_data:
            return [x[attr] for x in gaf_data[feature.id]]
        return []
    def gaf_annotation_extension(record, feature, gaf_data):
        """GAF Annotation Extension
        Contains cross references to other ontologies that can be used
        to qualify or enhance the annotation. The cross-reference is
        prefaced by an appropriate GO relationship; references to
        multiple ontologies can be entered. For example, if a gene
        product is localized to the mitochondria of lymphocytes, the GO
        ID (column 5) would be mitochondrion ; GO:0005439, and the
        annotation extension column would contain a cross-reference to
        the term lymphocyte from the Cell Type Ontology.
        """
        return _main_gaf_func(record, feature, gaf_data, "annotation_extension")
    def gaf_aspect(record, feature, gaf_data):
        """GAF Aspect code
        E.g. P (biological process), F (molecular function) or C (cellular component)
        """
        return _main_gaf_func(record, feature, gaf_data, "aspect")
    def gaf_assigned_by(record, feature, gaf_data):
        """GAF Creating Organisation
        """
        return _main_gaf_func(record, feature, gaf_data, "assigned_by")
    def gaf_date(record, feature, gaf_data):
        """GAF Creation Date
        """
        return _main_gaf_func(record, feature, gaf_data, "date")
    def gaf_db(record, feature, gaf_data):
        """GAF DB
        """
        return _main_gaf_func(record, feature, gaf_data, "db")
    def gaf_db_reference(record, feature, gaf_data):
        """GAF DB Reference
        """
        return _main_gaf_func(record, feature, gaf_data, "db_reference")
    def gaf_evidence_code(record, feature, gaf_data):
        """GAF Evidence Code
        """
        return _main_gaf_func(record, feature, gaf_data, "evidence_code")
    def gaf_go_id(record, feature, gaf_data):
        """GAF GO ID
        """
        return _main_gaf_func(record, feature, gaf_data, "go_id")
    def gaf_go_term(record, feature, gaf_data):
        """GAF GO Term
        """
        return _main_gaf_func(record, feature, gaf_data, "go_term")
    def gaf_id(record, feature, gaf_data):
        """GAF ID
        """
        return _main_gaf_func(record, feature, gaf_data, "id")
    def gaf_notes(record, feature, gaf_data):
        """GAF Notes
        """
        return _main_gaf_func(record, feature, gaf_data, "notes")
    def gaf_owner(record, feature, gaf_data):
        """GAF Creator
        """
        return _main_gaf_func(record, feature, gaf_data, "owner")
    def gaf_with_or_from(record, feature, gaf_data):
        """GAF With/From
        """
        return _main_gaf_func(record, feature, gaf_data, "with_or_from")
    # --- resolve the requested column names to accessor functions ---
    # A name like "a__b" composes accessors: a is applied first, then b
    # (the header then reads "B of A").
    cols = []
    data = []
    funcs = []
    lcl = locals()
    for x in [y.strip().lower() for y in wanted_cols.split(",")]:
        if not x:
            continue
        if x == "type":
            x = "featureType"
        if x in lcl:
            funcs.append(lcl[x])
            # Keep track of docs
            func_doc = lcl[x].__doc__.strip().split("\n\n")
            # If there's a double newline, assume following text is the
            # "help" and the first part is the "name". Generate empty help
            # if not provided
            if len(func_doc) == 1:
                func_doc += [""]
            cols.append(func_doc)
        elif "__" in x:
            chosen_funcs = [lcl[y] for y in x.split("__")]
            func_doc = [
                " of ".join(
                    [y.__doc__.strip().split("\n\n")[0] for y in chosen_funcs[::-1]]
                )
            ]
            cols.append(func_doc)
            funcs.append(chosen_funcs)
    # --- produce one row per feature ---
    for gene in genes_all(record.features, getTypes, sort=True):
        row = []
        for func in funcs:
            if isinstance(func, list):
                # If we have a list of functions, repeatedly apply them
                value = gene
                for f in func:
                    if value is None:
                        value = "None"
                        break
                    value = f(record, value)
            else:
                # Otherwise just apply the lone function
                if func.__name__.startswith("gaf_"):
                    value = func(record, gene, gaf_data)
                else:
                    value = func(record, gene)
            if isinstance(value, list):
                collapsed_value = ", ".join(value)
                value = [str(collapsed_value)]#.encode("unicode_escape")]
            else:
                value = str(value)#.encode("unicode_escape")
            row.append(value)
        # print row
        data.append(row)
    return data, cols
def parseGafData(file):
    """Parse a CPT GAF-like tab-separated table.

    Lines starting with "#" are header lines and define the column names
    (the "GO Term" header is normalized to "go_term"); every other line is
    one annotation row. Rows are grouped by their "gene" column: the result
    maps gene id -> list of row dicts. Rows lacking a "gene" column are
    skipped.
    """
    header = []
    byGene = {}
    for rawLine in file:
        if rawLine.startswith("#"):
            # header line, e.g. "# db<TAB>gene<TAB>GO Term"
            header = (
                rawLine.strip()
                .replace("# ", "")
                .replace("GO Term", "go_term")
                .split("\t")
            )
        else:
            entry = dict(zip(header, rawLine.strip().split("\t")))
            if "gene" not in entry:
                continue
            byGene.setdefault(entry["gene"], []).append(entry)
    return byGene
def evaluate_and_report(
    annotations,
    genome,
    types="gene",
    reportTemplateName="phage_annotation_validator.html",
    annotationTableCols="",
    gafData=None,
    searchSubs=False,
):
    """
    Generate our HTML evaluation of the genome

    :param annotations: open handle to the parent GFF3 annotations
    :param genome: open handle to the genome FASTA sequence
    :param types: comma-separated feature types to include (gene is always added)
    :param reportTemplateName: Jinja template file name; ".html" templates
        trigger id sanitization (dots replaced by dashes)
    :param annotationTableCols: comma-separated column names for the table
    :param gafData: optional open handle to a CPT GAF-like table
    :param searchSubs: populate empty qualifiers from sub-features
    :returns: the rendered report as UTF-8 encoded bytes
    """
    # Get features from GFF file
    seq_dict = SeqIO.to_dict(SeqIO.parse(genome, "fasta"))
    # Get the first GFF3 record
    # TODO: support multiple GFF3 files.
    at_table_data = []
    # Initialize so rendering still works when the GFF3 stream yields no
    # records (previously this name was unbound in that case).
    annotation_table_col_names = []
    gaf = {}
    if gafData:
        gaf = parseGafData(gafData)
    for record in gffParse(annotations, base_dict=seq_dict):
        if reportTemplateName.endswith(".html"):
            # dots confuse HTML anchors/ids
            record.id = record.id.replace(".", "-")
        log.info("Producing an annotation table for %s" % record.id)
        annotation_table_data, annotation_table_col_names = annotation_table_report(
            record, types, annotationTableCols, gaf, searchSubs
        )
        at_table_data.append((record, annotation_table_data))
        # break
    # This is data that will go into our HTML template
    kwargs = {
        "annotation_table_data": at_table_data,
        "annotation_table_col_names": annotation_table_col_names,
    }
    env = Environment(
        loader=FileSystemLoader(SCRIPT_PATH), trim_blocks=True, lstrip_blocks=True
    )
    if reportTemplateName.endswith(".html"):
        # BUGFIX: this used to be ``str(get_gff3_id).replace(".", "-")``,
        # which stringifies the *function object* and installs a plain string
        # as the filter. Jinja filters must be callables; sanitize the id of
        # the filtered value instead.
        env.filters["nice_id"] = lambda value: str(get_gff3_id(value)).replace(".", "-")
    else:
        env.filters["nice_id"] = get_gff3_id

    def join(listy):
        return "\n".join(listy)

    env.filters.update({"join": join})
    tpl = env.get_template(reportTemplateName)
    return tpl.render(**kwargs).encode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"annotations", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument("genome", type=argparse.FileType("r"), help="Genome Sequence")
parser.add_argument(
"--types",
help="Select extra types to display in output (Will always include gene)",
)
parser.add_argument(
"--reportTemplateName",
help="Report template file name",
default="phageqc_report_full.html",
)
parser.add_argument(
"--annotationTableCols",
help="Select columns to report in the annotation table output format",
)
parser.add_argument(
"--gafData", help="CPT GAF-like table", type=argparse.FileType("r")
)
parser.add_argument(
"--searchSubs", help="Attempt to populate fields from sub-features if qualifier is empty", action="store_true"
)
args = parser.parse_args()
print(evaluate_and_report(**vars(args)).decode("utf-8"))
| TAMU-CPT/galaxy-tools | tools/phage/phage_annotation_table.py | Python | gpl-3.0 | 19,472 | [
"Galaxy"
] | 640b1033db0226962234f43478b02a23b671398f7fc79e99adfdf6d6939c5c9a |
"""
RequestOperation to create a tarball from a list of LFNs.
Download a list of files to local storage, then tars it and uploads it to a StorageElement
This operation requires the following arguments:
* ArchiveLFN: The LFN of the tarball
* SourceSE: Where the files to be archived are downloaded from
* TarballSE: Where the tarball will be uploaded to
* RegisterDescendent: If True the tarball will be registered as a descendent of the LFNs
"""
import os
import shutil
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
__RCSID__ = '$Id$'
class ArchiveFiles(OperationHandlerBase):
  """ArchiveFiles operation handler.

  Downloads the files to be archived to local storage, creates a tarball,
  uploads the tarball to a StorageElement and optionally registers it as a
  descendent of the archived LFNs (see the module docstring for the
  required operation arguments).
  """

  def __init__(self, operation=None, csPath=None):
    """Initialize the ArchiveFiles handler.

    :param self: self reference
    :param Operation operation: Operation instance
    :param string csPath: CS path for this handler
    """
    OperationHandlerBase.__init__(self, operation, csPath)
    # monitoring counters for attempted/successful/failed executions
    gMonitor.registerActivity('ArchiveFilesAtt', 'Request attempt',
                              'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
    gMonitor.registerActivity('ArchiveFilesOK', 'Requests successful',
                              'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
    gMonitor.registerActivity('ArchiveFilesFail', 'Requests failed',
                              'RequestExecutingAgent', 'Files/min', gMonitor.OP_SUM)
    # base folder where files are downloaded before tarring
    self.cacheFolder = os.environ.get('AGENT_WORKDIRECTORY')
    self.parameterDict = {}  # arguments decoded from the operation
    self.waitingFiles = []  # operation files still to be treated
    self.lfns = []  # LFNs corresponding to the waiting files

  def __call__(self):
    """Process the ArchiveFiles operation."""
    try:
      gMonitor.addMark('ArchiveFilesAtt', 1)
      self._run()
      gMonitor.addMark('ArchiveFilesOK', 1)
    except RuntimeError as e:
      # RuntimeErrors are raised for anticipated failures; no traceback needed
      self.log.info('Failed to execute ArchiveFiles', repr(e))
      gMonitor.addMark('ArchiveFilesFail', 1)
      return S_ERROR(str(e))
    except Exception as e:
      self.log.exception('Failed to execute ArchiveFiles', repr(e), lException=e)
      gMonitor.addMark('ArchiveFilesFail', 1)
      return S_ERROR(str(e))
    finally:
      # always remove the tarball and the downloaded files
      self._cleanup()
    return S_OK()

  def _run(self):
    """Execute the download and tarring."""
    self.parameterDict = DEncode.decode(self.operation.Arguments)[0]  # tuple: dict, number of characters
    self.cacheFolder = os.path.join(self.cacheFolder, self.request.RequestName)
    self._checkArchiveLFN()
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # for iteration, but also works on Python 3
    for parameter, value in self.parameterDict.items():
      self.log.info('Parameters: %s = %s' % (parameter, value))
    self.log.info('Cache folder: %r' % self.cacheFolder)
    self.waitingFiles = self.getWaitingFilesList()
    self.lfns = [opFile.LFN for opFile in self.waitingFiles]
    self._checkReplicas()
    self._downloadFiles()
    self._tarFiles()
    self._uploadTarBall()
    self._registerDescendent()
    self._markFilesDone()

  def _checkArchiveLFN(self):
    """Make sure the archive LFN does not exist yet."""
    archiveLFN = self.parameterDict['ArchiveLFN']
    exists = returnSingleResult(self.fc.isFile(archiveLFN))
    self.log.debug('Checking for Tarball existence %r' % exists)
    if exists['OK'] and exists['Value']:
      raise RuntimeError('Tarball %r already exists' % archiveLFN)

  def _checkReplicas(self):
    """Make sure the source files are at the sourceSE."""
    resReplica = self.fc.getReplicas(self.lfns)
    if not resReplica['OK']:
      self.log.error('Failed to get replica information:', resReplica['Message'])
      raise RuntimeError('Failed to get replica information')
    atSource = []
    notAt = []
    failed = []
    sourceSE = self.parameterDict['SourceSE']
    for lfn, replInfo in resReplica['Value']['Successful'].items():
      if sourceSE in replInfo:
        atSource.append(lfn)
      else:
        self.log.warn('LFN %r not found at source, only at: %s' % (lfn, ','.join(replInfo.keys())))
        notAt.append(lfn)
    for lfn, errorMessage in resReplica['Value']['Failed'].items():
      self.log.warn('Failed to get replica info', '%s: %s' % (lfn, errorMessage))
      if 'No such file or directory' in errorMessage:
        # missing files are tolerated here; _downloadFiles marks them Done
        continue
      failed.append(lfn)
    if failed:
      self.log.error('LFNs failed to get replica info:', '%r' % ' '.join(failed))
      raise RuntimeError('Failed to get some replica information')
    if notAt:
      self.log.error('LFNs not at sourceSE:', '%r' % ' '.join(notAt))
      raise RuntimeError('Some replicas are not at the source')

  def _downloadFiles(self):
    """Download the files."""
    self._checkFilePermissions()
    for index, opFile in enumerate(self.waitingFiles):
      lfn = opFile.LFN
      self.log.info('Processing file (%d/%d) %r' % (index, len(self.waitingFiles), lfn))
      sourceSE = self.parameterDict['SourceSE']
      attempts = 0
      # mirror the LFN directory structure below the cache folder
      destFolder = os.path.join(self.cacheFolder, os.path.dirname(lfn)[1:])
      self.log.debug('Local Cache Folder: %s' % destFolder)
      if not os.path.exists(destFolder):
        os.makedirs(destFolder)
      while True:
        attempts += 1
        download = returnSingleResult(self.dm.getFile(lfn, destinationDir=destFolder, sourceSE=sourceSE))
        if download['OK']:
          self.log.info('Downloaded file %r to %r' % (lfn, destFolder))
          break
        errorString = download['Message']
        self.log.error('Failed to download file:', errorString)
        opFile.Error = errorString
        opFile.Attempt += 1
        self.operation.Error = opFile.Error
        if 'No such file or directory' in opFile.Error:
          # The File does not exist, we just ignore this and continue, otherwise we never archive the other files
          opFile.Status = 'Done'
          break
        if attempts > 10:
          self.log.error('Completely failed to download file:', errorString)
          raise RuntimeError('Completely failed to download file: %s' % errorString)
    return

  def _checkFilePermissions(self):
    """Check that the request owner has permission to read and remove the files.

    Otherwise the error might show up after considerable time was spent.
    """
    permissions = self.fc.hasAccess(self.lfns, 'removeFile')
    if not permissions['OK']:
      raise RuntimeError('Could not resolve permissions')
    if permissions['Value']['Failed']:
      for lfn in permissions['Value']['Failed']:
        self.log.error('Cannot archive file:', lfn)
        # mark the offending operation file as failed
        for opFile in self.waitingFiles:
          if opFile.LFN == lfn:
            opFile.Status = 'Failed'
            opFile.Error = 'Permission denied'
            break
      raise RuntimeError('Do not have sufficient permissions')
    return

  def _tarFiles(self):
    """Tar the files."""
    tarFileName = os.path.splitext(os.path.basename(self.parameterDict['ArchiveLFN']))[0]
    # the first path element of the ArchiveLFN is the directory to archive
    baseDir = self.parameterDict['ArchiveLFN'].strip('/').split('/')[0]
    shutil.make_archive(tarFileName, format='tar', root_dir=self.cacheFolder, base_dir=baseDir,
                        dry_run=False, logger=self.log)

  def _uploadTarBall(self):
    """Upload the tarball to specified LFN."""
    lfn = self.parameterDict['ArchiveLFN']
    self.log.info('Uploading tarball to %r' % lfn)
    localFile = os.path.basename(lfn)
    tarballSE = self.parameterDict['TarballSE']
    upload = returnSingleResult(self.dm.putAndRegister(lfn, localFile, tarballSE))
    if not upload['OK']:
      raise RuntimeError('Failed to upload tarball: %s' % upload['Message'])
    self.log.verbose('Uploading finished')

  def _registerDescendent(self):
    """Register the tarball as a descendent of the archived LFNs.

    Actually registers all LFNs as an ancestor to the Tarball.
    """
    registerDescendents = self.parameterDict.get('RegisterDescendent', None)
    if not registerDescendents:
      self.log.verbose('Will not register tarball as descendent to the Archived LFNs.')
      return
    self.log.info('Will register tarball as descendent to the Archived LFNs.')
    tarballLFN = self.parameterDict['ArchiveLFN']
    ancestorDict = {tarballLFN: {'Ancestors': self.lfns}}
    # retry up to three times; for-else raises only when no attempt succeeded
    for _trial in range(3):
      resAncestors = returnSingleResult(self.fc.addFileAncestors(ancestorDict))
      if resAncestors['OK']:
        break
    else:
      self.log.error('Failed to register ancestors', resAncestors['Message'])
      raise RuntimeError('Failed to register ancestors')
    self.log.info('Successfully registered ancestors')

  def _markFilesDone(self):
    """Mark all the files as done."""
    self.log.info('Marking files as done')
    for opFile in self.waitingFiles:
      opFile.Status = 'Done'

  def _cleanup(self):
    """Remove the tarball and the downloaded files."""
    self.log.info('Cleaning files and tarball')
    try:
      if 'ArchiveLFN' in self.parameterDict:
        os.remove(os.path.basename(self.parameterDict['ArchiveLFN']))
    except OSError as e:
      self.log.debug('Error when removing tarball: %s' % str(e))
    try:
      shutil.rmtree(self.cacheFolder, ignore_errors=True)
    except OSError as e:
      self.log.debug('Error when removing cacheFolder: %s' % str(e))
| fstagni/DIRAC | DataManagementSystem/Agent/RequestOperations/ArchiveFiles.py | Python | gpl-3.0 | 9,341 | [
"DIRAC"
] | ea02c3cef27164b1246c1f568608c9837214c2e6a8c846e875c58b72e77fde2c |
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads the latest GraphicsFuzz AmberScript tests from vk-gl-cts.
Downloads the latest tests, including those that are pending.
"""
import argparse
import sys
from pathlib import Path
from gfauto import (
amber_converter,
binaries_util,
fuzz,
gerrit_util,
settings_util,
subprocess_util,
util,
)
def download_cts_graphicsfuzz_tests(  # pylint: disable=too-many-locals;
    git_tool: Path, cookie: str, output_tests_dir: Path,
) -> Path:
    """Download the latest GraphicsFuzz AmberScript tests from vk-gl-cts.

    Fetches the archive of the most recent dEQP change from Gerrit, applies
    the patches of all pending GraphicsFuzz CLs on top of it, and copies the
    resulting graphicsfuzz test directory into |output_tests_dir|.

    :param git_tool: path to the git executable.
    :param cookie: Gerrit authentication cookie (Khronos membership required).
    :param output_tests_dir: destination directory for the .amber tests.
    :return: |output_tests_dir|.
    """
    # work in a uniquely named temporary directory
    work_dir = Path() / "temp" / ("cts_" + fuzz.get_random_name())
    # find the most recent dEQP change and its current revision
    latest_change = gerrit_util.get_latest_deqp_change(cookie)
    latest_change_number = latest_change["_number"]
    latest_change_details = gerrit_util.get_gerrit_change_details(
        change_number=latest_change_number, cookie=cookie
    )
    current_revision = latest_change_details["current_revision"]
    # download and unpack the full CTS tree at that revision
    cts_archive_path = gerrit_util.download_gerrit_revision(
        output_path=work_dir / "cts.tgz",
        change_number=latest_change_number,
        revision=current_revision,
        download_type=gerrit_util.DownloadType.Archive,
        cookie=cookie,
    )
    cts_dir_name = "cts_temp"
    cts_out = util.extract_archive(cts_archive_path, work_dir / cts_dir_name)
    # collect the patch (diff) of every pending GraphicsFuzz CL
    pending_graphicsfuzz_changes = gerrit_util.get_deqp_graphicsfuzz_pending_changes(
        cookie
    )
    for pending_change in pending_graphicsfuzz_changes:
        change_number = pending_change["_number"]
        change_details = gerrit_util.get_gerrit_change_details(
            change_number=change_number, cookie=cookie
        )
        current_revision = change_details["current_revision"]
        patch_zip = gerrit_util.download_gerrit_revision(
            output_path=work_dir / f"{change_number}.zip",
            change_number=change_number,
            revision=current_revision,
            download_type=gerrit_util.DownloadType.Patch,
            cookie=cookie,
        )
        util.extract_archive(patch_zip, work_dir)
    # Create a dummy git repo in the work directory, otherwise "git apply" can fail silently.
    # --unsafe-paths is possibly supposed to address this, but it doesn't seem to work if we
    # are already in a git repo.
    subprocess_util.run(
        [str(git_tool), "init", "."], verbose=True, working_dir=work_dir
    )
    # apply all pending patches, restricted to the graphicsfuzz test data
    # (the index file is excluded because each CL modifies it and the
    # patches would conflict)
    cmd = [str(git_tool), "apply"]
    patch_names = [p.name for p in work_dir.glob("*.diff")]
    cmd += patch_names
    # Use unix-style path for git.
    cmd += [
        "--verbose",
        "--unsafe-paths",
        f"--directory={cts_dir_name}",
        f"--exclude={cts_dir_name}/external/vulkancts/data/vulkan/amber/graphicsfuzz/index.txt",
        f"--include={cts_dir_name}/external/vulkancts/data/vulkan/amber/graphicsfuzz/*",
    ]
    subprocess_util.run(cmd, verbose=True, working_dir=work_dir)
    # copy the patched graphicsfuzz test directory to the requested location
    util.copy_dir(
        cts_out
        / "external"
        / "vulkancts"
        / "data"
        / "vulkan"
        / "amber"
        / "graphicsfuzz",
        output_tests_dir,
    )
    # Sometimes dEQP contributors add non-GraphicsFuzz AmberScript files to the graphicsfuzz directory.
    # We remove these.
    bad_test_names = ["texel_offset.amber"]
    for bad_test_name in bad_test_names:
        bad_test = output_tests_dir / bad_test_name
        if bad_test.is_file():
            bad_test.unlink()
    return output_tests_dir
# How-to text (shown in --help) for obtaining the Gerrit authentication
# cookie that all Gerrit requests in this module require.
GERRIT_COOKIE_INSTRUCTIONS = (
    "Log in to the Khronos Gerrit page in your "
    "browser and paste the following into the JavaScript console (F12) to copy the cookie to your clipboard: "
    "copy( document.cookie.match( /GerritAccount=([^;]*)/ )[1])"
)
def extract_shaders(tests_dir: Path, binaries: binaries_util.BinaryManager) -> None:
    """Extract the shaders of every .amber test and bundle its files.

    For each .amber file in |tests_dir|, the shaders are extracted next to
    the test file and all files sharing the test's stem are packed into a
    .zip archive alongside it.
    """
    for test_file in sorted(tests_dir.glob("*.amber")):
        amber_converter.extract_shaders(
            test_file, output_dir=test_file.parent, binaries=binaries
        )
        # bundle the test plus its extracted shaders into <stem>.zip
        entries = []
        for related_file in sorted(tests_dir.glob(f"{test_file.stem}.*")):
            entries.append(util.ZipEntry(related_file, Path(related_file.name)))
        util.create_zip(test_file.with_suffix(".zip"), entries)
def main() -> None:
    """Command-line entry point: download the tests and extract shaders."""
    argument_parser = argparse.ArgumentParser(
        description="Downloads the latest GraphicsFuzz AmberScript tests from vk-gl-cts, "
        "including those in pending CLs. "
        "Requires Git. Requires Khronos membership."
    )
    argument_parser.add_argument(
        "gerrit_cookie",
        help="The Gerrit cookie used for authentication. Requires Khronos membership. Obtain this as follows. "
        + GERRIT_COOKIE_INSTRUCTIONS,
    )
    argument_parser.add_argument(
        "--settings",
        help="Path to the settings JSON file for this instance.",
        default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
    )
    args = argument_parser.parse_args(sys.argv[1:])

    cookie: str = args.gerrit_cookie
    settings_path = Path(args.settings)

    # Need git.
    git_tool = util.tool_on_path("git")

    settings = settings_util.read_or_create(settings_path)
    binaries = binaries_util.get_default_binary_manager(settings=settings)

    tests_dir = Path() / "graphicsfuzz"
    download_cts_graphicsfuzz_tests(git_tool, cookie, tests_dir)
    extract_shaders(tests_dir, binaries)


if __name__ == "__main__":
    main()
| google/graphicsfuzz | gfauto/gfauto/download_cts_gf_tests.py | Python | apache-2.0 | 5,898 | [
"Amber"
] | 497d6d620bf72c27e5ec98967c4e57410f689b1f39148159acd25c92b7d68d86 |
"""
ComponentMonitoring type used to monitor DIRAC components.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DIRAC.MonitoringSystem.Client.Types.BaseType import BaseType
__RCSID__ = "$Id$"
class ComponentMonitoring(BaseType):
  """Monitoring type used for DIRAC components.

  .. class:: ComponentMonitoring
  """

  def __init__(self):
    """Define the key fields, the monitored quantities and the index mapping."""
    super(ComponentMonitoring, self).__init__()
    # fields identifying a monitored component instance
    self.keyFields = ['host', 'component', 'pid', 'status',
                      'componentType', 'componentLocation']
    # quantities recorded for each component
    self.monitoringFields = ['runningTime', 'memoryUsage', 'threads', 'cpuPercentage',
                             'Connections', 'PendingQueries', 'ActiveQueries',
                             'RunningThreads', 'MaxFD', 'ServiceResponseTime',
                             'cycleDuration', 'cycles']
    # string-valued key fields are mapped as (non-analyzed) keywords
    keywordMapping = {}
    for fieldName in ("host", "component", "status",
                      "componentType", "componentLocation"):
      keywordMapping[fieldName] = {"type": "keyword"}
    self.addMapping(keywordMapping)
    # retention period: 30 days, expressed in seconds
    self.dataToKeep = 86400 * 30
    self.period = "month"
    self.checkType()
| yujikato/DIRAC | src/DIRAC/MonitoringSystem/Client/Types/ComponentMonitoring.py | Python | gpl-3.0 | 1,284 | [
"DIRAC"
] | 8380193ffb06d1101086d3f862945ccca84c50b61fe30336ba7a4a5e065c944d |
import abc
import numbers
import warnings
import h5py
import numpy as np
from . import bg_estimate
#: default hdf5 compression keyword arguments
#: (gzip at maximum compression level 9)
COMPRESSION = {"compression": "gzip",
               "compression_opts": 9,
               }
#: valid background data identifiers
#: ("data": experimental background data, "fit": estimated background
#: correction; see :meth:`ImageData.get_bg`)
VALID_BG_KEYS = ["data",
                 "fit",
                 ]
class ImageData(object):
"""Base class for image management
See Also
--------
Amplitude: ImageData with amplitude background correction
Phase: ImageData with phase background correction
"""
__metaclass__ = abc.ABCMeta
def __init__(self, h5, h5dtype="float32"):
"""
Parameters
----------
h5: h5py.Group
HDF5 group where all data is kept
h5dtype: str
The datatype in which to store the image data. The default
is "float32" which is sufficient for 2D image analysis and
consumes only half the disk space of the numpy default
"float64".
"""
self.h5dtype = np.dtype(h5dtype)
self.h5 = h5
if "bg_data" not in self.h5:
self.h5.create_group("bg_data")
def __repr__(self):
name = self.__class__.__name__
rep = "{name} image, {x}x{y}px".format(name=name,
x=self.raw.shape[0],
y=self.raw.shape[1],
)
return rep
def __setitem__(self, key, value):
"""Image data setter
If `value` is None, then `key` is removed from `self.h5`.
The datatype `self.h5dtype` is used, unless the input
array is boolean.
"""
if value is None:
if key in self.h5:
del self.h5[key]
else:
if value.dtype == np.dtype("bool"):
h5dtype = "bool"
else:
h5dtype = self.h5dtype
write_image_dataset(group=self.h5,
key=key,
data=value,
h5dtype=h5dtype)
    @abc.abstractmethod
    def _bg_combine(self, *bgs):
        """Combine several background images

        Abstract hook: subclasses define how the datasets stored under
        the "bg_data" group are merged into the single array returned
        by :attr:`bg`.
        """
    @abc.abstractmethod
    def _bg_correct(self, raw, bg):
        """Remove `bg` from `raw` image data

        Abstract hook used by :attr:`image` to apply the combined
        background to the raw data.
        """
    @property
    def bg(self):
        """combined background image data

        All datasets under the "bg_data" group merged via the
        subclass-specific :meth:`_bg_combine`.
        """
        return self._bg_combine(self.h5["bg_data"].values())
    @property
    def image(self):
        """background corrected image data

        Computed on the fly via :meth:`_bg_correct` from :attr:`raw`
        and :attr:`bg`.
        """
        return self._bg_correct(self.raw, self.bg)
    @property
    def info(self):
        """list of background correction parameters

        Tuples of (parameter name, value) collected from the attributes
        of the stored background datasets; for fitted backgrounds an
        additional flag states whether a mask image was used.
        """
        info = []
        name = self.__class__.__name__.lower()
        # get bg information
        for key in VALID_BG_KEYS:
            if key in self.h5["bg_data"]:
                attrs = self.h5["bg_data"][key].attrs
                for akey in attrs:
                    atr = attrs[akey]
                    var = "{} background {}".format(name, akey)
                    info.append((var, atr))
        if "fit" in self.h5["bg_data"]:
            # mask background
            var_mask = "{} background from mask".format(name)
            if ("estimate_bg_from_mask" in self.h5
                    and self.h5["estimate_bg_from_mask"] is not None):
                # bg was computed from mask image
                info.append((var_mask, True))
            elif ("estimate_bg_from_binary" in self.h5
                    and self.h5["estimate_bg_from_binary"] is not None):
                # bg was computed from mask image (old notation)
                warnings.warn("Old file format detected!", DeprecationWarning)
                info.append((var_mask, True))
            else:
                info.append((var_mask, False))
        return info
    @property
    def raw(self):
        """raw (uncorrected) image data

        Returns the full "raw" dataset (copied into memory via slicing).
        """
        return self.h5["raw"][:]
def del_bg(self, key):
"""Remove the background image data
Parameters
----------
key: str
One of :const:`VALID_BG_KEYS`
"""
if key not in VALID_BG_KEYS:
raise ValueError("Invalid bg key: {}".format(key))
if key in self.h5["bg_data"]:
del self.h5["bg_data"][key]
else:
msg = "No bg data to clear for '{}' in {}.".format(key, self)
warnings.warn(msg)
    def estimate_bg(self, fit_offset="mean", fit_profile="tilt",
                    border_px=0, from_mask=None, ret_mask=False):
        """Estimate image background

        Parameters
        ----------
        fit_profile: str
            The type of background profile to fit:

            - "offset": offset only
            - "poly2o": 2D 2nd order polynomial with mixed terms
            - "tilt": 2D linear tilt with offset (default)
        fit_offset: str
            The method for computing the profile offset

            - "fit": offset as fitting parameter
            - "gauss": center of a gaussian fit
            - "mean": simple average
            - "mode": mode (see `qpimage.bg_estimate.mode`)
        border_px: float
            Assume that a frame of `border_px` pixels around
            the image is background.
        from_mask: boolean np.ndarray or None
            Use a boolean array to define the background area.
            The mask image must have the same shape as the
            input data.`True` elements are used for background
            estimation.
        ret_mask: bool
            Return the mask image used to compute the background.

        Notes
        -----
        If both `border_px` and `from_mask` are given, the
        intersection of the two resulting mask images is used.

        The arguments passed to this method are stored in the
        hdf5 file `self.h5` and are used for optional integrity
        checking using `qpimage.integrity_check.check`.

        See Also
        --------
        qpimage.bg_estimate.estimate
        """
        # remove existing bg before accessing imdat.image
        self.set_bg(bg=None, key="fit")
        # compute bg
        bgimage, mask = bg_estimate.estimate(data=self.image,
                                             fit_offset=fit_offset,
                                             fit_profile=fit_profile,
                                             border_px=border_px,
                                             from_mask=from_mask,
                                             ret_mask=True)
        # bookkeeping attributes for qpimage.integrity_check
        attrs = {"fit_offset": fit_offset,
                 "fit_profile": fit_profile,
                 "border_px": border_px}
        self.set_bg(bg=bgimage, key="fit", attrs=attrs)
        # save `from_mask` separately (arrays vs. h5 attributes)
        # (if `from_mask` is `None`, this will remove the array)
        self["estimate_bg_from_mask"] = from_mask
        # return mask image
        if ret_mask:
            return mask
def get_bg(self, key=None, ret_attrs=False):
"""Get the background data
Parameters
----------
key: None or str
A user-defined key that identifies the background data.
Examples are "data" for experimental data, or "fit"
for an estimated background correction
(see :const:`VALID_BG_KEYS`). If set to `None`,
returns the combined background image (:const:`ImageData.bg`).
ret_attrs: bool
Also returns the attributes of the background data.
"""
if key is None:
if ret_attrs:
raise ValueError("No attributes for combined background!")
return self.bg
else:
if key not in VALID_BG_KEYS:
raise ValueError("Invalid bg key: {}".format(key))
if key in self.h5["bg_data"]:
data = self.h5["bg_data"][key][:]
if ret_attrs:
attrs = dict(self.h5["bg_data"][key].attrs)
# remove keys for image visualization in hdf5 files
for h5k in ["CLASS", "IMAGE_VERSION", "IMAGE_SUBCLASS"]:
if h5k in attrs:
attrs.pop(h5k)
ret = (data, attrs)
else:
ret = data
else:
raise KeyError("No background data for {}!".format(key))
return ret
def set_bg(self, bg, key="data", attrs={}):
"""Set the background data
Parameters
----------
bg: numbers.Real, 2d ndarray, ImageData, or h5py.Dataset
The background data. If `bg` is an `h5py.Dataset` object,
it must exist in the same hdf5 file (a hard link is created).
If set to `None`, the data will be removed.
key: str
One of :const:`VALID_BG_KEYS`)
attrs: dict
List of background attributes
See Also
--------
del_bg: removing background data
"""
if key not in VALID_BG_KEYS:
raise ValueError("Invalid bg key: {}".format(key))
# remove previous background key
if key in self.h5["bg_data"]:
del self.h5["bg_data"][key]
# set background
if isinstance(bg, (numbers.Real, np.ndarray)):
dset = write_image_dataset(group=self.h5["bg_data"],
key=key,
data=bg,
h5dtype=self.h5dtype)
for kw in attrs:
dset.attrs[kw] = attrs[kw]
elif isinstance(bg, h5py.Dataset):
# Create a hard link
# (This functionality was intended for saving memory when storing
# large QPSeries with universal background data, i.e. when using
# `QPSeries.add_qpimage` with the `bg_from_idx` keyword.)
self.h5["bg_data"][key] = bg
elif bg is not None:
msg = "Unknown background data type: {}".format(bg)
raise ValueError(msg)
class Amplitude(ImageData):
    """Dedicated class for amplitude image data

    For amplitude image data, background correction is defined
    by dividing the raw image by the background image.
    """

    def _bg_combine(self, bgs):
        """Multiply all background amplitude images into one array"""
        combined = np.ones(self.h5["raw"].shape, dtype=float)
        for dataset in bgs:
            # `dataset` is an h5py.Dataset; `[:]` loads it as ndarray
            combined = combined * dataset[:]
        return combined

    def _bg_correct(self, raw, bg):
        """Divide the raw amplitude image by the background"""
        return raw / bg
class Phase(ImageData):
    """Dedicated class for phase image data

    For phase image data, background correction is defined
    by subtracting the background image from the raw image.
    """

    def _bg_combine(self, bgs):
        """Sum all background phase images into one array"""
        combined = np.zeros(self.h5["raw"].shape, dtype=float)
        for dataset in bgs:
            # `dataset` is an h5py.Dataset; `[:]` loads it as ndarray
            combined = combined + dataset[:]
        return combined

    def _bg_correct(self, raw, bg):
        """Subtract the background from the raw phase image"""
        return raw - bg
def write_image_dataset(group, key, data, h5dtype=None):
    """Write an image to an hdf5 group as a dataset

    This convenience function sets all attributes such that the image
    can be visualized with HDFView, sets the compression and fletcher32
    filters, and sets the chunk size to the image shape.

    Parameters
    ----------
    group: h5py.Group
        HDF5 group to store data to
    key: str
        Dataset identifier
    data: np.ndarray of shape (M,N)
        Image data to store
    h5dtype: str
        The datatype in which to store the image data. The default
        is the datatype of `data`.

    Returns
    -------
    dataset: h5py.Dataset
        The created HDF5 dataset object
    """
    if h5dtype is None:
        h5dtype = data.dtype
    # Replace a pre-existing dataset of the same name.
    if key in group:
        del group[key]
    if group.file.driver == "core":
        # In-memory ("core" driver) files gain nothing from
        # checksums/chunking, so no filter kwargs are used.
        kwargs = {}
    else:
        kwargs = {"fletcher32": True,
                  "chunks": data.shape}
        # COMPRESSION is a module-level dict of h5py compression
        # keyword arguments (defined elsewhere in this file).
        kwargs.update(COMPRESSION)
    # Cast to the requested storage dtype on write.
    dset = group.create_dataset(key,
                                data=data.astype(h5dtype),
                                **kwargs)
    return dset
| RI-imaging/qpimage | qpimage/image_data.py | Python | mit | 12,445 | [
"Gaussian"
] | eb5e219c577139d7e56420c20e688d30ab37fae65c3edcb5ccd6e8367e439937 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RYarn(RPackage):
    """Expedite large RNA-Seq analyses using a combination of previously
    developed tools. YARN is meant to make it easier for the user in
    performing basic mis-annotation quality control, filtering, and
    condition-aware normalization. YARN leverages many Bioconductor tools
    and statistical techniques to account for the large heterogeneity and
    sparsity found in very large RNA-seq experiments."""

    # Fixed: the homepage URL had been garbled to
    # "http://www.example.co://www.bioconductor.org/packages/yarn/";
    # restored to the canonical Bioconductor landing page.
    homepage = "https://www.bioconductor.org/packages/yarn/"
    url = "https://git.bioconductor.org/packages/yarn"
    list_url = homepage

    version('1.2.0', git='https://git.bioconductor.org/packages/yarn', commit='28af616ef8c27dcadf6568e276dea8465486a697')

    # R package dependencies, needed at both build and run time.
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-biomart', type=('build', 'run'))
    depends_on('r-downloader', type=('build', 'run'))
    depends_on('r-edger', type=('build', 'run'))
    depends_on('r-gplots', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-readr', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-quantro', type=('build', 'run'))
    # yarn 1.2.0 requires the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.2.0')
| skosukhin/spack | var/spack/repos/builtin/packages/r-yarn/package.py | Python | lgpl-2.1 | 2,616 | [
"Bioconductor"
] | da87c28c551a254dcdae358956b5fccc0b994b001a606f9f272bcbed93c7a860 |
#!/usr/bin/env python
"""
initial.py
ROMS initial conditions utilities
Written by Brian Powell on 01/15/14
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
import seapy
import numpy as np
import netCDF4
def from_roms(roms_file, ini_file, record=0, time=None, grid=None):
    """
    Given a ROMS history, average, or climatology file, generate
    initial conditions on the same grid.

    Parameters
    ----------
    roms_file: string or list
        Input ROMS source (history, average, climatology file)
    ini_file: string
        Input name for output initial condition file
    record: int
        Input index to use as initial condition
    time: datetime, optional
        Input datetime to use for the initial condition
        (defaults to the time of `record` in the source file)
    grid: seapy.model.grid or string, optional
        Input ROMS grid: specify the grid if loaded or filename to load

    Returns
    -------
    None
    """
    # Load the grid: either the one provided, or derive it from the
    # source file itself.
    if grid is None:
        grid = seapy.model.asgrid(roms_file)
    else:
        grid = seapy.model.asgrid(grid)
    ncroms = seapy.netcdf(roms_file)
    src_ref, romstime = seapy.roms.get_reftime(ncroms)
    # Create the initial file and fill up the descriptive data
    ncini = seapy.roms.ncgen.create_ini(ini_file,
                                        eta_rho=grid.eta_rho,
                                        xi_rho=grid.xi_rho,
                                        s_rho=grid.n,
                                        reftime=src_ref,
                                        title="generated from " + roms_file)
    grid.to_netcdf(ncini)
    # Default to the time stamp of the selected source record.
    if time is None:
        time = netCDF4.num2date(ncroms.variables[romstime][record],
                                ncroms.variables[romstime].units)
    ncini.variables["ocean_time"][:] = netCDF4.date2num(
        time, ncini.variables["ocean_time"].units)
    # Fill up the initial state with the roms file data
    for var in seapy.roms.fields:
        ncini.variables[var][0, :] = ncroms.variables[var][record, :]
    # Close up (removed a dead trailing `pass` statement).
    ncini.close()
    ncroms.close()
| dalepartridge/seapy | roms/initial.py | Python | mit | 2,042 | [
"Brian",
"NetCDF"
] | 58442d6e64224b3ce21823261c1eb83b14acb198c3c9c49debdbf6e8e7fbdbcf |
import argparse
import os
import subprocess
from pylab import *
# Names of the MTLR executables (resolved to full paths in main()).
mtlr_train = 'mtlr_train';
mtlr_test = 'mtlr_test';
# Scratch file where each trained model is written before evaluation.
model_temp = './model.tmp';
# Per-fold train/validation data file names (set in main()).
train_data = None;
test_data = None;
# Path of the time-interval definition file and its line count
# (populated in main()/findc()).
intervals_file = None;
intervals_number = 0;
# Absolute paths of the cross-validation fold directories.
cv_dirs = [];
def findc(k, start, end, loss):
    """
    Find the best C1 value using k-fold cross validation.

    Performs a binary search over [start, end]: at each step the
    midpoint is evaluated with testc() and the bracket is narrowed
    towards the C with the lowest error. Ties prefer larger C values.

    Parameters
    ==========
    k : number of cross-validation folds (forwarded to testc).
    start : lower bound of the C search range.
    end : upper bound of the C search range.
    loss : loss-function keyword passed to mtlr_test.

    Returns
    =======
    0 on completion (the best C and its error are printed).
    """
    global intervals_file, intervals_number
    # Count the number of time intervals (one per line).
    # Fixed: the original used open(..., 'rbU') and never closed the
    # handle; mode 'U' is deprecated in Python 3 and removed in 3.11,
    # and combining it with 'b' is invalid. Plain text mode suffices.
    with open(intervals_file, 'r') as fh:
        intervals_number = sum(1 for _ in fh)
    # Evaluate the start of the range first; it is the initial minimum.
    minc = start
    minerror = testc(start, k, loss)
    # Binary search until the bracket is sufficiently small.
    while (start < end and (end - start) > 0.05):
        # Evaluate the mid-point C and its error.
        midc = float((start + end) / 2)
        miderror = testc(midc, k, loss)
        if miderror <= minerror:
            # Mid-point is at least as good: it becomes the new minimum
            # (the `<=` deliberately prefers larger C on ties).
            minc = midc
            minerror = miderror
            print("*C: {0}, error: {1}".format(minc, minerror))
            # Advance the lower bound towards the end.
            start = minc
        else:
            # Mid-point is worse: shrink the upper bound.
            end = midc
    # Report the best C found.
    print("*C: {0}, error: {1}".format(minc, minerror))
    return 0
def testc(c, k, loss):
    """
    Evaluate a single C value using cross validation.

    For every fold directory in `cv_dirs`, trains an MTLR model on the
    fold's training split, evaluates it on the validation split, and
    parses the l2-log error from the mtlr_test output.

    Parameters
    ==========
    c : the C1 regularization value to evaluate.
    k : number of folds; unused here (folds are taken from `cv_dirs`).
    loss : loss-function keyword passed to mtlr_test via -l.

    Returns
    =======
    The mean error across all folds.
    """
    global mtlr_test, mtlr_train, model_temp;
    global train_data, test_data;
    global intervals_file, intervals_number;
    global cv_dirs;
    # Build the base command for training
    base_train = [mtlr_train, '-d', '0']; # Command and c1
    base_train += ['-m', str(intervals_number)]; # Add time points
    base_train += ['-q', intervals_file]; # Intervals
    base_train += ['-o', model_temp]; # Output
    # Build the base command for testing
    base_test = [mtlr_test, '-l', loss]; # Command and loss fn
    base_test += ['-m', str(intervals_number)]; # Add time points
    base_test += ['-q', intervals_file]; # Intervals
    base_test += ['-o', model_temp]; # Model
    # Array of per-fold errors
    errors = [];
    # Train and evaluate on every cross-validation fold
    for currentdir in cv_dirs:
        # Get the current fold train and validation datasets
        traindata = os.path.join(currentdir, train_data);
        testdata = os.path.join(currentdir, test_data);
        # Form the current training command
        current_train = base_train.copy();
        current_train += ['-i', traindata]; # Training data
        current_train += ['-c', str(c)]; # Current C
        # Form the current testing command
        current_test = base_test.copy();
        current_test += ['-i', testdata]; # Testing data
        # Train the model (training output is discarded)
        subprocess.call(current_train, stdout=subprocess.DEVNULL);
        # Test the model
        out = subprocess.check_output(current_test);
        # Parse the output to get the error: keep the text between the
        # "l2-log-" marker and the "#avg log-like" line
        out = out.decode('ascii');
        out = out[out.index("l2-log-"):out.index("\n#avg log-like")];
        # Append the error (second whitespace-separated token)
        errors.append(float(out.split(' ')[1]));
    print(" C: {0}, error: {1}".format(c, mean(errors)));
    # Return the mean error across folds
    return mean(errors);
def main():
    """
    Command-line entry point.

    Parses arguments, resolves and validates the mtlr executables,
    the cross-validation fold directories and the intervals file,
    then runs the binary search for the best C value.

    Returns
    =======
    0 on success; a negative error code when validation fails.
    """
    global mtlr_test, mtlr_train;
    global train_data, test_data;
    global intervals_file, intervals_number;
    global cv_dirs;
    parser = argparse.ArgumentParser(description='Finds the best C param.');
    parser.add_argument('--cross', '-c', dest='cvdir', required=True,
                        help='directory with crossvalidation output (foldX directories).');
    parser.add_argument('--ds', '-n', dest='name', required=True,
                        help='dataset name (not filename--so strip the .data).');
    parser.add_argument('--folds', '-f', dest='folds', default=5,
                        help='number of iterations for the crossvalidation.');
    parser.add_argument('--start', '-s', dest='start', default=1,
                        help='the start of the range of possible C values.');
    parser.add_argument('--end', '-e', dest='end', default=100,
                        help='the end of the range of possible C values.');
    parser.add_argument('--mtlr', '-p', dest='mtlr', default='.',
                        help='directory where the mtlr executables are located.');
    parser.add_argument('--loss', '-l', dest='loss', default='l2_log',
                        help='specific loss function to optimize for.');
    # Parse the arguments
    args = parser.parse_args();
    # Get the mtlr executables filenames
    mtlr_train = os.path.join(args.mtlr, 'mtlr_train');
    mtlr_test = os.path.join(args.mtlr, 'mtlr_test');
    # Check the mtlr_train executable
    if not os.path.isfile(mtlr_train):
        print("Error: {0} is not a file".format(mtlr_train));
        return -1;
    # Check the mtlr_test executable
    if not os.path.isfile(mtlr_test):
        print("Error: {0} is not a file".format(mtlr_test));
        return -2;
    # Get the training filenames
    train_data = args.name + '.train.data';
    test_data = args.name + '.valid.data';
    # Check if the crossvalidation dataset and interval files exist
    # (args.folds may be a string when given on the command line)
    for i in range(int(args.folds)):
        # Get the current fold name (folds are 1-indexed: fold1, fold2, ...)
        currentfold = "fold" + str(i+1);
        # Get the current fold directory
        currentdir = os.path.abspath(os.path.join(args.cvdir, currentfold));
        # Add the current fold directory to the array
        cv_dirs.append(currentdir);
        # Get the current fold filenames
        traindata = os.path.join(currentdir, train_data);
        testdata = os.path.join(currentdir, test_data);
        # Check the training files
        if not os.path.isfile(traindata):
            print("Error: Train files missing in {0}".format(currentdir));
            return -3;
        # Check the training files
        if not os.path.isfile(testdata):
            print("Error: Test files missing in {0}".format(currentdir));
            return -4;
    # Get the intervals filename
    intervals_file = os.path.join(args.cvdir, args.name + '.intervals');
    # Check the intervals file
    if not os.path.isfile(intervals_file):
        print("Error: Interval file missing: {0}".format(intervals_file));
        return -5;
    # Find the best C
    return findc(args.folds, float(args.start), float(args.end), args.loss);
# Script entry point.
if __name__ == '__main__':
    main();
"FoldX"
] | 737b9186b8f5fae86c3854700bb434f5341415e6e563f590c41a0492f5288338 |
"""
====================================================
How to convert 3D electrode positions to a 2D image.
====================================================
Sometimes we want to convert a 3D representation of electrodes into a 2D
image. For example, if we are using electrocorticography it is common to
create scatterplots on top of a brain, with each point representing an
electrode.
In this example, we'll show two ways of doing this in MNE-Python. First,
if we have the 3D locations of each electrode then we can use Mayavi to
take a snapshot of a view of the brain. If we do not have these 3D locations,
and only have a 2D image of the electrodes on the brain, we can use the
:class:`mne.viz.ClickableImage` class to choose our own electrode positions
on the image.
"""
# Authors: Christopher Holdgraf <choldgraf@berkeley.edu>
#
# License: BSD (3-clause)
from scipy.io import loadmat
import numpy as np
from mayavi import mlab
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage # noqa
from mne.viz import plot_trans, snapshot_brain_montage
print(__doc__)
# FreeSurfer subjects directory of the MNE sample dataset.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
# Sample ECoG electrode positions (MATLAB file from mne-misc-data).
path_data = mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat'
# We've already clicked and exported
# (the pre-recorded clicks live as a layout file shipped with MNE).
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data
# ---------
#
# First we'll load a sample ECoG dataset which we'll use for generating
# a 2D snapshot.
# Load channel names and electrode locations from the MATLAB file.
mat = loadmat(path_data)
ch_names = mat['ch_names'].tolist()
# Electrode positions; presumably one 3D coordinate per channel
# (row order matches ch_names) -- TODO confirm against the .mat file.
elec = mat['elec']
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
# Build a measurement info object for the ECoG channels (1 kHz rate).
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Project 3D electrodes to a 2D snapshot
# --------------------------------------
#
# Because we have the 3D location of each electrode, we can use the
# :func:`mne.viz.snapshot_brain_montage` function to return a 2D image along
# with the electrode positions on that image. We use this in conjunction with
# :func:`mne.viz.plot_trans`, which visualizes electrode positions.
# Plot the 3D electrode locations on the sample brain and take a
# 2D snapshot of the Mayavi scene.
fig = plot_trans(info, trans=None, subject='sample', subjects_dir=subjects_dir)
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig, mon)

# Convert from a dictionary to array to plot.
# Fixed: np.vstack requires a sequence of arrays; passing a bare
# generator expression is deprecated and rejected by recent NumPy,
# so a list comprehension is used instead.
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])

# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])

# This allows us to use matplotlib to create arbitrary 2d scatterplots
fig2, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
# fig2.savefig('./brain.png', bbox_inches='tight')  # For ClickableImage
###############################################################################
# Manually creating 2D electrode positions
# ----------------------------------------
#
# If we don't have the 3D electrode positions then we can still create a
# 2D representation of the electrodes. Assuming that you can see the electrodes
# on the 2D image, we can use :class:`mne.viz.ClickableImage` to open the image
# interactively. You can click points on the image and the x/y coordinate will
# be stored.
#
# We'll open an image file, then use ClickableImage to
# return 2D locations of mouse clicks (or load a file already created).
# Then, we'll return these xy positions as a layout for use with plotting topo
# maps.
# This code opens the image so you can click on it. Commented out
# because we've stored the clicks as a layout file already.
# # The click coordinates are stored as a list of tuples
# im = plt.imread('./brain.png')
# click = ClickableImage(im)
# click.plot_clicks()
# # Generate a layout from our clicks and normalize by the image
# print('Generating and saving layout...')
# lt = click.to_layout()
# lt.save(op.join(layout_path, layout_name)) # To save if we want
# # We've already got the layout, load it
# Load the pre-recorded click layout (normalized 0..1 coordinates).
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Scale normalized layout positions back to image pixel coordinates.
x = lt.pos[:, 0] * float(im.shape[1])
y = (1 - lt.pos[:, 1]) * float(im.shape[0])  # Flip the y-position
fig, ax = plt.subplots()
ax.imshow(im)
ax.scatter(x, y, s=120, color='r')
plt.autoscale(tight=True)
ax.set_axis_off()
plt.show()
| jaeilepp/mne-python | examples/visualization/plot_3d_to_2d.py | Python | bsd-3-clause | 4,488 | [
"Mayavi"
] | c7a96b53232b2e4465fd381756eeebdb3fc9748e7f21b772a4d3d8253a312f35 |
# $HeadURL$
"""
DIRAC - Distributed Infrastructure with Remote Agent Control
The LHCb distributed data production and analysis system.
DIRAC is a software framework for distributed computing which
allows to integrate various computing resources in a single
system. At the same time it integrates all kinds of computing
activities like Monte Carlo simulations, data processing, or
final user analysis.
It is build as number of cooperating systems:
- Accounting
- Configuration
- Core
- Base
- DISET
- Security
- Utilities
- Workflow
- Framework
- RequestManagement
- Resources
- Transformation
Which are used by other system providing functionality to
the end user:
- DataManagement
- Interfaces
- ResourceStatus
- StorageManagement
- WorkloadManagement
It defines the following data members:
- majorVersion: DIRAC Major version number
- minorVersion: DIRAC Minor version number
- patchLevel: DIRAC Patch level number
- preVersion: DIRAC Pre release number
- version: DIRAC version string
- buildVersion: DIRAC version string
- errorMail: mail address for important errors
- alarmMail: mail address for important alarms
- pythonPath: absolute real path to the directory that contains this file
- rootPath: absolute real path to the parent of DIRAC.pythonPath
- platform: DIRAC platform string for current host
- platformTuple: DIRAC platform tuple for current host
It loads Modules from :
- DIRAC.Core.Utililies
It loads:
- S_OK: OK return structure
- S_ERROR: ERROR return structure
- gLogger: global Logger object
- gConfig: global Config object
It defines the following functions:
- abort: aborts execution
- exit: finish execution using callbacks
- siteName: returns DIRAC name for current site
"""
__RCSID__ = "$Id$"
from pkgutil import extend_path
__path__ = extend_path( __path__, __name__ )
import platform as pyPlatform
import sys, os
# Define Version
# `version` is the canonical "vXrY[pZ][-preN]" string; `buildVersion`
# is the human-readable variant used in banners.
majorVersion = 6
minorVersion = 13
patchLevel = 0
preVersion = 0
version = "v%sr%s" % ( majorVersion, minorVersion )
buildVersion = "v%dr%d" % ( majorVersion, minorVersion )
if patchLevel:
  version = "%sp%s" % ( version, patchLevel )
  buildVersion = "%s build %s" % ( buildVersion, patchLevel )
if preVersion:
  version = "%s-pre%s" % ( version, preVersion )
  buildVersion = "%s pre %s" % ( buildVersion, preVersion )

# Check of python version
# Only Python 2.4-2.7 are supported; abort early otherwise
# (this is a legacy Python 2 module: note the print statements).
__pythonMajorVersion = ( "2", )
__pythonMinorVersion = ( "4", "5", "6", "7" )
pythonVersion = pyPlatform.python_version_tuple()
if str( pythonVersion[0] ) not in __pythonMajorVersion or str( pythonVersion[1] ) not in __pythonMinorVersion:
  print "Python Version %s not supported by DIRAC" % pyPlatform.python_version()
  print "Supported versions are: "
  for major in __pythonMajorVersion:
    for minor in __pythonMinorVersion:
      print "%s.%s.x" % ( major, minor )

  sys.exit( 1 )
errorMail = "dirac.alarms@gmail.com"
alarmMail = "dirac.alarms@gmail.com"
# Set rootPath of DIRAC installation
pythonPath = os.path.realpath( __path__[0] )
rootPath = os.path.dirname( pythonPath )
# Import DIRAC.Core.Utils modules
#from DIRAC.Core.Utilities import *
from DIRAC.Core.Utilities.Network import getFQDN
import DIRAC.Core.Utilities.ExitCallback as ExitCallback
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
# Logger
from DIRAC.FrameworkSystem.Client.Logger import gLogger
#Configuration client
from DIRAC.ConfigurationSystem.Client.Config import gConfig
# Some Defaults if not present in the configuration
# Derive a default site name from the host's fully qualified domain name.
FQDN = getFQDN()
if len( FQDN.split( '.' ) ) > 2 :
  # Use the last component of the FQDN as country code if there are more than 2 components
  _siteName = 'DIRAC.Client.%s' % FQDN.split( '.' )[-1]
else:
  # else use local as country code
  _siteName = 'DIRAC.Client.local'

# Cached site name; computed lazily by siteName() below.
__siteName = False
# # Update DErrno with the extensions errors
# from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
# from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
# allExtensions = CSGlobals.getCSExtensions()
#
# # Update for each extension. Careful to conflict :-)
# for extension in allExtensions:
# ol = ObjectLoader( baseModules = ["%sDIRAC" % extension] )
# extraErrorModule = ol.loadModule( 'Core.Utilities.DErrno' )
# if extraErrorModule['OK']:
# extraErrorModule = extraErrorModule['Value']
#
# # The next 3 dictionary MUST be present for consistency
#
# # Global name of errors
# DErrno.__dict__.update( extraErrorModule.extra_dErrName )
# # Dictionary with the error codes
# DErrno.dErrorCode.update( extraErrorModule.extra_dErrorCode )
# # Error description string
# DErrno.dStrError.update( extraErrorModule.extra_dStrError )
#
# # extra_compatErrorString is optional
# for err in getattr( extraErrorModule, 'extra_compatErrorString', [] ) :
# DErrno.compatErrorString.setdefault( err, [] ).extend( extraErrorModule.extra_compatErrorString[err] )
def siteName():
  """
  Return the DIRAC site name for the current host.

  The value is read once from '/LocalSite/Site' in the local
  configuration (falling back to the FQDN-derived default) and
  cached in the module-level `__siteName` for subsequent calls.
  """
  global __siteName
  if __siteName:
    return __siteName
  __siteName = gConfig.getValue( '/LocalSite/Site', _siteName )
  return __siteName
#Callbacks
# Install signal handlers so registered exit callbacks run on termination.
ExitCallback.registerSignals()

#Set the platform
from DIRAC.Core.Utilities.Platform import getPlatformString
# DIRAC platform string and its underscore-separated tuple form.
platform = getPlatformString()
platformTuple = tuple( platform.split( '_' ) )
def exit( exitCode = 0 ):
  """
  Finish execution using callbacks

  Runs all registered exit callbacks via ExitCallback.execute and then
  terminates the interpreter with sys.exit.

  :param exitCode: process exit status (default 0)
  """
  ExitCallback.execute( exitCode, [] )
  sys.exit( exitCode )
def abort( exitCode, *args, **kwargs ):
  """
  Abort execution immediately.

  Logs a fatal message (all arguments are forwarded to gLogger.fatal)
  and terminates the process via os._exit, bypassing exit callbacks
  and cleanup handlers.

  :param exitCode: process exit status
  """
  try:
    gLogger.fatal( *args, **kwargs )
    os._exit( exitCode )
  except Exception:
    # Fixed: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catching Exception is sufficient to
    # guarantee termination if logging itself fails.
    gLogger.exception( 'Error while executing DIRAC.abort' )
    os._exit( exitCode )
| vmendez/DIRAC | __init__.py | Python | gpl-3.0 | 5,953 | [
"DIRAC"
] | 0d0e2bf9035615d14978bc4cecafdc2034a76def84a0a2aa5c168f0a83b9d7eb |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=80 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2014 Raoul Snyman #
# Portions copyright (c) 2008-2014 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Ken Roberts #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
# Module version, also encoded as a hex int (major<<24 | minor<<16 | patch<<8).
__version__ = '0.0.1'
__v = __version__.split('.')
__version_hex__ = int(__v[0]) << 24 | \
                  int(__v[1]) << 16 | \
                  int(__v[2]) << 8
__module = 'projectors'

import logging
log = logging.getLogger(__name__)

import socket

log.info('ui.projectors installed')

# Set common constants
# PJLink commands are terminated with carriage return / line feed.
CR = chr(0x0D) # \r
LF = chr(0x0A) # \n
# Network timeout (seconds) for projector communication.
TIMEOUT = 30.0
from openlp.core.projectors.error_codes import ProjectorError
from openlp.core.projectors.error_codes import ProjectorFailedError
from openlp.core.projectors.error_codes import ProjectorFanFailure
from openlp.core.projectors.error_codes import ProjectorNetworkError
from openlp.core.projectors.error_codes import ProjectorNotConnectedError
from openlp.core.projectors.projectorbase import ProjectorBase
from openlp.core.projectors.projectortab import ProjectorSettingTab
from openlp.core.projectors.projectorform import ProjectorForm
from openlp.core.projectors.projectormanager import ProjectorManager
from openlp.core.projectors.db import Projector, ProjectorDBManager
# Public API of the projectors package.
# Fixed: the list contained the undefined name 'Projectors' (which would
# raise an error on `from ... import *`) and omitted the actually
# imported names `Projector` and `ProjectorSettingTab`.
__all__ = ['CR', 'LF', 'TIMEOUT',
           'ProjectorError',
           'ProjectorFailedError',
           'ProjectorFanFailure',
           'ProjectorNetworkError',
           'ProjectorNotConnectedError',
           'ProjectorBase',
           'ProjectorForm',
           'ProjectorSettingTab',
           'ProjectorDBManager',
           'ProjectorManager',
           'Projector'
           ]
"Brian"
] | 8b15b42f48b070e4244b710a93ead4cb9f4daef41c4de9bb1f49438c6932a3e2 |
# massfunctions.py: A collection of functions which convert between mass and
# cumulative number density
# Note: all masses and number densities assumed to be logarithmic
# Several functions require the mpmath package.
# Sarah Wellons 6/2016+
import numpy as np
import torrey_cmf
import scipy.interpolate as interp
from scipy.optimize import newton
def getnum(M, z, massfunc='zfourge', interpdir='N'):
    """
    Converts stellar mass to number density at the given redshift using the given mass function.
    Note: No checks are performed to ensure that the mass function is well-defined at the given
    parameters; it is incumbent upon the user to make an appropriate choice of mass function.

    Parameters
    ==========
    M : Stellar mass in units of log(Msun). May be a single value or an array.
    z : Redshift
    massfunc : Keyword for desired mass function. Currently available keywords include ['illustris',
        'zfourge', 'muzzin', 'ilbert', 'liwhite']. Defaults to 'zfourge'.
    interpdir : (Optional) ['N', 'M'] Indicates whether the mass functions should be interpolated
        across redshifts in log N or log M. Doesn't make a significant difference in most cases.

    Returns
    =======
    N : Comoving cumulative number density in units of log(Mpc^-3), same dimensions as M.
    """
    if massfunc == 'illustris':
        # Illustris provides its own fitting function; no interpolation needed.
        tc = torrey_cmf.number_density()
        return tc.cmf_fit(M,z)
    else:
        # Select the mass-function evaluator and its parameter sets at the
        # two tabulated redshifts bracketing z (par1[0]/par2[0] hold the
        # redshifts themselves, as used in the comparisons below).
        if massfunc == 'zfourge':
            mf = zfourge_function
            par1, par2 = zfourgeparams(z)
        elif massfunc == 'muzzin':
            mf = muzzin_function
            par1, par2 = muzzinparams(z)
        elif massfunc == 'ilbert':
            mf = ilbert_function
            par1, par2 = ilbertparams(z)
        elif massfunc == 'liwhite':
            mf = liwhite_function
            par1, par2 = liwhiteparams(z)
        else: raise ValueError("Unrecognized mass function. Available keywords include ['illustris', 'zfourge', 'muzzin', 'ilbert', 'liwhite'].")

        # Evaluate the mass function at both bracketing redshifts; also
        # record the spanned log-N range (values are negative log densities,
        # so *1.05/*0.95 widens/narrows the bounds slightly).
        if isinstance(M, np.ndarray) or isinstance(M, list):
            N1 = np.zeros([len(M)])
            N2 = np.zeros([len(M)])
            for i, elem in enumerate(M):
                N1[i] = mf(elem,par1)
                N2[i] = mf(elem,par2)
            Nmin = np.min(np.concatenate((N1,N2)))*1.05
            Nmax = np.max(np.concatenate((N1,N2)))*0.95
        else:
            N1 = mf(M, par1)
            N2 = mf(M, par2)
            Nmin = min(N1, N2)*1.05
            Nmax = max(N1, N2)*0.95

        # If z coincides with a tabulated redshift, no interpolation needed.
        if par1[0] == z: return N1
        if par2[0] == z: return N2
        if interpdir == 'N': # Interpolate N linearly by dz
            return (N1*(par2[0]-z)+N2*(z-par1[0]))/(par2[0]-par1[0])
        else: # Interpolate M by dz, then N by dM to get desired M-values
            npts = int(max(25, (Nmax-Nmin)/0.05)) # Ensure adequately small spacing between N-values
            Narr = np.linspace(Nmin, Nmax, npts)
            Marr1 = np.zeros(npts)
            Marr2 = np.zeros(npts)
            # Invert the mass function at each N via Newton's method.
            for i in range(0,npts):
                Marr1[i] = newton(mf, 11., args=(par1,Narr[i]), maxiter=500)
                Marr2[i] = newton(mf, 11., args=(par2,Narr[i]), maxiter=500)
            # Interpolate masses linearly in redshift, then build a cubic
            # M->N interpolant (ensuring M is ascending for interp1d).
            Minterp = (Marr1*(par2[0]-z)+Marr2*(z-par1[0]))/(par2[0]-par1[0])
            if Minterp[0] > Minterp[-1]:
                Minterp = Minterp[::-1]
                Narr = Narr[::-1]
            f = interp.interp1d(Minterp, Narr, kind='cubic')
            return f(M)
def getmass(N, z, massfunc='zfourge', interpdir='N'):
    """
    Converts number density to stellar mass at the given redshift using the given mass function.

    Note: No checks are performed to ensure that the mass function is well-defined at the given
    parameters; it is incumbent upon the user to make an appropriate choice of mass function.

    Parameters
    ==========
    N : Comoving cumulative number density in units of log(Mpc^-3). May be a single value or an array.
    z : Redshift
    massfunc : Keyword for desired mass function. Currently available keywords include ['illustris',
       'zfourge', 'muzzin', 'ilbert', 'liwhite']. Defaults to 'zfourge'.
    interpdir : (Optional) ['N', 'M'] Indicates whether the mass functions should be interpolated
       across redshifts in log N or log M. Doesn't make a significant difference in most cases.

    Returns
    =======
    mass : Stellar mass in units of log(Msun), same dimensions as N.
    """
    if massfunc == 'illustris':
        # Illustris supplies a direct inversion; no redshift-bin interpolation needed
        tc = torrey_cmf.number_density()
        if isinstance(N, np.ndarray) or isinstance(N, list):
            mass = np.zeros([len(N)])
            for i, elem in enumerate(N):
                mass[i] = tc.mass_from_density(elem, z)
        else:
            mass = tc.mass_from_density(N, z)
        return mass
    else:
        # Observational fits: select the fitting function and the parameters
        # of the two redshift bins (par1, par2) bracketing z
        if massfunc == 'zfourge':
            mf = zfourge_function
            par1, par2 = zfourgeparams(z)
        elif massfunc == 'muzzin':
            mf = muzzin_function
            par1, par2 = muzzinparams(z)
        elif massfunc == 'ilbert':
            mf = ilbert_function
            par1, par2 = ilbertparams(z)
        elif massfunc == 'liwhite':
            mf = liwhite_function
            par1, par2 = liwhiteparams(z)
        else: raise ValueError("Unrecognized mass function. Available keywords include ['illustris', 'zfourge', 'muzzin', 'ilbert', 'liwhite'].")
        if isinstance(N, np.ndarray) or isinstance(N, list):
            M1 = np.zeros([len(N)])
            M2 = np.zeros([len(N)])
            # Numerically invert the mass function (initial guess 11 dex)
            for i, elem in enumerate(N):
                M1[i] = newton(mf, 11., args=(par1,elem), maxiter=500)
            # z coincides with the first bin: no interpolation required
            if par1[0] == z: return M1
            for i, elem in enumerate(N):
                M2[i] = newton(mf, 11., args=(par2,elem), maxiter=500)
            Mmin = np.min(np.concatenate((M1,M2)))*0.9
            Mmax = np.max(np.concatenate((M1,M2)))*1.1
        else:
            M1 = newton(mf, 11., args=(par1,N), maxiter=500)
            if par1[0] == z: return M1
            M2 = newton(mf, 11., args=(par2,N), maxiter=500)
            Mmin = min(M1, M2)*0.9
            Mmax = max(M1, M2)*1.1
        if interpdir == 'N':
            # Interpolate N across z at fixed M: tabulate N(M) in both bins,
            # blend linearly in z, then invert via cubic interpolation
            npts = int(max(25, (Mmax-Mmin)/0.05))  # keep the M-grid spacing adequately small
            Marr = np.linspace(Mmin, Mmax, npts)
            Narr1 = np.zeros(npts)
            Narr2 = np.zeros(npts)
            for i in range(0,npts):
                Narr1[i] = mf(Marr[i], par1)
                Narr2[i] = mf(Marr[i], par2)
            Ninterp = (Narr1*(par2[0]-z)+Narr2*(z-par1[0]))/(par2[0]-par1[0])
            if Ninterp[0] > Ninterp[-1]:
                # interp1d requires an increasing abscissa
                Ninterp = Ninterp[::-1]
                Marr = Marr[::-1]
            f = interp.interp1d(Ninterp, Marr, kind='cubic')
            return f(N)
        else:
            # Interpolate the inverted masses directly, linearly in z
            return (M1*(par2[0]-z)+M2*(z-par1[0]))/(par2[0]-par1[0])
# ------- COSMOS/Ultravista mass functions ------- #
# Fits from Ilbert et al. 2013 (A&A 556:55)
def ilbert_function(M, par, target=0):
    """Cumulative double-Schechter number density of Ilbert et al. 2013.

    par = [z, log(M*), alpha1, Phi1, alpha2, Phi2]; returns log10(N) minus
    *target* so the function can be handed directly to a root finder.
    """
    from mpmath import gammainc
    ratio = 10.**(M - par[1])
    term1 = par[3]*float(gammainc(par[2] + 1, a=ratio))
    term2 = par[5]*float(gammainc(par[4] + 1, a=ratio))
    return np.log10(term1 + term2) - target
def ilbertparams(z, type='total'):
    """Return the Ilbert et al. 2013 (A&A 556:55) double-Schechter fit
    parameters [z, log(M*), alpha1, Phi1, alpha2, Phi2] for the two
    redshift bins bracketing *z* (clamped outside the surveyed range,
    with a z=50 sentinel above the last bin)."""
    zarr = np.array([0.35, 0.65, 0.95, 1.3, 1.75, 2.25, 2.75, 3.5])
    Mchar = np.array([10.88, 11.03, 10.87, 10.71, 10.74, 10.74, 10.76, 10.74])
    P1 = np.array([1.68, 1.22, 2.03, 1.35, 0.88, 0.62, 0.26, 0.03])*1.e-3
    a1 = np.array([-0.69, -1., -0.52, -0.08, -0.24, -0.22, -0.15, 0.95])
    P2 = np.array([0.77, 0.16, 0.29, 0.67, 0.33, 0.15, 0.14, 0.09])*1.e-3
    a2 = np.array([-1.42, -1.64, -1.62, -1.46, -1.6, -1.6, -1.6, -1.6])
    def bin_pars(zval, j):
        return [zval, Mchar[j], a1[j], P1[j], a2[j], P2[j]]
    if z < zarr[0]:
        return bin_pars(0, 0), bin_pars(zarr[0], 0)
    if z > zarr[-1]:
        return bin_pars(zarr[-1], -1), bin_pars(50., -1)
    i = np.argmax(zarr > z) - 1
    return bin_pars(zarr[i], i), bin_pars(zarr[i + 1], i + 1)
# Fits from Muzzin et al. 2013 (ApJ 777:18)
def muzzin_function(M, par, target=0):
    """Cumulative double-Schechter number density of Muzzin et al. 2013.

    par = [z, log(M*), alpha1, Phi1, alpha2, Phi2]; returns log10(N) minus
    *target* so the function can be handed directly to a root finder.
    """
    from mpmath import gammainc
    ratio = 10.**(M - par[1])
    term1 = par[3]*float(gammainc(par[2] + 1, a=ratio))
    term2 = par[5]*float(gammainc(par[4] + 1, a=ratio))
    return np.log10(term1 + term2) - target
def muzzinparams(z, type='total'):
    """Return the Muzzin et al. 2013 (ApJ 777:18) double-Schechter fit
    parameters [z, log(M*), alpha1, Phi1, alpha2, Phi2] for the two
    redshift bins bracketing *z* (clamped outside the surveyed range,
    with a z=50 sentinel above the last bin)."""
    zarr = np.array([0.35, 0.75, 1.25, 1.75, 2.25, 2.75, 3.5])
    # Fits with the faint-end slope alpha fixed to -1.2; the paper's
    # alpha-free variants are not used here.
    Mchar = np.array([10.97, 11.04, 10.99, 10.96, 11., 11.09, 11.4])
    P1 = np.array([16.27, 14.48, 9.30, 6.33, 2.94, 1.66, 0.13])*1.e-4
    a1 = np.array([-0.53, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2])
    P2 = np.array([9.47, 0., 0., 0., 0., 0., 0.])*1.e-4
    a2 = np.array([-1.37, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2])
    def bin_pars(zval, j):
        return [zval, Mchar[j], a1[j], P1[j], a2[j], P2[j]]
    if z < zarr[0]:
        return bin_pars(0, 0), bin_pars(zarr[0], 0)
    if z > zarr[-1]:
        return bin_pars(zarr[-1], -1), bin_pars(50., -1)
    i = np.argmax(zarr > z) - 1
    return bin_pars(zarr[i], i), bin_pars(zarr[i + 1], i + 1)
# ------- Low-z mass function from Li & White 2009 (MNRAS 398:2977) ------ #
# Note: Only applies to a single redshift (z ~ 0.1)!
def liwhite_function(M, par, target=0):
    """Piecewise cumulative Schechter mass function of Li & White 2009
    (valid at z ~ 0.1 only), returned as log10(N) minus *target*.

    *par* is accepted only to match the other mass-function signatures;
    it is not used.
    """
    from mpmath import gammainc
    h = 0.7
    hcorr = np.log10(h**2)
    # (log M*, alpha, Phi*) for the high / mid / low-mass Schechter pieces
    par_high = [10.71 - hcorr, -1.99, 0.0044*h**3]
    par_mid = [10.37 - hcorr, -0.9, 0.0132*h**3]
    par_low = [9.61 - hcorr, -1.13, 0.0146*h**3]
    # Pre-tabulated integrals used to stitch the pieces together continuously
    hightot = 0.000280917465551         # total contribution of the high-mass piece
    subtractmid = 0.000245951678324549  # mid piece integrated down to the upper break
    midtot = 0.00748165061711           # total contribution of the mid-mass piece
    subtractlow = 0.00266964412065      # low piece integrated down to the lower break
    def piece(p):
        return p[2]*float(gammainc(p[1] + 1, a=10.**(M - p[0])))
    if M > 10.67 - hcorr:
        return np.log10(piece(par_high)) - target
    if M > 9.33 - hcorr:
        return np.log10(piece(par_mid) - subtractmid + hightot) - target
    return np.log10(piece(par_low) - subtractlow + midtot + hightot) - target
# Dummy function, this mass function is only defined at z=0
def liwhiteparams(z):
return [0,1], [1,0]
def zfourge_function(M, par, target=0):
    """Cumulative double-Schechter number density for the ZFOURGE fits.

    Unlike the Muzzin/Ilbert variants, the normalizations par[3] and par[5]
    are stored as log10 values. Returns log10(N) minus *target*.
    """
    from mpmath import gammainc
    ratio = 10.**(M - par[1])
    term1 = 10.**(par[3])*float(gammainc(par[2] + 1, a=ratio))
    term2 = 10.**(par[5])*float(gammainc(par[4] + 1, a=ratio))
    return np.log10(term1 + term2) - target
# Returns the double-Schechter fit parameters z, log(M*), alpha1, log(P1), alpha2, and log(P2)
# of the neighboring redshift bins
def zfourgeparams(z, type='total'):
    """Return the ZFOURGE double-Schechter fit parameters
    [z, log(M*), alpha1, log(Phi1), alpha2, log(Phi2)] for the two
    redshift bins bracketing *z*.

    *type* selects the 'total', 'star-forming' or 'quiescent' population.
    Outside the surveyed range the first tuple carries z itself, which
    makes getnum/getmass short-circuit to that single bin.
    """
    zarr = np.array([0.35, 0.625, 0.875, 1.125, 1.375, 1.75, 2.25, 2.75])
    # Double-Schechter fits at all redshifts (the paper's alternative
    # single-Schechter z > 2 variants are not used here).
    if type == 'star-forming':
        Mchar = np.array([10.59, 10.65, 10.56, 10.44, 10.69, 10.59, 10.58, 10.61])
        a1 = np.array([-1.08, -0.97, -0.46, 0.53, -0.55, 0.75, 2.06, 2.36])
        P1 = np.array([-2.67, -2.97, -2.81, -2.98, -3.04, -3.37, -4.30, -4.95])
        a2 = np.array([-2.00, -1.58, -1.61, -1.44, -1.62, -1.47, -1.38, -1.67])
        P2 = np.array([-4.46, -3.34, -3.36, -3.11, -3.59, -3.28, -3.28, -3.71])
    elif type == 'quiescent':
        Mchar = np.array([10.75, 10.68, 10.63, 10.63, 10.49, 10.77, 10.69, 9.95])
        a1 = np.array([-0.47, -0.10, 0.04, 0.11, 0.85, -0.19, -0.37, -0.62])
        P1 = np.array([-2.76, -2.67, -2.81, -3.03, -3.36, -3.41, -3.59, -4.22])
        a2 = np.array([-1.97, -1.69, -1.51, -1.57, -0.54, -0.18, -3.07, 2.51])
        P2 = np.array([-5.21, -4.29, -4.40, -4.80, -3.72, -3.91, -6.95, -4.51])
    else:  # total population
        Mchar = np.array([10.78, 10.70, 10.66, 10.54, 10.61, 10.74, 10.69, 10.74])
        a1 = np.array([-0.98, -0.39, -0.37, 0.30, -0.12, 0.04, 1.03, 1.62])
        P1 = np.array([-2.54, -2.55, -2.56, -2.72, -2.78, -3.05, -3.80, -4.54])
        a2 = np.array([-1.90, -1.53, -1.61, -1.45, -1.56, -1.49, -1.33, -1.57])
        P2 = np.array([-4.29, -3.15, -3.39, -3.17, -3.43, -3.38, -3.26, -3.69])
    def bin_pars(zval, j):
        return [zval, Mchar[j], a1[j], P1[j], a2[j], P2[j]]
    if z < zarr[0]:
        # below the first bin: first tuple carries z itself (short-circuit)
        return bin_pars(z, 0), bin_pars(zarr[1], 1)
    if z > zarr[-1]:
        return bin_pars(z, -1), bin_pars(50., -2)
    i = np.argmax(zarr > z) - 1
    return bin_pars(zarr[i], i), bin_pars(zarr[i + 1], i + 1)
# Calculates the quenched fraction for the given galaxy mass and redshift
# by comparing the star-forming and quiescent mass functions
def quenchedfrac_zfourge(M, z):
    """Quenched fraction at stellar mass M and redshift z, computed from the
    ratio of the ZFOURGE quiescent and star-forming differential mass
    functions, linearly interpolated in z between the bracketing fit bins."""
    def log_dn(par):
        # log10 of the differential double-Schechter function dN/dlogM
        x = 10.**(M - par[1])
        return np.log10(np.log(10)*np.exp(-1.*x)*x*(10.**par[3]*x**(par[2]) + 10.**(par[5])*x**(par[4])))
    par_q1, par_q2 = zfourgeparams(z, type='quiescent')
    par_sf1, par_sf2 = zfourgeparams(z, type='star-forming')
    dn_q1, dn_sf1 = log_dn(par_q1), log_dn(par_sf1)
    dn_q2, dn_sf2 = log_dn(par_q2), log_dn(par_sf2)
    fq1 = 10.**dn_q1/(10.**dn_q1 + 10.**dn_sf1)
    fq2 = 10.**dn_q2/(10.**dn_q2 + 10.**dn_sf2)
    # linear interpolation in z between the two bins
    return (fq1*(par_q2[0] - z) + fq2*(z - par_q1[0]))/(par_q2[0] - par_q1[0])
# ------ OBSOLETE, left in for backwards-compatibility ------ #
def getnum_illustris(M, z):
    """OBSOLETE: use getnum(M, z, massfunc='illustris') instead.
    Cumulative comoving number density from the Illustris fit."""
    return torrey_cmf.number_density().cmf_fit(M, z)
def getmass_illustris(N, z):
    """OBSOLETE: use getmass(N, z, massfunc='illustris') instead.
    Stellar mass at number density N from the Illustris fit."""
    tc = torrey_cmf.number_density()
    if isinstance(N, np.ndarray) or isinstance(N, list):
        mass = np.zeros([len(N)])
        for i in range(len(N)):
            mass[i] = tc.mass_from_density(N[i], z)
        return mass
    return tc.mass_from_density(N, z)
def getmass_zfourge(N, z):
    """OBSOLETE: use getmass(N, z, massfunc='zfourge') instead.
    Inverts the ZFOURGE number density numerically (initial guess 10 dex)."""
    if isinstance(N, np.ndarray) or isinstance(N, list):
        mass = np.zeros([len(N)])
        for i, dens in enumerate(N):
            mass[i] = newton(getnum_zfourge, 10., args=(z, dens))
        return mass
    return newton(getnum_zfourge, 10., args=(z, N))
| sawellons/NDpredict | massfunctions.py | Python | gpl-3.0 | 16,288 | [
"Galaxy"
] | 8cbeb12a9cc7622243892a382c9875e7bc9037ba38356b28d12dd2a95631602b |
# Copyright (c) 2006, 2008-2010, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""handle diagram generation options for class diagram or default diagrams
"""
from six.moves import builtins
import astroid
from pylint.pyreverse.diagrams import PackageDiagram, ClassDiagram
from pylint.pyreverse.utils import LocalsVisitor
BUILTINS_NAME = builtins.__name__
# diagram generators ##########################################################
class DiaDefGenerator(object):
    """Base class holding the options shared by diagram definition generators."""

    def __init__(self, linker, handler):
        """Record the linker and the handler's configuration."""
        self.config = handler.config
        self._set_default_options()
        self.linker = linker
        # concrete subclasses install the diagram they build here
        self.classdiagram = None

    def get_title(self, node):
        """Return the display title for *node*, module-qualified if requested."""
        if self.module_names:
            return '%s.%s' % (node.root().name, node.name)
        return node.name

    def _set_option(self, option):
        """Resolve an option that defaults to 'on' when class diagrams are requested."""
        if option is not None:
            return option
        return bool(self.config.classes)

    def _set_default_options(self):
        """Compute module_names and the ancestor/association search depths."""
        self.module_names = self._set_option(self.config.module_names)
        # -1 means unlimited depth, 0 means none
        anc_level = -1 if self._set_option(self.config.all_ancestors) else 0
        ass_level = -1 if self._set_option(self.config.all_associated) else 0
        if self.config.show_ancestors is not None:
            anc_level = self.config.show_ancestors
        if self.config.show_associated is not None:
            ass_level = self.config.show_associated
        self.anc_level, self.ass_level = anc_level, ass_level

    def _get_levels(self):
        """Return the (ancestor, association) search depths."""
        return self.anc_level, self.ass_level

    def show_node(self, node):
        """Return True unless *node* is a builtin that should stay hidden."""
        if self.config.show_builtin:
            return True
        return node.root().name != BUILTINS_NAME

    def add_class(self, node):
        """Link *node* and register it on the class diagram."""
        self.linker.visit(node)
        self.classdiagram.add_object(self.get_title(node), node)

    def get_ancestors(self, node, level):
        """Yield the direct, displayable ancestors of *node* (none if level is 0)."""
        if level == 0:
            return
        for ancestor in node.ancestors(recurs=False):
            if self.show_node(ancestor):
                yield ancestor

    def get_associated(self, klass_node, level):
        """Yield the displayable classes associated with *klass_node* (none if level is 0)."""
        if level == 0:
            return
        associated = list(klass_node.instance_attrs_type.values())
        associated += list(klass_node.locals_type.values())
        for nodes in associated:
            for assoc in nodes:
                if isinstance(assoc, astroid.Instance):
                    assoc = assoc._proxied
                if isinstance(assoc, astroid.ClassDef) and self.show_node(assoc):
                    yield assoc

    def extract_classes(self, klass_node, anc_level, ass_level):
        """Recursively add *klass_node* and its related classes to the diagram."""
        if self.classdiagram.has_node(klass_node) or not self.show_node(klass_node):
            return
        self.add_class(klass_node)
        for ancestor in self.get_ancestors(klass_node, anc_level):
            self.extract_classes(ancestor, anc_level - 1, ass_level)
        for assoc in self.get_associated(klass_node, ass_level):
            self.extract_classes(assoc, anc_level, ass_level - 1)
class DefaultDiadefGenerator(LocalsVisitor, DiaDefGenerator):
    """generate minimum diagram definition for the project :

    * a package diagram including project's modules
    * a class diagram including project's classes
    """

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)
        LocalsVisitor.__init__(self)
        # fix: previously this attribute only existed after visit_project ran,
        # so leave_project/visit_module/visit_importfrom could raise
        # AttributeError if invoked first
        self.pkgdiagram = None

    def visit_project(self, node):
        """visit a pyreverse.utils.Project node

        create a diagram definition for packages
        """
        mode = self.config.mode
        # only build a package diagram when there is more than one module
        if len(node.modules) > 1:
            self.pkgdiagram = PackageDiagram('packages %s' % node.name, mode)
        else:
            self.pkgdiagram = None
        self.classdiagram = ClassDiagram('classes %s' % node.name, mode)

    def leave_project(self, node):  # pylint: disable=unused-argument
        """leave the pyreverse.utils.Project node

        return the generated diagram definition
        """
        if self.pkgdiagram:
            return self.pkgdiagram, self.classdiagram
        return self.classdiagram,

    def visit_module(self, node):
        """visit an astroid.Module node

        add this module to the package diagram definition
        """
        if self.pkgdiagram:
            self.linker.visit(node)
            self.pkgdiagram.add_object(node.name, node)

    def visit_classdef(self, node):
        """visit an astroid.Class node

        add this class to the class diagram definition
        """
        anc_level, ass_level = self._get_levels()
        self.extract_classes(node, anc_level, ass_level)

    def visit_importfrom(self, node):
        """visit astroid.ImportFrom and catch modules for the package diagram
        """
        if self.pkgdiagram:
            self.pkgdiagram.add_from_depend(node, node.modname)
class ClassDiadefGenerator(DiaDefGenerator):
    """Generate a class diagram definition centred on one given class."""

    def __init__(self, linker, handler):
        DiaDefGenerator.__init__(self, linker, handler)

    def class_diagram(self, project, klass):
        """Return a class diagram definition for *klass* and its related classes.

        *klass* may be a dotted name when the project has several modules.
        """
        self.classdiagram = ClassDiagram(klass, self.config.mode)
        if len(project.modules) > 1:
            # dotted name: resolve the owning module explicitly
            module_name, klass = klass.rsplit('.', 1)
            module = project.get_module(module_name)
        else:
            module = project.modules[0]
            klass = klass.split('.')[-1]
        klass = next(module.ilookup(klass))
        anc_level, ass_level = self._get_levels()
        self.extract_classes(klass, anc_level, ass_level)
        return self.classdiagram
# diagram handler #############################################################
class DiadefsHandler(object):
    """Produce diagram definitions: either those requested by the user
    (--class options) or the generated defaults."""

    def __init__(self, config):
        self.config = config

    def get_diadefs(self, project, linker):
        """Get the diagrams configuration data

        :param project:The pyreverse project
        :type project: pyreverse.utils.Project
        :param linker: The linker
        :type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)

        :returns: The list of diagram definitions
        :rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
        """
        # one diagram per explicitly requested class
        generator = ClassDiadefGenerator(linker, self)
        diagrams = [generator.class_diagram(project, klass)
                    for klass in self.config.classes]
        # otherwise fall back to the default package/class diagrams
        if not diagrams:
            diagrams = DefaultDiadefGenerator(linker, self).visit(project)
        for diagram in diagrams:
            diagram.extract_relationships()
        return diagrams
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/pyreverse/diadefslib.py | Python | apache-2.0 | 8,093 | [
"VisIt"
] | dbc3798152168792a1b00f82dd466a7915b9ec7459fadf2f69b412f368451603 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import piw
from pi import const,domain,proxy,node,utils,logic,paths,container
class FilterStreamPolicy:
    """Stream policy whose converter filters the data through a functor."""

    def __init__(self, f):
        self.__functor = f

    def create_converter(self, iso):
        # iso is ignored: filtering behaves the same either way
        return piw.filtering_converter(self.__functor)
class TriggerStreamPolicy:
    """Stream policy whose converter turns incoming data into trigger events."""

    def create_converter(self, iso):
        # iso is irrelevant for triggers
        return piw.triggering_converter()
class ImpulseStreamPolicy:
    """Stream policy whose converter turns incoming data into impulses."""

    def create_converter(self, iso):
        # iso is irrelevant for impulses
        return piw.impulse_converter()
class LopassStreamPolicy:
    """Stream policy applying a low-pass conversion with the given
    frequency and coefficient arguments."""

    def __init__(self, f, c):
        self.__freq = f
        self.__coeff = c

    def create_converter(self, iso):
        return piw.lopass_converter(self.__freq, self.__coeff)
class AnisoStreamPolicy:
    """Stream policy for anisochronous streams: data passes through unconverted."""

    def create_converter(self, iso):
        return piw.null_converter()
class IsoStreamPolicy:
    """Stream policy producing isochronous output.

    Isochronous input is resampled; anisochronous input is interpolated
    between lbound and ubound with the given resting value.
    """

    def __init__(self, ubound, lbound, rest):
        self.__ubound = ubound
        self.__lbound = lbound
        self.__rest = rest

    def create_converter(self, iso):
        if iso:
            # already isochronous: just resample to the local clock
            return piw.resampling_converter()
        return piw.interpolating_converter(self.__ubound, self.__lbound, self.__rest)
class ThrottleStreamPolicy:
    """Stream policy that rate-limits data to the given interval."""

    def __init__(self, interval):
        self.__interval = interval

    def create_converter(self, iso):
        return piw.throttling_converter(self.__interval)
def DefaultStreamPolicy(iso):
    """Map True/False to a default iso/aniso policy; anything else is assumed
    to already be a stream policy object and is returned unchanged."""
    if iso is True:
        return IsoStreamPolicy(1, -1, 0)
    elif iso is False:
        return AnisoStreamPolicy()
    return iso
class NullPolicyImpl:
    """Policy that publishes only a bare data node; every value accessor
    is unsupported and raises RuntimeError."""

    protocols = ''

    def __init__(self, atom, data_domain, init, transient):
        self.__datanode = node.Server(transient=transient)

    def data_node(self):
        return self.__datanode

    def get_data(self):
        raise RuntimeError("unimplemented in Null Policy")

    def set_data(self, d):
        raise RuntimeError("unimplemented in Null Policy")

    def change_value(self, v, t=0, p=True):
        raise RuntimeError("unimplemented in Null Policy")

    def set_value(self, v, t=0):
        raise RuntimeError("unimplemented in Null Policy")

    def get_value(self):
        raise RuntimeError("unimplemented in Null Policy")

    def close(self):
        # nothing to release
        pass
class ReadOnlyPolicyImpl:
    """Policy for atoms whose value is set programmatically and only read
    by connected clients."""

    protocols = 'output'

    def __init__(self, atom, data_domain, init, transient):
        self.__datanode = node.Server(transient=transient)
        atom.set_property_string('domain', str(data_domain))
        self.__data_domain = data_domain
        self.__value = init
        self.set_value(init)

    def data_node(self):
        return self.__datanode

    def get_data(self):
        return self.data_node().get_data()

    def set_data(self, d):
        self.data_node().set_data(d)

    def change_value(self, v, t=0, p=False):
        # a read-only atom cannot veto changes: identical to set_value
        self.set_value(v, t)

    def set_value(self, v, t=0):
        self.data_node().set_data(self.__data_domain.value2data(v, t))
        self.__value = v

    def get_value(self):
        return self.__value

    def close(self):
        # nothing to release
        pass
class FastReadOnlyPolicyImpl:
    """Policy for output atoms whose data is produced on the fast thread.

    The owner installs a fast-thread source/clock on the data node via
    set_source/set_clock; slow-thread value access is unsupported and raises.
    """

    protocols = 'output'

    def __init__(self, atom, data_domain, init, transient):
        self.__data_domain = data_domain
        self.__datanode = node.Server(transient=transient)
        atom.set_property_string('domain', str(data_domain))

    def data_node(self):
        return self.__datanode

    def set_source(self, src):
        # fast-thread data source feeding the node
        self.data_node().set_source(src)

    def set_clock(self, src):
        self.data_node().set_clock(src)

    def close(self):
        # nothing to release
        pass

    def get_data(self):
        raise RuntimeError("unimplemented in FastReadOnlyPolicy")

    def set_data(self, d):
        raise RuntimeError("unimplemented in FastReadOnlyPolicy")

    def set_value(self, v, t=0):
        raise RuntimeError("unimplemented in FastReadOnlyPolicy")

    # fix: change_value was defined twice with identical bodies; the second
    # definition silently shadowed the first, so the duplicate is removed
    def change_value(self, v, t=0, p=True):
        raise RuntimeError("unimplemented in FastReadOnlyPolicy")

    def get_value(self):
        raise RuntimeError("unimplemented in FastReadOnlyPolicy")
class PlumberBackend(piw.backend):
    """piw backend that delegates converter creation to a stream policy."""

    def __init__(self, correlator, stream_policy, signal, sigtype, priority, iid):
        # the policy must be stored before the base init runs
        self.__stream_policy = stream_policy
        piw.backend.__init__(self, correlator, iid, signal, priority, sigtype)

    def create_converter(self, iso):
        # called back when a stream is attached to this backend
        return self.__stream_policy.create_converter(iso)
class BackendDelegate(piw.backend_delegate):
    """Lazily creates and caches the controller and data backends produced
    by a policy for one connection."""

    def __init__(self, plumber, policy, config, clocked):
        piw.backend_delegate.__init__(self, clocked)
        self.__plumber = plumber
        self.__policy = policy
        self.__config = config
        self.__cbackend = None
        self.__dbackend = None
        self.set_clocked(clocked)

    def create_controller_backend(self):
        if not self.__cbackend:
            self.__cbackend = self.__policy.get_controller_backend(self.__config)
        return self.__cbackend

    def create_data_backend(self):
        if not self.__dbackend:
            self.__dbackend = self.__policy.get_data_backend(self.__config)
        return self.__dbackend

    def clear(self):
        # drop the cached backends along with the base-class state
        piw.backend_delegate.clear(self)
        self.__cbackend = None
        self.__dbackend = None
class Plumber(proxy.AtomProxy):
    """Proxy for one upstream atom that routes its output into our backend.

    Watches the upstream's 'latency' and 'domain' properties and rebuilds
    the connection when they change.
    """

    # input merge priorities for streams
    input_merge = 0
    input_latch = 1
    input_input = 2
    input_linger = 3

    # proxy properties we want change notifications for
    monitor = set(['latency', 'domain'])

    def __init__(self, policy, config, clocked):
        proxy.AtomProxy.__init__(self)
        self.__backend = BackendDelegate(self, policy, config, clocked)
        self.__config = config
        self.__connector = None
        self.__correlator = None
        self.__stream_policy = None
        # anchor binds this proxy to the configured upstream address
        self.__mainanchor = piw.canchor()
        self.__mainanchor.set_client(self)
        self.__mainanchor.set_address_str(config.address)

    def clocked(self):
        return self.__backend.isclocked()

    def connect_static(self):
        return self.__config.connect_static()

    def disconnect(self):
        # tear down the data route and detach from the upstream address
        self.set_data_clone(None)
        self.__mainanchor.set_address_str('')
        self.__connector = None
        self.__backend.clear()

    def node_ready(self):
        # upstream atom appeared: build the connector and start routing
        self.__connector = piw.connector(self.__config.connect_static(), self.__backend, self.__config.filter, self.domain().iso())
        self.set_data_clone(self.__connector)
        self.__backend.set_latency(self.latency())
        if self.__config.callback:
            self.__config.callback(self)

    def set_clocked(self, c):
        self.__backend.set_clocked(c)

    def node_removed(self):
        # upstream atom disappeared: drop the plumbing
        self.set_data_clone(None)
        self.__connector = None
        self.__backend.clear()
        if self.__config.callback:
            self.__config.callback(None)

    def node_changed(self, parts):
        if 'domain' in parts:
            # domain changes require a full reconnect
            self.node_removed()
            self.node_ready()
            return
        if 'latency' in parts:
            self.__backend.set_latency(self.latency())
class PlumberConfig:
    """Value object describing one upstream connection for a Plumber."""

    __slots__ = ('address', 'filter', 'iid', 'hint', 'clocked', 'callback')

    def __init__(self, address, filter, iid, hint=None, clocked=False, callback=None):
        self.address = address  # address string of the upstream atom
        self.filter = filter    # piw signal filter for the connection
        self.iid = iid          # slot id of this connection
        self.hint = hint        # 'ctl' marks a static controller connection
        self.clocked = clocked
        # fix: the callback argument was previously discarded (self.callback
        # was unconditionally set to None), so Plumber never invoked it
        self.callback = callback

    def connect_static(self):
        """True when this is a static controller ('ctl') connection."""
        return self.hint == 'ctl'
class PlumberSlot:
    """Pairs a connection description with the Plumber servicing it."""

    def __init__(self, iid, src, hint, plumber):
        self.src = src          # the original 'conn' description term
        self.plumber = plumber  # Plumber routing this connection
        self.iid = iid          # slot id within the owning policy
        self.hint = hint        # 'ctl' for controller connections

    def set_clocked(self, clock):
        self.plumber.set_clocked(clock)

    # Python 2 rich-comparison protocol: slots compare (and hash) by their
    # connection description only
    def __cmp__(self, other):
        if isinstance(other, PlumberSlot):
            return cmp(self.src, other.src)
        return -1

    def __hash__(self):
        return hash(self.src)
class ConnectablePolicyImpl:
    """Base policy for atoms that accept incoming connections.

    Connection descriptions are persisted in the atom's 'master' meta data;
    the asserted/retracted callbacks build and tear down the plumbing for
    each one. Control ('ctl') and data connections are counted separately.
    """

    protocols = 'input'

    def __init__(self, atom, data_domain, init, clocked, data_node, auto_slot):
        self.__closed = False
        self.__datanode = data_node
        self.__value = init
        self.__data_domain = data_domain
        self.__clock = clocked
        self.__connection_iids = set()
        self.__auto_slot = auto_slot
        self.__cconnections = 0
        self.__dconnections = 0
        atom.set_property_string('domain', str(data_domain))
        # persistent set of upstream connections; callbacks manage plumbing
        self.__connections = container.PersistentMetaData(atom, 'master', asserted=self.__add_connection, retracted=self.__del_connection)
        self.set_value(self.__value)

    def data_node(self):
        return self.__datanode

    def get_data(self):
        return self.data_node().get_data()

    def get_domain(self):
        return self.__data_domain

    def set_data(self, d):
        self.data_node().set_data(d)

    def change_value(self, v, t=0, p=False):
        d = self.__data_domain.value2data(v, t)
        self.data_node().set_data(d)
        self.__value = v

    def set_value(self, v, t=0):
        d = self.__data_domain.value2data(v, t)
        self.data_node().set_data(d)
        self.__value = v

    def get_value(self):
        return self.__value

    def closed(self):
        return self.__closed

    def destroy_plumber(self, plumber):
        # hook for subclasses: called after a connection's plumber is removed
        pass

    def create_plumber(self, config, clocked):
        # hook for subclasses: build the Plumber for a new connection
        return None

    def get_controller_backend(self, config):
        # hook for subclasses: backend for control ('ctl') connections
        pass

    def get_data_backend(self, config):
        # hook for subclasses: backend for data connections
        pass

    def make_filter(self, stream, slot):
        # 'conn' descriptions look like conn(using, target, id, path, hint)
        if logic.is_pred_arity(stream, 'conn', 5, 5):
            if stream.args[4] == 'ctl':
                # controller connections use the slot number directly
                using = slot
            else:
                if stream.args[0] is not None:
                    using = int(stream.args[0])
                else:
                    using = 0
                if self.__auto_slot:
                    # encode the slot number in the high byte of the signal id
                    using = using | (slot << 8)
            if stream.args[1] is not None:
                tgt = int(stream.args[1])
            else:
                tgt = 0
            id = stream.args[2]
            path = stream.args[3]
            if path is not None:
                return (id, piw.signal_dsc_filter(using, tgt, path))
            else:
                return (id, piw.signal_cnc_filter(using, tgt))
        # unrecognized description: return an inert filter
        print 'cop out of', stream
        return ('', piw.null_filter())

    def close(self):
        self.__closed = True
        self.__connections.clear()

    def set_clocked(self, c):
        # propagate the clocking mode to all existing connections
        self.__clock = c
        self.__connections.visit(lambda v, s: s.set_clocked(c))

    def count_control_connections(self):
        return self.__cconnections

    def count_data_connections(self):
        return self.__dconnections

    def count_connections(self):
        return self.__dconnections + self.__cconnections

    def __add_connection(self, src, delegate):
        # allocate a fresh slot id, one above the current maximum
        iid = (max(self.__connection_iids) + 1 if self.__connection_iids else 1)
        (a, f) = self.make_filter(src, iid)
        if not paths.valid_id(a):
            # not a usable connection description
            return None
        self.__connection_iids.add(iid)
        hint = None
        if logic.is_pred_arity(src, 'conn', 5, 5):
            hint = src.args[4]
        s = PlumberSlot(iid, src, hint, self.create_plumber(PlumberConfig(a, f, iid, hint), self.__clock))
        if hint == 'ctl':
            self.__cconnections = self.__cconnections + 1
        else:
            self.__dconnections = self.__dconnections + 1
        return s

    def __del_connection(self, src, slot, destroy):
        self.__connection_iids.discard(slot.iid)
        if slot.hint == 'ctl':
            self.__cconnections = self.__cconnections - 1
        else:
            self.__dconnections = self.__dconnections - 1
        slot.plumber.disconnect()
        self.destroy_plumber(slot.plumber)
class FunctorController:
    """Routes control-stream input through a functor (default: debug print)."""

    def __init__(self, cdomain, policy=None, functor=None):
        self.__policy = policy or ThrottleStreamPolicy(10)
        f = functor or piw.slowchange(utils.changify(self.__dump))
        self.__backend = piw.functor_backend(1, False)
        self.__backend.set_gfunctor(utils.make_change_nb(f))
        self.__correlator = piw.correlator(cdomain, chr(1), piw.null_filter(), self.__backend.cookie(), 0, 0)

    def __dump(self, d):
        # default functor: just log the incoming data
        print 'default control', d

    def get_backend(self, config):
        return PlumberBackend(self.__correlator, self.__policy, 1, Plumber.input_input, -1, config.iid)
class FastPolicyImpl(ConnectablePolicyImpl):
    """Connectable policy whose data is delivered on the fast (real-time) thread.

    Data connections feed a functor backend directly; control connections go
    through a FunctorController which also mirrors the value back into the
    stored slow-thread state.
    """

    protocols = 'input explicit'

    def __init__(self, atom, data_domain, init, transient, handler, stream_policy=None, clock=True):
        self.__stream_policy = stream_policy or ThrottleStreamPolicy(500)
        self.__handler = handler
        self.__slow_handler = utils.fastchange(self.__handler)
        # control path: forward to the handler AND update the stored value
        self.__ctl_handler = piw.change2(self.__slow_handler, piw.slowchange(utils.changify(self.__control)))
        self.__clock_domain = piw.clockdomain_ctl()
        self.__clock_domain.set_source(piw.makestring('*', 0L))
        self.__clock = piw.clocksink()
        self.__clock_domain.sink(self.__clock, 'FastPolicy')
        self.__upstream = None
        self.__backend = None
        self.__correlator = None
        self.__ctrl = None
        ConnectablePolicyImpl.__init__(self, atom, data_domain, init, clock, node.Server(transient=transient), False)
        self.data_node().set_change_handler(self.__slow_handler)
        self.data_node().set_data(self.get_domain().value2data(init))

    def __control(self, d):
        # mirror control-stream data into the stored (slow-thread) value
        v = self.get_domain().data2value(d)
        ConnectablePolicyImpl.set_value(self, v)

    def set_value(self, v, t=0):
        d = self.get_domain().value2data(v, t)
        self.__slow_handler(d)

    def change_value(self, v, t=0, p=True):
        self.set_value(v, t)

    def get_clock(self):
        return self.__clock

    def get_controller_backend(self, config):
        # lazily build the shared controller backend
        if self.__ctrl is None:
            self.__ctrl = FunctorController(self.__clock_domain, self.__stream_policy, self.__ctl_handler)
        return self.__ctrl.get_backend(config)

    def get_data_backend(self, config):
        # lazily build the functor backend and its correlator
        if self.__backend is None:
            self.__backend = piw.functor_backend(1, True)
            self.__backend.set_gfunctor(utils.make_change_nb(self.__handler))
            self.__correlator = piw.correlator(self.__clock_domain, chr(1), piw.null_filter(), self.__backend.cookie(), 0, 0)
        if config.clocked:
            self.__set_clock(self.__backend.get_clock())
        return PlumberBackend(self.__correlator, self.__stream_policy, 1, Plumber.input_input, -1, config.iid)

    def create_plumber(self, config, clocked):
        return Plumber(self, config, clocked)

    def __set_clock(self, clock):
        # swap our sink's upstream clock
        if self.__upstream is not None:
            self.__clock.remove_upstream(self.__upstream)
        self.__upstream = clock
        if self.__upstream is not None:
            self.__clock.add_upstream(self.__upstream)

    def destroy_plumber(self, plumber):
        # last connection gone: release the backend machinery
        if self.count_connections() == 0:
            self.__correlator = None
            self.__backend = None
            self.__ctrl = None
class TriggerFunctorController:
    """Routes control-stream input to a functor, forwarding duplicates
    (triggers care about every event, even repeated identical values)."""

    def __init__(self, cdomain, policy=None, functor=None):
        self.__policy = policy or ThrottleStreamPolicy(10)
        f = functor or piw.slowchange(utils.changify(self.__dump))
        self.__backend = piw.functor_backend(1, True)
        # deliver repeated identical values too
        self.__backend.send_duplicates(True)
        self.__backend.set_gfunctor(f)
        self.__correlator = piw.correlator(cdomain, chr(1), piw.null_filter(), self.__backend.cookie(), 0, 0)

    def __dump(self, d):
        # default functor: just log the incoming data
        print 'default control', d

    def get_backend(self, config):
        return PlumberBackend(self.__correlator, self.__policy, 1, Plumber.input_input, -1, config.iid)
class TriggerPolicyImpl(ConnectablePolicyImpl):
    # Policy for momentary (trigger) atoms: incoming events fire the handler
    # directly; no persistent value is kept, so set_value/change_value are
    # deliberate no-ops and duplicate events are forwarded downstream.
    protocols = 'input explicit'
    def __init__(self,atom,data_domain,init,transient,handler,stream_policy=None,clock=True):
        self.__stream_policy = stream_policy or ThrottleStreamPolicy(500)
        self.__handler = handler
        # fastchange wrapper used as the data node's change handler
        # (presumably marshals onto the fast thread — piw semantics assumed).
        self.__slow_handler = piw.fastchange(self.__handler)
        self.__clock_domain = piw.clockdomain_ctl()
        self.__clock_domain.set_source(piw.makestring('*', 0L))
        self.__clock = piw.clocksink()
        self.__clock_domain.sink(self.__clock,'TriggerPolicy')
        self.__upstream = None
        self.__backend = None
        self.__correlator = None
        self.__ctrl = None
        ConnectablePolicyImpl.__init__(self,atom,data_domain,init,clock,node.Server(transient=transient),False)
        self.data_node().set_change_handler(self.__slow_handler)
        # Triggers always start from 0, regardless of 'init'.
        self.data_node().set_data(self.get_domain().value2data(0))
    def set_value(self,v,t=0):
        # Triggers hold no state; direct writes are ignored.
        pass
    def get_value(self):
        # No persistent value; always report 0.
        return 0
    def set_status(self,v):
        # Status updates do go through the base-class value store.
        ConnectablePolicyImpl.set_value(self, v.as_long())
    def change_value(self,v,t=0,p=True):
        # Same rationale as set_value: nothing to change.
        pass
    def get_clock(self):
        return self.__clock
    def get_controller_backend(self,config):
        # Lazily build the duplicate-forwarding controller backend.
        if self.__ctrl is None:
            self.__ctrl = TriggerFunctorController(self.__clock_domain,self.__stream_policy,self.__handler)
        return self.__ctrl.get_backend(config)
    def get_data_backend(self,config):
        # Lazily build the data path; duplicates are forwarded here too.
        if self.__correlator is None:
            self.__backend = piw.functor_backend(1,True)
            self.__backend.set_functor(piw.pathnull(0),self.__handler)
            self.__backend.send_duplicates(True)
            self.__correlator = piw.correlator(self.__clock_domain,chr(1),piw.root_filter(),self.__backend.cookie(),0,0)
        if config.clocked:
            self.__set_clock(self.__backend.get_clock())
        return PlumberBackend(self.__correlator,self.__stream_policy,1,Plumber.input_input,-1,config.iid)
    def create_plumber(self,config,clocked):
        return Plumber(self,config,clocked)
    def __set_clock(self,clock):
        # Replace the upstream clock feeding our sink.
        if self.__upstream is not None:
            self.__clock.remove_upstream(self.__upstream)
        self.__upstream = clock
        if self.__upstream is not None:
            self.__clock.add_upstream(self.__upstream)
    def destroy_plumber(self,plumber):
        # Tear down lazily-built machinery once the last connection is gone.
        if self.count_connections()==0:
            self.__backend = None
            self.__correlator = None
            self.__ctrl = None
class LoadPolicyNode(node.Server):
    """
    Read/write server node whose value loading is delegated to a callback
    supplied at construction time.
    """
    def __init__(self, load_func, transient):
        node.Server.__init__(self, transient=transient)
        self.__loader = load_func
        self.set_readwrite()

    def load_value(self, delegate, value):
        # Forward straight to the injected callback; its result is passed
        # back to the framework unchanged.
        return self.__loader(delegate, value)
class LoadPolicyDelegate:
    """
    Default no-op delegate handed to load handlers: every hook is a stub
    that does nothing and returns None.
    """
    def retval(self):
        return None

    def set_residual(self, n, r):
        return None

    def set_deferred(self, n, r):
        return None

    def add_error(self, msg):
        return None
class LoadPolicyImpl(ConnectablePolicyImpl):
    # Policy whose value loads are routed through a user handler receiving a
    # delegate (see LoadPolicyDelegate) plus the raw data, via LoadPolicyNode.
    protocols = 'input output explicit'
    def __init__(self,atom,data_domain,init,transient,handler,stream_policy=None,clock=False):
        self.__stream_policy = stream_policy or ThrottleStreamPolicy(500)
        # weaken: presumably avoids keeping the handler's owner alive — confirm.
        self.__handler = utils.weaken(handler)
        self.__clock_domain = piw.clockdomain_ctl()
        self.__clock_domain.set_source(piw.makestring('*', 0L))
        self.__backend = None
        self.__correlator = None
        self.__ctrl = None
        ConnectablePolicyImpl.__init__(self,atom,data_domain,init,clock,LoadPolicyNode(self.__handler,transient),False)
        self.data_node().set_data(self.get_domain().value2data(init))
    def __slow_handler(self,d):
        # Stream-side path: loads get a throwaway no-op delegate.
        self.__handler(LoadPolicyDelegate(),d)
    def change_value(self,v,t=0,p=True):
        d = self.get_domain().value2data(v,t)
        self.__slow_handler(d)
    def get_controller_backend(self,config):
        if self.__ctrl is None:
            self.__ctrl = FunctorController(self.__clock_domain,functor=piw.slowchange(utils.changify(self.__slow_handler)))
        return self.__ctrl.get_backend(config)
    def get_data_backend(self,config):
        if self.__correlator is None:
            self.__backend = piw.functor_backend(1,True)
            self.__backend.set_functor(piw.pathnull(0),utils.make_change_nb(piw.slowchange(utils.changify(self.__slow_handler))))
            self.__correlator = piw.correlator(self.__clock_domain,chr(1),piw.root_filter(),self.__backend.cookie(),0,0)
        return PlumberBackend(self.__correlator,self.__stream_policy,1,Plumber.input_input,-1,config.iid)
    def create_plumber(self,config,clocked):
        return Plumber(self,config,clocked)
    def destroy_plumber(self,plumber):
        # Drop lazily-built machinery once the last connection is gone.
        if self.count_connections()==0:
            self.__backend = None
            self.__correlator = None
            self.__ctrl = None
class SlowPolicyImpl(ConnectablePolicyImpl):
    # Policy for non-realtime values: every change is offered to the handler
    # first; only if the handler does not return False is the value stored.
    protocols = 'input output explicit'
    def __init__(self,atom,data_domain,init,transient,handler,stream_policy=None,clock=False,callback=None):
        self.__stream_policy = stream_policy or ThrottleStreamPolicy(500)
        # weaken: presumably avoids keeping the handler's owner alive — confirm.
        self.__handler = utils.weaken(handler)
        self.__clock_domain = piw.clockdomain_ctl()
        self.__clock_domain.set_source(piw.makestring('*', 0L))
        self.__backend = None
        self.__correlator = None
        self.__callback = callback
        self.__ctrl = None
        ConnectablePolicyImpl.__init__(self,atom,data_domain,init,clock,node.Server(transient=transient),False)
        self.data_node().set_change_handler(self.__slow_handler)
        self.data_node().set_data(self.get_domain().value2data(init))
    def __slow_handler(self,d):
        # Veto pattern: handler returning False blocks the store.
        v = self.get_domain().data2value(d)
        if self.__handler(v) is not False:
            self.set_value(v,d.time())
    def change_value(self,v,t=0,p=True):
        # Same veto pattern for programmatic changes.
        if self.__handler(v) is not False:
            self.set_value(v,t)
    def get_controller_backend(self,config):
        if self.__ctrl is None:
            self.__ctrl = FunctorController(self.__clock_domain,functor=piw.slowchange(utils.changify(self.__slow_handler)))
        return self.__ctrl.get_backend(config)
    def get_data_backend(self,config):
        if self.__correlator is None:
            self.__backend = piw.functor_backend(1,True)
            self.__backend.set_functor(piw.pathnull(0),utils.make_change_nb(piw.slowchange(utils.changify(self.__slow_handler))))
            self.__correlator = piw.correlator(self.__clock_domain,chr(1),piw.root_filter(),self.__backend.cookie(),0,0)
        return PlumberBackend(self.__correlator,self.__stream_policy,1,Plumber.input_input,-1,config.iid)
    def create_plumber(self,config,clocked):
        # Attach the optional connection callback before handing off.
        config.callback=self.__callback
        return Plumber(self,config,clocked)
    def destroy_plumber(self,plumber):
        # Drop lazily-built machinery once the last connection is gone.
        if self.count_connections()==0:
            self.__backend = None
            self.__correlator = None
            self.__ctrl = None
class PolicyFactory:
    """
    Deferred policy constructor: captures a policy class plus extra
    positional/keyword arguments, and builds the instance later when the
    framework supplies (atom, data_domain, init, transient).
    """
    def __init__(self, klass, *args, **kwds):
        self.__spec = (klass, args, kwds)

    def __call__(self, atom, data_domain, init, transient):
        klass, extra_args, extra_kwds = self.__spec
        return klass(atom, data_domain, init, transient, *extra_args, **extra_kwds)
# Public policy constructors: each wraps its *Impl class in a PolicyFactory,
# deferring construction until an atom supplies (atom, domain, init, transient).
def LoadPolicy(*args,**kwds):
    return PolicyFactory(LoadPolicyImpl,*args,**kwds)
def SlowPolicy(*args,**kwds):
    return PolicyFactory(SlowPolicyImpl,*args,**kwds)
def FastPolicy(*args,**kwds):
    return PolicyFactory(FastPolicyImpl,*args,**kwds)
def TriggerPolicy(*args,**kwds):
    return PolicyFactory(TriggerPolicyImpl,*args,**kwds)
def FastReadOnlyPolicy(*args,**kwds):
    return PolicyFactory(FastReadOnlyPolicyImpl,*args,**kwds)
def ReadOnlyPolicy(*args,**kwds):
    return PolicyFactory(ReadOnlyPolicyImpl,*args,**kwds)
def NullPolicy(*args,**kwds):
    return PolicyFactory(NullPolicyImpl,*args,**kwds)
| barnone/EigenD | pi/policy.py | Python | gpl-3.0 | 24,392 | [
"VisIt"
] | a2d9f082da9646622e66950e36fee9d5533ea1c60a22b3d6b132e75baf54ea12 |
# Sample module in the public domain. Feel free to use this as a template
# for your modules (and you can remove this header and take complete credit
# and liability)
#
# Contact: Brian Carrier [carrier <at> sleuthkit [dot] org]
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Simple data source-level ingest module for Autopsy.
# Used as part of Python tutorials from Basis Technology - August 2015
#
# Looks for files of a given name, opens then in SQLite, queries the DB,
# and makes artifacts
import jarray
import inspect
import os
from java.lang import Class
from java.lang import System
from java.sql import DriverManager, SQLException
from java.util.logging import Level
from java.util import ArrayList
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.ingest import ModuleDataEvent
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class ContactsDbIngestModuleFactory(IngestModuleFactoryAdapter):
    # Also used as the source name on artifacts/attributes created below.
    moduleName = "Contacts Db Analyzer"

    def getModuleDisplayName(self):
        return self.moduleName

    def getModuleDescription(self):
        return "Sample module that parses contacts.db"

    def getModuleVersionNumber(self):
        return "1.0"

    def isDataSourceIngestModuleFactory(self):
        # Advertise that this factory produces data-source-level modules.
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        # One module instance per ingest job; options are unused here.
        return ContactsDbIngestModule()
# Data Source-level ingest module. One gets created per data source.
class ContactsDbIngestModule(DataSourceIngestModule):
    _logger = Logger.getLogger(ContactsDbIngestModuleFactory.moduleName)

    def log(self, level, msg):
        # Log with the caller's method name pulled from the stack frame.
        self._logger.logp(level, self.__class__.__name__, inspect.stack()[1][3], msg)

    def __init__(self):
        self.context = None

    # Where any setup and configuration is done
    # 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
    # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
    def startUp(self, context):
        self.context = context

    # Where the analysis is done.
    # The 'dataSource' object being passed in is of type org.sleuthkit.datamodel.Content.
    # See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/latest/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
    # 'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
    # See: http://sleuthkit.org/autopsy/docs/api-docs/latest/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
    def process(self, dataSource, progressBar):
        # we don't know how much work there is yet
        progressBar.switchToIndeterminate()

        # Use blackboard class to index blackboard artifacts for keyword search
        blackboard = Case.getCurrentCase().getServices().getBlackboard()

        # Find files named contacts.db, regardless of parent path
        fileManager = Case.getCurrentCase().getServices().getFileManager()
        files = fileManager.findFiles(dataSource, "contacts.db")
        numFiles = len(files)
        progressBar.switchToDeterminate(numFiles)
        fileCount = 0
        # NOTE(review): 'file' shadows the Python builtin; harmless under
        # Jython 2 but worth renaming if this is ever modernised.
        for file in files:
            # Check if the user pressed cancel while we were busy
            if self.context.isJobCancelled():
                return IngestModule.ProcessResult.OK
            self.log(Level.INFO, "Processing file: " + file.getName())
            fileCount += 1
            # Save the DB locally in the temp folder. use file id as name to reduce collisions
            lclDbPath = os.path.join(Case.getCurrentCase().getTempDirectory(), str(file.getId()) + ".db")
            ContentUtils.writeToFile(file, File(lclDbPath))
            # Open the DB using JDBC
            try:
                Class.forName("org.sqlite.JDBC").newInstance()
                dbConn = DriverManager.getConnection("jdbc:sqlite:%s" % lclDbPath)
            except SQLException as e:
                self.log(Level.INFO, "Could not open database file (not SQLite) " + file.getName() + " (" + e.getMessage() + ")")
                return IngestModule.ProcessResult.OK
            # Query the contacts table in the database and get all columns.
            try:
                stmt = dbConn.createStatement()
                resultSet = stmt.executeQuery("SELECT * FROM contacts")
            except SQLException as e:
                self.log(Level.INFO, "Error querying database for contacts table (" + e.getMessage() + ")")
                # NOTE(review): this early return (and cancellation above)
                # leaks dbConn and the local temp DB file — consider
                # closing/removing in a finally block.
                return IngestModule.ProcessResult.OK
            # Cycle through each row and create artifacts
            while resultSet.next():
                try:
                    name = resultSet.getString("name")
                    email = resultSet.getString("email")
                    phone = resultSet.getString("phone")
                except SQLException as e:
                    self.log(Level.INFO, "Error getting values from contacts table (" + e.getMessage() + ")")
                    # NOTE(review): there is no 'continue' here, so on a read
                    # error name/email/phone may be unbound (or stale from the
                    # previous row) when the artifact is built below.
                # Make an artifact on the blackboard, TSK_CONTACT and give it attributes for each of the fields
                art = file.newArtifact(BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT)
                attributes = ArrayList()
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME_PERSON.getTypeID(),
                                                   ContactsDbIngestModuleFactory.moduleName, name))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_EMAIL.getTypeID(),
                                                   ContactsDbIngestModuleFactory.moduleName, email))
                attributes.add(BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_PHONE_NUMBER.getTypeID(),
                                                   ContactsDbIngestModuleFactory.moduleName, phone))
                art.addAttributes(attributes)
                try:
                    # index the artifact for keyword search
                    blackboard.indexArtifact(art)
                except Blackboard.BlackboardException as e:
                    self.log(Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
            # Fire an event to notify the UI and others that there are new artifacts
            IngestServices.getInstance().fireModuleDataEvent(
                ModuleDataEvent(ContactsDbIngestModuleFactory.moduleName,
                                BlackboardArtifact.ARTIFACT_TYPE.TSK_CONTACT, None))
            # Clean up
            stmt.close()
            dbConn.close()
            os.remove(lclDbPath)
        # After all databases, post a message to the ingest messages in box.
        message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
                                              "ContactsDb Analyzer", "Found %d files" % fileCount)
        IngestServices.getInstance().postMessage(message)
        return IngestModule.ProcessResult.OK
| wschaeferB/autopsy | pythonExamples/Aug2015DataSourceTutorial/FindContactsDb.py | Python | apache-2.0 | 9,193 | [
"Brian"
] | 98a93215f856a75ad39317039edd83fd9e005c4cec069dd20b812acef477f585 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: André Felipe Dias <andref.dias@pronus.eng.br>
from __future__ import unicode_literals
import re
import shutil
from os import makedirs
from os.path import join, dirname, basename, isfile, exists, curdir, split, splitext, abspath
from docutils import nodes
from docutils.io import FileOutput
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.html import MetaBody as Meta
from docutils.transforms import Transform
from genshi.builder import Element, tag
from rst2html5 import HTML5Translator, HTML5Writer
try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
"""
Translates a restructuredText document to a HTML5 slideshow
"""
__docformat__ = 'reStructuredText'
class presentation(nodes.Element):
    """Doctree node carrying presentation-wide options (no behaviour of its own)."""
class Presentation(Directive):
    '''
    This directive handles attributes global to the presentation.
    Usually, it is placed at the top of the document
    but it is possible to change presentation attributes in the middle.
    See test/cases.py for examples.
    '''
    # All options are free-form strings; interpretation happens later in
    # SlideTranslator.visit_presentation.
    option_spec = {
        'distribution': directives.unchanged,
        'deck-selector': directives.unchanged,
        'slide-selector': directives.unchanged,
        'increment': directives.unchanged,
    }

    def run(self):
        # Emit a single presentation node carrying the raw option dict.
        return [presentation(**self.options)]


directives.register_directive('presentation', Presentation)
class slide_contents(nodes.Element):
    """Doctree node wrapping the body of a single slide (marker class only)."""
class SlideTransform(Transform):
    '''
    State Machine to transform default doctree to one with slideshow structure:
    section, header, contents.
    '''
    default_priority = 851
    # node classes that should be ignored to not form new slides
    force_new_slide = (presentation, nodes.field_list)
    skip_classes = (Meta.meta, nodes.docinfo) + force_new_slide

    def apply(self):
        # Accumulators for the slide currently being built.
        self.contents = []
        self.header = []
        self.slides = []
        self.slide = nodes.section()
        self.inner_level = 0
        self.visit(self.document.children)
        # Replace the document body with the flattened slide list.
        self.document.extend(self.slides)
        return

    def visit(self, children):
        # Consumes 'children' destructively; recursion tracks section depth.
        self.inner_level += 1
        while children:
            node = children.pop(0)
            if isinstance(node, self.skip_classes):
                if isinstance(node, self.force_new_slide):
                    # meta and docinfo doesn't close slide
                    # see meta_tag_and_slides in test/cases.py
                    self.close_slide()
                    self.slides.append(node)
                continue
            self.parse(node)
        self.inner_level -= 1
        if self.inner_level <= 1:
            self.close_slide()
        return

    def parse(self, node):
        if isinstance(node, nodes.transition):
            # A transition starts a new slide and carries its attributes.
            self.close_slide()
            self.slide.update_all_atts(node)
        elif isinstance(node, nodes.section):
            # All subsections are flattened to the same level.
            if self.inner_level == 1:
                self.close_slide()
                self.slide.update_all_atts(node)
            self.visit(node.children)
        elif isinstance(node, (nodes.title, nodes.subtitle)):
            # Titles and subtitles are converted to nodes.title and
            # their heading levels are defined later during translation
            self.header.append(node)
        else:
            self.contents.append(node)
        return

    def close_slide(self):
        # Flush accumulated header/contents into a finished slide section.
        if not (self.contents or self.header):
            return
        if self.header:
            header = nodes.header()
            header.extend(self.header)
            self.slide.append(header)
            self.header = []
        if self.contents:
            contents = slide_contents()
            contents.extend(self.contents)
            self.contents = []
            self.slide.append(contents)
        self.slides.append(self.slide)
        self.slide = nodes.section()
        return
class SlideWriter(HTML5Writer):
    # HTML5 writer specialised for slideshows: adds presentation-framework
    # command-line options, copies referenced local assets next to the output,
    # and rewrites their hrefs.
    choices = ['impress.js', 'jmpress.js', 'deck.js', 'None']
    settings_spec = HTML5Writer.settings_spec + (
        'rst2html5slides Specific Options',
        None,
        (
            (
                'Choose a web presentation framework for the output. '
                'Possible values are %s. '
                'Default is %s.' % (', '.join(choices), choices[0]),
                ['--presentation'],
                {
                    'choices': choices,
                    'default': choices[0],
                    'metavar': '<framework>'
                }
            ),
            (
                'Specify the name of the slide distribution function. '
                'Options are "linear", "grid" or "grid-rotate". '
                'An additional parameter can be specified along with the name such as in '
                '"grid_rotate 3".',
                ['--distribution'],
                {
                    'metavar': '<function_name>',
                }
            ),
            (
                'Specify the value of the increment used by the distribution functions. '
                'To specify different values for X and Y increments, '
                'separate them by space. Example "1000 500". '
                'Default value is 1600 for X and Y increments.',
                ['--increment'],
                {
                    'metavar': '<increment>',
                }
            ),
            (
                'Disable slide automatic identification based on title.',
                ['--manual-slide-id'],
                {
                    'action': 'store_true',
                    'dest': 'manual_slide_identification',
                }
            ),
            (
                'Specify the tag, id and/or class to replace the default (and non-standard) '
                '<deck> tag used to surround the slides. '
                'Follow the pattern tag#id.class (such as a CSS selector). '
                'Examples: div, div#impress, div.deck-container, article#impress.impress-not-supported',
                ['--deck-selector'],
                {
                    'dest': 'deck_selector',
                    'metavar': '<deck_selector>',
                },
            ),
            (
                'Specify the tag, id and/or class to replace the default (and non-standard) '
                '<slide> tag used to surround each slide.'
                'Follow the pattern tag#id.class (such as a CSS selector)'
                'Examples: div.slide, section, div.step',
                ['--slide-selector'],
                {
                    'dest': 'slide_selector',
                    'metavar': '<slide_selector>',
                },
            ),
        )
    )

    def __init__(self):
        HTML5Writer.__init__(self)
        self.translator_class = SlideTranslator

    def rebuild_output(self):
        # Post-process self.output: copy locally-referenced css/js/images next
        # to the destination and strip leading ../ segments from their hrefs.
        def copy_file(origin, destination):
            dest_dir = dirname(destination)
            if not exists(dest_dir):
                makedirs(dest_dir)
            shutil.copy(origin, destination)

        def roundrobin(*iterables):
            """
            roundrobin('ABC', 'D', 'EF') --> A D E B F C
            see: https://docs.python.org/3.4/library/itertools.html#itertools-recipes
            """
            from itertools import cycle, islice
            pending = len(iterables)
            # small modification to run under both Python 3 and 2
            next_attr = '__next__' if hasattr(iter(iterables[0]), '__next__') else 'next'
            nexts = cycle(getattr(iter(it), next_attr) for it in iterables)
            while pending:
                try:
                    for next in nexts:
                        yield next()
                except StopIteration:
                    pending -= 1
                    nexts = cycle(islice(nexts, pending))

        output = self.output
        href_pattern = re.compile('href=".*?"|src=".*?"')
        path_pattern = re.compile('"(.*?)"')
        hrefs = re.findall(href_pattern, output)
        save_to_destination = self.destination.destination_path not in ('<stdout>', '<string>')
        dest_dir = dirname(self.destination.destination_path)
        if save_to_destination:
            source_dir = dirname(self.document.settings._source)
        for i, href in enumerate(hrefs):
            path = re.findall(path_pattern, href)[0]
            if urlparse(path).netloc:  # scheme is not always present, but netloc is
                continue
            href_path = re.findall('^(?:\.+/)*(.*)', path)[0]
            hrefs[i] = re.sub(path_pattern, '"%s"' % href_path, href)
            if save_to_destination:
                source_path = join(source_dir, path)
                if not isfile(source_path):
                    # try css, js files relative to the template path
                    if isfile(self.document.settings.template):
                        template_dir = dirname(self.document.settings.template)
                    else:
                        template_dir = curdir
                    rel_template_path = abspath(join(template_dir, path))
                    if not isfile(rel_template_path):
                        self.document.reporter.error('file not found: %s' % source_path)
                        continue
                    source_path = rel_template_path
                copy_file(source_path, join(dest_dir, href_path))
        # rebuild output references
        splitted = re.split(href_pattern, output)
        self.output = ''.join(roundrobin(splitted, hrefs))
        return

    def translate(self):
        # When the destination is a directory, derive the output filename
        # from the source document's basename.
        destination_path = self.destination.destination_path
        if destination_path not in ('<stdout>', '<string>'):  # there is a specified destination
            dest_dir, extension = splitext(destination_path)
            if extension:  # there is a filename in the destination
                dest_dir, dest_filename = split(destination_path)
            else:  # The specified destination is a directory. A new filename is necessary
                dest_filename = splitext(basename(self.document.settings._source))[0] + '.html'
            self.destination = FileOutput(destination_path=join(dest_dir, dest_filename),
                                          encoding='utf-8')
        HTML5Writer.translate(self)
        self.rebuild_output()
        return

    def get_transforms(self):
        return HTML5Writer.get_transforms(self) + [SlideTransform]
class SlideTranslator(HTML5Translator):
    # Translator that renders the transformed doctree as framework-specific
    # slide markup and spatially distributes slides via data-* attributes.
    tag_name_re = re.compile('^\w+')
    class_re = re.compile('\.([\w\-]+)')
    # NOTE(review): the '|' inside this character class is a literal pipe,
    # so ids containing '|' would match — confirm whether that is intended.
    id_re = re.compile('#([\w|\-]+)')

    def __init__(self, *args):
        self.rst_terms['section'] = ['slide', 'visit_section', 'depart_section']  # [0] might be replaced later
        self.rst_terms['slide_contents'] = ('section', 'default_visit', 'default_departure')
        self.rst_terms['title'] = (None, 'visit_title', 'depart_title')  # flatten titles
        self.rst_terms['presentation'] = (None, 'visit_presentation', None)
        HTML5Translator.__init__(self, *args)
        self.metatags.append(tag.meta(generator='rst2html5slides'))
        self.metatags.append(tag.meta(generator_homepage='https://pypi.python.org/pypi/rst2html5slides'))
        self._reset_settings()
        return

    def _compacted_paragraph(self, node):
        '''
        a single node followed by a single field list should also be compacted
        '''
        field_list_sibling = len([n for n in node.parent
                                  if not isinstance(n, (nodes.field_list))]) == 1
        return not node['classes'] and \
            (HTML5Translator._compacted_paragraph(self, node) or field_list_sibling)

    def visit_section(self, node):
        if self.document.settings.manual_slide_identification:
            node['ids'] = []
        elif 'id' in self.slide_attributes:
            node['ids'] = [self.slide_attributes['id']]
        node.attributes.update(self.slide_attributes)
        if not self.distribution.get('func'):
            # (Only) slide data-* attributes are cumulative
            # otherwise impress.js defaults data-x,y,z to 0, data-scale to 1 etc.
            keys = list(self.slide_attributes.keys())
            for key in keys:
                if not key.startswith('data-'):
                    del self.slide_attributes[key]
        else:  # do not accumulate any slide attributes
            self.slide_attributes = {}
        self.default_visit(node)
        return

    def depart_section(self, node):
        self.heading_level = 0  # a new section reset title level
        if 'class' in self.slide_selector:
            node['classes'].extend([self.slide_selector['class']])
        self.default_departure(node)
        return

    def visit_title(self, node):
        '''
        In rst2html5slides, subsections are flattened and every title node is grouped
        inside the same header as a nodes.title.
        According to their position, the title node should become h1, h2, h3 etc.

        Example:

        <header>
            <title 1>
            <title 2>
            <title 3>

        becomes:

        <header>
            <h1>Title 1</h1>
            <h2>Subtitle</h2>
            <h3>Subsubtitle</h3>

        see test/cases.py h2 and h3
        '''
        self.default_visit(node)
        self.heading_level += 1
        return

    def depart_document(self, node):
        self._distribute_slides()
        if len(self.context.stack[0]):
            deck = getattr(tag, self.deck_selector['tag'])(
                *self.context.stack[0],
                id=self.deck_selector.get('id', None),
                class_=self.deck_selector.get('class', None)
            )
            self.context.stack = ['\n', deck, '\n']
        # _reset is necessary to run the several test cases
        self._reset_settings()
        return

    def visit_field(self, node):
        # Fields become slide attributes unless a visit_field_<name> hook exists.
        field_name = node.children[0].astext()
        field_value = self._strip_spaces(node.children[1].astext())
        visit_field_func = getattr(self, 'visit_field_' + field_name.replace('-', '_'), None)
        if visit_field_func:
            visit_field_func(field_value)
        else:
            self.slide_attributes[field_name] = field_value
        raise nodes.SkipNode

    def visit_field_class(self, value):
        self.slide_attributes['classes'] = value.split()
        return

    def visit_field_classes(self, value):
        self.visit_field_class(value)
        return

    def visit_presentation(self, node):
        # Apply presentation-wide options collected by the directive.
        if 'distribution' in node:
            self._get_distribution(node['distribution'])
        if 'deck-selector' in node:
            self._get_deck_selector(node['deck-selector'])
        if 'slide-selector' in node:
            self._get_slide_selector(node['slide-selector'])
        if 'increment' in node:
            self._get_increment(node['increment'])
        raise nodes.SkipNode

    def _reset_settings(self):
        # Per-framework defaults: (template, deck selector, slide selector,
        # distribution function name).
        config = {
            'impress.js': ('impress.html', 'div#impress', 'div.step', 'linear'),
            'jmpress.js': ('jmpress.html', 'div#jmpress', 'div.step', 'linear'),
            'deck.js': ('deck.html', 'div#deck', 'div.step', 'none'),
            'None': ('default.html', 'deck', 'slide', 'none'),
        }
        self.distribution = {
            'incr_x': 1600,
            'incr_y': 1600,
            'data-*': {},
            'visited': 0,
        }
        self.deck_selector = {}
        self.slide_selector = {}
        self.slide_attributes = {}
        settings = self.document.settings
        template, deck_selector, slide_selector, distribution = config[settings.presentation]
        settings.template = settings.template or abspath(join(dirname(__file__), curdir, 'template', template))
        self._get_distribution(settings.distribution or distribution)
        self._get_deck_selector(settings.deck_selector or deck_selector)
        self._get_slide_selector(settings.slide_selector or slide_selector)
        self._get_increment(settings.increment)
        return

    def _get_deck_selector(self, value):
        # Parse a CSS-like "tag#id.class" selector for the deck wrapper.
        tag_name = self.tag_name_re.findall(value)
        id = self.id_re.findall(value)
        class_ = self.class_re.findall(value)
        if tag_name:
            self.deck_selector['tag'] = tag_name[0]
        if id:
            self.deck_selector['id'] = id[0]
        if class_:
            self.deck_selector['class'] = class_[0]
        return

    def _get_slide_selector(self, value):
        # Parse "tag.class" for each slide; the tag replaces rst_terms['section'][0].
        tag_name = self.tag_name_re.findall(value)
        class_ = self.class_re.findall(value)
        if tag_name:
            self.rst_terms['section'][0] = tag_name[0]
        if class_:
            self.slide_selector['class'] = class_[0]
        return

    def _get_increment(self, value):
        # "X" or "X Y"; Y defaults to X.
        if not value:
            return
        value = value.split()
        self.distribution['incr_x'] = int(value[0])
        self.distribution['incr_y'] = int(value[1]) if len(value) > 1 else self.distribution['incr_x']
        return

    def _get_distribution(self, field_value):
        # distribute all slides so far with current distribution function
        self._distribute_slides()
        if not field_value:
            return
        values = field_value.split()
        # distribution function names must end with '_distribution'
        self.distribution['func'] = getattr(self, values[0] + '_distribution', None)
        if len(values) > 1:
            self.distribution['parameter'] = int(values[1])
        elif 'parameter' in self.distribution:
            del self.distribution['parameter']
        return

    def _distribute_slides(self):
        '''
        Distribute slides spatially according to some predefined function.
        data-* attributes are used to keep the coordinates.
        '''
        if not (self.distribution.get('func') and self.context.stack):
            return
        initial_pos = self.distribution['visited']
        slides = (elem for item in self.context.stack[0][initial_pos::] for elem in item if isinstance(elem, Element))
        self.distribution['visited'] = len(self.context.stack[0])

        def enumerate_slides(slides):
            # Restart the index whenever a slide carries explicit data-*.
            index = 0
            for slide in slides:
                slide_data = self._get_data(slide)
                if slide_data:
                    index = 0
                    self.distribution['data-*'].update(slide_data)
                yield index, slide
                index += 1

        self.distribution['func'](enumerate_slides(slides))
        return

    def _get_data(self, slide):
        # Extract data-* attributes, coercing numeric-looking strings.
        def convert(value):
            if isinstance(value, (int, float)):
                return value
            try:
                if '.' in value:
                    return float(value)
                else:
                    return int(value)
            except ValueError:
                return value
        return {q[0].localname: convert(q[1]) for q in slide.attrib
                if q[0].localname.startswith('data-')}

    def none_distribution(self, enumerated_slides):
        '''
        Makes no positioning, but keep slide's data-* attributes
        '''
        data_attributes = self.distribution['data-*']
        for index, slide in enumerated_slides:
            slide(**data_attributes)

    def linear_distribution(self, enumerated_slides):
        '''
        Linear distribution
        '''
        data_attributes = self.distribution['data-*']
        data_attributes.setdefault('data-x', 0)
        incr_x = self.distribution['incr_x']
        for index, slide in enumerated_slides:
            slide(**data_attributes)
            data_attributes['data-x'] += incr_x
        return

    def grid_distribution(self, enumerated_slides):
        '''
        change line after certain number of slides
        It might receive one parameter to indicate the length of the line

        [ ] [ ] [ ] [ ]
        [ ] [ ] [ ] [ ]
        ...
        '''
        data_attributes = self.distribution['data-*']
        line_length = self.distribution.get('parameter', 4)
        incr_x = self.distribution['incr_x']
        incr_y = self.distribution['incr_y']
        for index, slide in enumerated_slides:
            if index == 0:
                x_ref = data_attributes.setdefault('data-x', 0)
            elif index % line_length == 0:  # break line
                data_attributes['data-x'] = x_ref
                data_attributes['data-y'] = data_attributes.setdefault('data-y', 0) + incr_y
            slide(**data_attributes)
            data_attributes['data-x'] += incr_x
        return

    def grid_rotate_distribution(self, enumerated_slides):
        '''
        Similar to grid, but slides are rotated when line changes
        '''
        data_attributes = self.distribution['data-*']
        line_length = self.distribution.get('parameter', 4)
        incr_x = self.distribution['incr_x']
        incr_y = self.distribution['incr_y']
        for index, slide in enumerated_slides:
            if index == 0:
                data_attributes.setdefault('data-x', 0)
                # jmpress doesn't rotate clockwise when it is 180
                rotate_z_ref = data_attributes.setdefault('data-rotate-z', 0) + 179.9
            elif index % line_length == 0:
                data_attributes['data-x'] -= incr_x  # keep same data-x reverting last += incr_x
                data_attributes['data-y'] = data_attributes.setdefault('data-y', 0) + incr_y
                incr_x = -incr_x
                data_attributes['data-rotate-z'] = rotate_z_ref \
                    if data_attributes['data-rotate-z'] != rotate_z_ref else (rotate_z_ref - 179.9)
            slide(**data_attributes)
            data_attributes['data-x'] += incr_x
def main():
    """Command-line entry point: run docutils with the slideshow writer."""
    from docutils.core import publish_cmdline, default_description
    summary = 'Translates a restructuredText document to a HTML5 slideshow. '
    publish_cmdline(writer=SlideWriter(), description=summary + default_description)


if __name__ == '__main__':
    main()
| wdv4758h/rst2html5slides | src/rst2html5slides.py | Python | mit | 22,222 | [
"VisIt"
] | 6d7be278ff33b014e0ef3a734cf4adc1d32baf99ec578feff3d2f14e21ad47dc |
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from lmfit import Parameters
from lmfit.models import GaussianModel, LinearModel
from astropy.constants import c
from fitelp.label_tools import line_label
import fitelp.constants as constants
constants.init()
def vel_to_wave(restWave, vel, flux, fluxError=None, delta=False):
    """Convert a velocity-space profile to wavelength space.

    Args:
        restWave: rest wavelength of the emission line.
        vel: velocity value(s) in km/s.
        flux: flux density per unit velocity; rescaled to per unit wavelength.
        fluxError: optional flux uncertainties, rescaled like flux.
        delta: if True, treat ``vel`` as a velocity interval (no restWave
            offset is added to the converted wavelength).

    Returns:
        (wave, flux) or (wave, flux, fluxError) when errors are supplied.
    """
    if delta is True:
        wave = (vel / c.to('km/s').value) * restWave
    else:
        wave = (vel / c.to('km/s').value) * restWave + restWave
    flux = flux / (restWave / c.to('km/s').value)
    if fluxError is not None:
        fluxError = fluxError / (restWave / c.to('km/s').value)
        # BUG FIX: this branch previously returned the *input* `vel` instead
        # of the converted `wave`, inconsistent with the error-free branch
        # below and with the symmetric wave_to_vel() function.
        return wave, flux, fluxError
    else:
        return wave, flux
def wave_to_vel(restWave, wave, flux, fluxError=None, delta=False):
    """Convert a wavelength-space profile to velocity space (km/s).

    ``delta=True`` treats ``wave`` as a wavelength interval rather than an
    absolute wavelength.  Flux (and optional errors) are rescaled from
    per-unit-wavelength to per-unit-velocity.

    Returns (vel, flux) or (vel, flux, fluxError) when errors are supplied.
    """
    light_speed = c.to('km/s').value
    if delta is True:
        vel = (wave / restWave) * light_speed
    else:
        vel = ((wave - restWave) / restWave) * light_speed
    flux = flux * (restWave / light_speed)
    if fluxError is None:
        return vel, flux
    fluxError = fluxError * (restWave / light_speed)
    return vel, flux, fluxError
class FittingProfile(object):
    """Fit a linear continuum plus a sum of Gaussian components to a single
    emission-line profile using lmfit.

    The x axis may be wavelength ('wave') or velocity ('vel'); conversions
    between the two use wave_to_vel()/vel_to_wave().  Fit reports are
    appended to a per-region log file and plots saved under
    constants.OUTPUT_DIR.
    """
    def __init__(self, wave, flux, restWave, lineName, zone, rp, fluxError=None, xAxis='vel', initVals='vel'):
        """The input vel and flux must be limited to a single emission line profile.

        wave/flux: observed spectrum restricted to one line.
        restWave: rest wavelength of the line (same units as wave).
        lineName/zone: identifiers used in logs and plot titles.
        rp: region-parameters object (provides emProfiles, regionName,
            scaleFlux, componentLabels/Colours, ...).
        fluxError: optional 1-sigma flux uncertainties.
        xAxis: 'vel' to fit in velocity space, 'wave' for wavelength space.
        initVals: units ('vel' or 'wave') of user-supplied initial guesses.
        """
        self.flux = flux
        self.fluxError = fluxError
        self.restWave = restWave
        self.lineName = lineName
        self.zone = zone
        # NOTE(review): weights are derived from fluxError *before* the
        # velocity-space conversion below rescales self.fluxError, so the
        # weights stay in wavelength units — confirm this is intended.
        self.weights = self._weights()
        self.rp = rp
        self.xAxis = xAxis
        self.initVals = initVals
        if xAxis == 'vel':
            # Convert the wavelength axis (and flux density) to velocity.
            if fluxError is None:
                vel, self.flux = wave_to_vel(restWave, wave, flux)
            else:
                vel, self.flux, self.fluxError = wave_to_vel(restWave, wave, flux, fluxError)
            self.x = vel
        else:
            self.x = wave
        self.linGaussParams = Parameters()  # filled in by the fitting methods
    def _weights(self):
        """Return 1/sigma weights for lmfit, or None when no errors given."""
        if self.fluxError is None:
            return None
        else:
            fluxErrorCR = self.fluxError# - self.continuum
            return 1./fluxErrorCR
    def _get_amplitude(self, numOfComponents, modelFit):
        """Sum the best-fit amplitudes of the g1..gN Gaussian components."""
        amplitudeTotal = 0.
        for i in range(numOfComponents):
            amplitudeTotal = amplitudeTotal + modelFit.best_values['g%d_amplitude' % (i+1)]
        print("Amplitude Total is %f" % amplitudeTotal)
        return amplitudeTotal
    def _gaussian_component(self, pars, prefix, c, s, a, limits):
        """Fits a gaussian with given parameters.
        pars is the lmfit Parameters for the fit, prefix is the label of the gaussian, c is the center, s is sigma,
        a is amplitude. Returns the Gaussian model.

        limits: dict with keys 'c', 's', 'a'.  Each value is False (fix the
        parameter), a (min, max) tuple, or a fractional bound around the
        initial value.  String-valued c/s/a are installed as lmfit
        constraint expressions instead of numeric initial values.
        """
        varyCentre = True
        varySigma = True
        varyAmp = True
        if limits['c'] is False:
            varyCentre = False
            cMin, cMax = -np.inf, np.inf
        elif type(limits['c']) is tuple:
            cMin = limits['c'][0]
            cMax = limits['c'][1]
        else:
            # Fractional bound: +/- limits['c'] * c around the initial centre.
            cMin = c - c*limits['c']
            cMax = c + c*limits['c']
        if limits['s'] is False:
            varySigma = False
            sMin, sMax = -np.inf, np.inf
        elif type(limits['s']) is tuple:
            sMin = limits['s'][0]
            sMax = limits['s'][1]
        else:
            sMin = s - s * limits['s']
            sMax = s + s * limits['s']
        if limits['a'] is False:
            varyAmp = False
            aMin, aMax = -np.inf, np.inf
        elif type(limits['a']) is tuple:
            aMin = limits['a'][0]
            aMax = limits['a'][1]
        else:
            aMin = a - a * limits['a']
            aMax = a + a * limits['a']
        g = GaussianModel(prefix=prefix)
        pars.update(g.make_params())
        if isinstance(c, str):
            # A string means "tie to another parameter" via an expression.
            pars[prefix + 'center'].set(expr=c, min=cMin, max=cMax, vary=varyCentre)
        else:
            pars[prefix + 'center'].set(c, min=cMin, max=cMax, vary=varyCentre)
        if isinstance(s, str):
            pars[prefix + 'sigma'].set(expr=s, min=sMin, max=sMax, vary=varySigma)
        else:
            pars[prefix + 'sigma'].set(s, min=sMin, max=sMax, vary=varySigma)
        if isinstance(a, str):
            pars[prefix + 'amplitude'].set(expr=a, min=aMin, max=aMax, vary=varyAmp)
        else:
            pars[prefix + 'amplitude'].set(a, min=aMin, max=aMax, vary=varyAmp)
        return g
    def multiple_close_emission_lines(self, lineNames, cListInit, sListInit, lS, lI):
        """All lists should be the same length.

        Simultaneously fit several blended emission lines sharing one linear
        continuum.  cListInit/sListInit are initial centres and sigmas
        (velocity units, see initVals); lS/lI seed the continuum slope and
        intercept.  Returns (lmfit ModelResult, evaluated components dict).
        """
        gList = []
        # Assume initial parameters are in velocity
        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)
        for j, lineName in enumerate(lineNames):
            numComps = self.rp.emProfiles[lineName]['numComps']
            restWave = self.rp.emProfiles[lineName]['restWavelength']
            copyFrom = self.rp.emProfiles[lineName]['copyFrom']
            if copyFrom is not None:
                # Tie this line's centres/sigmas to another line through
                # lmfit constraint expressions (scaled by wavelength ratio).
                copyFromRestWave = self.rp.emProfiles[copyFrom]['restWavelength']
                cList = ['g{0}{1}_center*{2}'.format(copyFrom.replace('-', ''), (i + 1), (restWave / copyFromRestWave)) for i in range(numComps)]
                sList = ['g{0}{1}_sigma'.format(copyFrom.replace('-', ''), i + 1) for i in range(numComps)]
                if type(self.rp.emProfiles[lineName]['ampList']) is list:
                    aList = self.rp.emProfiles[lineName]['ampList']
                    if self.xAxis == 'vel':
                        aList = vel_to_wave(restWave, vel=0, flux=np.array(aList))[1]
                else:
                    # Scalar ampList means a fixed amplitude ratio w.r.t.
                    # the copied-from line.
                    ampRatio = self.rp.emProfiles[lineName]['ampList']
                    aList = ['g{0}{1}_amplitude*{2}'.format(copyFrom.replace('-', ''), i + 1, ampRatio) for i in range(numComps)]
            else:
                # Independent line: convert velocity-space initial guesses
                # to wavelength space.
                cList = vel_to_wave(restWave, vel=np.array(cListInit), flux=0)[0]
                sList = vel_to_wave(restWave, vel=np.array(sListInit), flux=0, delta=True)[0]
                aListInit = self.rp.emProfiles[lineName]['ampList']
                aList = vel_to_wave(restWave, vel=0, flux=np.array(aListInit))[1]
            limits = self.rp.emProfiles[lineName]['compLimits']
            for i in range(numComps):
                # Per-component limits may be given as a list or one value.
                if type(limits['c']) is list:
                    cLimit = limits['c'][i]
                else:
                    cLimit = limits['c']
                if type(limits['s']) is list:
                    sLimit = limits['s'][i]
                else:
                    sLimit = limits['s']
                if type(limits['a']) is list:
                    aLimit = limits['a'][i]
                else:
                    aLimit = limits['a']
                lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
                if len(lineNames) == 1:
                    prefix = 'g{0}_'.format(i + 1)
                else:
                    prefix = 'g{0}{1}_'.format(lineName.replace('-', ''), i + 1)
                gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
        gList = np.array(gList)
        mod = lin + gList.sum()
        init = mod.eval(self.linGaussParams, x=self.x)
        out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
        f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
        print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        print(out.fit_report())
        f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        f.write(out.fit_report())
        f.close()
        components = out.eval_components()
        if not hasattr(self.rp, 'plotResiduals'):
            self.rp.plotResiduals = True
        # NOTE(review): lineName is the leaked loop variable, i.e. the *last*
        # entry of lineNames — confirm that is the intended source of
        # numComps for the plot below.
        numComps = self.rp.emProfiles[lineName]['numComps']
        self.plot_emission_line(numComps, components, out, self.rp.plotResiduals, lineNames, init=init, scaleFlux=self.rp.scaleFlux)
        return out, components
    def lin_and_multi_gaussian(self, numOfComponents, cList, sList, aList, lS, lI, limits):
        """All lists should be the same length.

        Fit a single emission line with numOfComponents Gaussians on top of
        a linear continuum.  Initial centres/sigmas/amplitudes are converted
        between velocity and wavelength space when xAxis and initVals
        disagree.  Returns (lmfit ModelResult, evaluated components dict).
        """
        gList = []
        if self.xAxis == 'wave' and self.initVals == 'vel':
            cList = vel_to_wave(self.restWave, vel=np.array(cList), flux=0)[0]
            sList = vel_to_wave(self.restWave, vel=np.array(sList), flux=0, delta=True)[0]
            aList = vel_to_wave(self.restWave, vel=0, flux=np.array(aList))[1]
        elif self.xAxis == 'vel' and self.initVals == 'wave':
            cList = wave_to_vel(self.restWave, wave=np.array(cList), flux=0)[0]
            sList = wave_to_vel(self.restWave, wave=np.array(sList), flux=0, delta=True)[0]
            aList = wave_to_vel(self.restWave, wave=0, flux=np.array(aList))[1]
        lin = LinearModel(prefix='lin_')
        self.linGaussParams = lin.guess(self.flux, x=self.x)
        self.linGaussParams.update(lin.make_params())
        self.linGaussParams['lin_slope'].set(lS, vary=True)
        self.linGaussParams['lin_intercept'].set(lI, vary=True)
        for i in range(numOfComponents):
            # Per-component limits may be given as a list or one value.
            if type(limits['c']) is list:
                cLimit = limits['c'][i]
            else:
                cLimit = limits['c']
            if type(limits['s']) is list:
                sLimit = limits['s'][i]
            else:
                sLimit = limits['s']
            if type(limits['a']) is list:
                aLimit = limits['a'][i]
            else:
                aLimit = limits['a']
            lims = {'c': cLimit, 's': sLimit, 'a': aLimit}
            prefix = 'g{0}_'.format(i+1)
            gList.append(self._gaussian_component(self.linGaussParams, prefix, cList[i], sList[i], aList[i], lims))
        gList = np.array(gList)
        mod = lin + gList.sum()
        init = mod.eval(self.linGaussParams, x=self.x)
        out = mod.fit(self.flux, self.linGaussParams, x=self.x, weights=self.weights)
        f = open(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, "{0}_Log.txt".format(self.rp.regionName)), "a")
        print("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        print(out.fit_report())
        f.write("######## %s %s Linear and Multi-gaussian Model ##########\n" % (self.rp.regionName, self.lineName))
        f.write(out.fit_report())
        f.close()
        components = out.eval_components()
        if not hasattr(self.rp, 'plotResiduals'):
            self.rp.plotResiduals = True
        self.plot_emission_line(numOfComponents, components, out, self.rp.plotResiduals, init=init, scaleFlux=self.rp.scaleFlux)
        self._get_amplitude(numOfComponents, out)
        return out, components
    def plot_emission_line(self, numOfComponents, components, out, plotResiduals=True, lineNames=None, init=None, scaleFlux=1e14):
        """Plot data, total fit and individual Gaussian components (plus an
        optional residuals panel) and save the figure under
        constants.OUTPUT_DIR/<regionName>/.
        """
        ion, lambdaZero = line_label(self.lineName, self.restWave)
        fig = plt.figure("%s %s %s" % (self.rp.regionName, ion, lambdaZero))
        if plotResiduals is True:
            # Reserve the lower part of the figure for the residuals panel.
            frame1 = fig.add_axes((.1, .3, .8, .6))
        plt.title("%s %s" % (ion, lambdaZero))
        if self.xAxis == 'wave':
            x = self.x
            xLabel = constants.WAVE_AXIS_LABEL
            yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_WAVE_AXIS_LABEL
        elif self.xAxis == 'vel':
            if hasattr(self.rp, 'showSystemicVelocity') and self.rp.showSystemicVelocity is True:
                x = self.x - self.rp.systemicVelocity
                xLabel = constants.DELTA_VEL_AXIS_LABEL
            else:
                x = self.x
                xLabel = constants.VEL_AXIS_LABEL
            # NOTE(review): 'rp.plottingXRange' looks like a typo for
            # 'plottingXRange' — hasattr with a dotted name is always False,
            # so this xlim is never applied.
            if hasattr(self.rp, 'rp.plottingXRange') and self.rp.plottingXRange is not None:
                plt.xlim(self.rp.plottingXRange)
            yLabel = constants.FluxUnitsLabels(scaleFlux).FLUX_VEL_AXIS_LABEL
        else:
            raise Exception("Invalid xAxis argument. Must be either 'wave' or 'vel'. ")
        plt.plot(x, self.flux, label='Data')
        for i in range(numOfComponents):
            labelComp = self.rp.componentLabels
            if lineNames is None:
                plt.plot(x, components['g%d_' % (i+1)]+components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i])
            else:
                for j, lineName in enumerate(lineNames):
                    plt.plot(x, components['g{0}{1}_'.format(lineName.replace('-', ''), i + 1)] + components['lin_'], color=self.rp.componentColours[i], linestyle=':', label=labelComp[i])
        # plt.plot(x, components['lin_'], label='lin_')
        plt.plot(x, out.best_fit, color='black', linestyle='--', label='Fit')
        # plt.plot(x, init, label='init')
        plt.legend(loc='upper left')
        plt.ylabel(yLabel)
        if plotResiduals is True:
            frame1 = plt.gca()
            frame1.axes.get_xaxis().set_visible(False)
            frame2 = fig.add_axes((.1, .1, .8, .2))
            plt.plot(x, self.flux - out.best_fit)
            plt.axhline(y=0, linestyle='--', color='black')
            plt.ylabel('Residuals')
            # plt.locator_params(axis='y', nbins=3)
            # nbins = len(frame2.get_yticklabels())
            frame2.yaxis.set_major_locator(MaxNLocator(nbins=3, prune='upper'))
        plt.xlabel(xLabel)
        plt.savefig(os.path.join(constants.OUTPUT_DIR, self.rp.regionName, self.lineName + " {0} Component Linear-Gaussian Model".format(numOfComponents)), bbox_inches='tight')
def plot_profiles(lineNames, rp, nameForComps='', title='', sortedIndex=None, plotAllComps=False, xAxis='vel', logscale=False, ymin=None):
    """Overlay observed profiles, total fits and (optionally) individual
    Gaussian components of several emission lines on a single figure and
    save it under constants.OUTPUT_DIR/<regionName>/.

    Args:
        lineNames: emission lines to plot; each must have 'plotInfo' set.
        rp: region-parameters object.
        nameForComps: line whose components are drawn when plotAllComps is False.
        title: figure title, also used as the output file name.
        sortedIndex: optional permutation applied to the legend entries.
        plotAllComps: draw the Gaussian components of every line.
        xAxis: 'vel' or 'wave'.
        logscale: use a logarithmic y axis.
        ymin: optional lower y-axis limit.
    """
    try:
        plt.figure(title)
        ax = plt.subplot(1, 1, 1)
        plt.title(title)
        if xAxis == 'wave':
            xLabel = constants.WAVE_AXIS_LABEL
            yLabel = constants.FluxUnitsLabels(rp.scaleFlux).FLUX_WAVE_AXIS_LABEL
        elif xAxis == 'vel':
            if hasattr(rp, 'showSystemicVelocity') and rp.showSystemicVelocity is True:
                xLabel = constants.DELTA_VEL_AXIS_LABEL
            else:
                xLabel = constants.VEL_AXIS_LABEL
            # BUG FIX: the attribute name previously included the 'rp.'
            # prefix ('rp.plottingXRange'), so hasattr was always False and
            # the requested x-range was silently ignored.  Guard matches
            # FittingProfile.plot_emission_line.
            if hasattr(rp, 'plottingXRange') and rp.plottingXRange is not None:
                plt.xlim(rp.plottingXRange)
            yLabel = constants.FluxUnitsLabels(rp.scaleFlux).FLUX_VEL_AXIS_LABEL
        else:
            raise Exception("Invalid xAxis argument. Must be either 'wave' or 'vel'. ")
        plt.xlabel(xLabel)
        plt.ylabel(yLabel)
        for i in range(len(lineNames)):
            name, x, flux, mod, col, comps, lab = rp.emProfiles[lineNames[i]]['plotInfo']
            if xAxis == 'vel' and hasattr(rp, 'showSystemicVelocity') and rp.showSystemicVelocity is True:
                x = x - rp.systemicVelocity
            ax.plot(x, flux, color=col, label=lab)
            ax.plot(x, mod, color=col, linestyle='--')
            if plotAllComps is True:
                for idx in range(rp.emProfiles[lineNames[i]]['numComps']):
                    plt.plot(x, comps['g%d_' % (idx + 1)] + comps['lin_'], color=rp.componentColours[idx], linestyle=':')
            else:
                # Only draw the components of the designated line.
                if name == nameForComps:
                    for idx in range(rp.emProfiles[lineNames[i]]['numComps']):
                        plt.plot(x, comps['g%d_' % (idx + 1)] + comps['lin_'], color=rp.componentColours[idx], linestyle=':')
        if sortedIndex is not None:
            handles, labels = ax.get_legend_handles_labels()
            handles2 = [handles[idx] for idx in sortedIndex]
            labels2 = [labels[idx] for idx in sortedIndex]
            ax.legend(handles2, labels2)
        else:
            ax.legend()
        if logscale is True:
            ax.set_yscale('log')
        if ymin is not None:
            ax.set_ylim(bottom=ymin)
        plt.savefig(os.path.join(constants.OUTPUT_DIR, rp.regionName, title.strip(' ') + '.png'), bbox_inches='tight')
    except KeyError:
        # Deliberate best-effort: a line without 'plotInfo' aborts the plot.
        print("SOME IONS IN {0} HAVE NOT BEEN DEFINED.".format(lineNames))
| daniel-muthukrishna/GiantStarFormingRegions | fitelp/fit_line_profiles.py | Python | mit | 16,449 | [
"Gaussian"
] | 2c59b08874ad0440c1a6c1cbc3f805fc573fb1aeac86cc0e36247be2bc9a603d |
from . import get_calibration_dict, gaussian_array, cos_gaussian_array, \
sin_gaussian_array
from . import Waveform, Element, Sequence
import numpy as np
def make_readout_seq_points(channels=[4]):
    """
    Single-element sequence: a square pulse spanning the readout window,
    with marker 1 raised slightly ahead of it (by marker_readout_delay).
    Timings come from the calibration dictionary.
    """
    p_dict = get_calibration_dict()
    res = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    marker_start = readout_start - p_dict['marker_readout_delay']
    # Convert all times to sample counts.
    start_pts = round(readout_start / res)
    marker_start_pts = round(marker_start / res)
    readout_pts = round(p_dict['readout_time'] / res)
    marker_pts = round(p_dict['marker_time'] / res)
    total_pts = round(p_dict['cycle_duration'] / res)
    readout_sequence = Sequence(name='plain_readout', variable='')
    wf = Waveform(length=total_pts, channel=channels[0])
    wf.wave[start_pts:start_pts + readout_pts] = 1
    wf.add_marker(1, marker_start_pts, marker_pts)
    elem = Element()
    elem.add_waveform(wf)
    readout_sequence.add_element(elem)
    readout_sequence.check()
    return readout_sequence
################################################################
# Spectroscopy
################################################################
def make_full_ssb_wave_points(freq=8e6, duration=20e-6, channels=[1, 2]):
    """
    Continuous I/Q pair (cosine on I, negated sine on Q) at ``freq`` for
    ``duration`` seconds at 1 GS/s; no markers are added.
    """
    res = 1 / 1e9  # fixed 1 GS/s sample period
    n_pts = duration / res
    phase = np.arange(n_pts) * res * freq * 2 * np.pi
    wf_i = Waveform(channel=channels[0])
    wf_q = Waveform(channel=channels[1])
    wf_i.wave = np.cos(phase)
    wf_q.wave = -1 * np.sin(phase)
    elem = Element()
    elem.add_waveform(wf_i)
    elem.add_waveform(wf_q)
    seq = Sequence(name='ssb_seq')
    seq.add_element(elem)
    seq.check()
    return seq
def make_ssb_qubit_seq(start=0, stop=200e6, step=1e6, channels=[1, 2, 4],
                       pulse_mod=False):
    """
    Cosine and sine waves for qubit time with range of frequencies, square
    readout wave for readout time. Markers on readout channel (1 for readout
    start, 2 for seq start).

    One sequence element per modulation frequency in [start, stop] with the
    given step.  channels = [I, Q, readout].  When pulse_mod is True a
    marker of twice the qubit-drive length is raised on both I and Q.
    """
    ssb_sequence = Sequence(name='qubit_ssb',
                            variable='ssb_qubit_modulation_freq',
                            variable_unit='Hz',
                            step=step,
                            start=start,
                            stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    # Times (s) -> sample counts.
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    qubit_points = round(p_dict['qubit_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    # Readout pulse + marker are the same in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[2])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    qubit_time_array = np.arange(qubit_points) * resolution
    freq_array = ssb_sequence.variable_array
    pulse_mod_points = qubit_points * 2
    for i, freq in enumerate(freq_array):
        element = Element()
        qubit_i = Waveform(length=total_points, channel=channels[0])
        qubit_q = Waveform(length=total_points, channel=channels[1])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        qubit_start = pulse_end_points - qubit_points
        qubit_end = pulse_end_points
        angle = qubit_time_array * freq * 2 * np.pi
        cos_array = np.cos(angle)
        sin_array = np.sin(angle)
        qubit_i.wave[qubit_start:qubit_end] = cos_array
        qubit_q.wave[qubit_start:qubit_end] = -1 * sin_array
        if pulse_mod:
            qubit_i.add_marker(1, pulse_end_points - pulse_mod_points,
                               pulse_mod_points)
            qubit_q.add_marker(1, pulse_end_points - pulse_mod_points,
                               pulse_mod_points)
        element.add_waveform(qubit_i)
        element.add_waveform(qubit_q)
        element.add_waveform(readout_waveform)
        ssb_sequence.add_element(element)
    ssb_sequence.check()
    return ssb_sequence
################################################################
# Rabi and T1
################################################################
def make_rabi_square_sequence_points(pi_amp=1, start=0, stop=200e-9, step=2e-9,
                                     channels=[1, 4], pulse_mod=False):
    """
    Square qubit drive of pi amplitude of varying duration, square readout
    drive. Markers on readout channel (1 for readout start, 2 for seq start).

    One element per drive duration in [start, stop] with the given step.
    channels = [qubit, readout].
    """
    rabi_sequence = Sequence(name='rabi',
                             variable='drive_duration',
                             variable_label='Drive Duration',
                             variable_unit='s',
                             step=step,
                             start=start,
                             stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    # CONSISTENCY FIX: this function read p_dict['cycle_time'] while every
    # other builder in this module reads 'cycle_duration' — presumably a
    # typo (TODO confirm against get_calibration_dict()).
    total_points = round(p_dict['cycle_duration'] / resolution)
    pulse_mod_points = int(stop * 2 / resolution)
    # Readout pulse + marker are identical in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[1])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    qubit_duration_array_points = np.round(
        rabi_sequence.variable_array / resolution).astype(int)
    for i, qubit_points in enumerate(qubit_duration_array_points):
        element = Element()
        qubit_waveform = Waveform(length=total_points, channel=channels[0])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        # Drive ends at pulse_end and grows backwards with duration.
        qubit_start = int(pulse_end_points - qubit_points)
        qubit_end = int(pulse_end_points)
        qubit_waveform.wave[qubit_start:qubit_end] = pi_amp
        if pulse_mod:
            qubit_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                      pulse_mod_points)
        element.add_waveform(qubit_waveform)
        element.add_waveform(readout_waveform)
        rabi_sequence.add_element(element)
    rabi_sequence.check()
    return rabi_sequence
def make_rabi_gaussian_sequence_points(sigma_cuttoff, pi_amp=1, start=0,
                                       stop=200e-9, step=2e-9,
                                       channels=[1, 4], pulse_mod=False):
    """
    Gaussian qubit drive of varying duration (amplitude pi_amp, truncated at
    sigma_cuttoff), square readout drive. Markers on readout channel (1 for
    readout start, 2 for seq start).  channels = [qubit, readout].
    """
    rabi_sequence = Sequence(name='rabi',
                             variable='gaussian_drive_duration',
                             variable_label='Drive Duration',
                             variable_unit='s',
                             step=step,
                             start=start,
                             stop=stop)
    p_dict = get_calibration_dict()
    res = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    marker_start = readout_start - p_dict['marker_readout_delay']
    # Times (s) -> sample counts.
    readout_start_pts = round(readout_start / res)
    marker_start_pts = round(marker_start / res)
    readout_pts = round(p_dict['readout_time'] / res)
    pulse_end_pts = round(p_dict['pulse_end'] / res)
    marker_pts = round(p_dict['marker_time'] / res)
    total_pts = round(p_dict['cycle_duration'] / res)
    mod_pts = int(sigma_cuttoff * 4 * stop / res)
    # Readout pulse + marker are identical in every element; build once.
    template = Waveform(length=total_pts, channel=channels[1])
    template.wave[readout_start_pts:readout_start_pts + readout_pts] = 1
    template.add_marker(1, marker_start_pts, marker_pts)
    for idx, duration in enumerate(rabi_sequence.variable_array):
        envelope = gaussian_array(duration, sigma_cuttoff, pi_amp,
                                  p_dict['sample_rate'])
        element = Element()
        drive = Waveform(length=total_pts, channel=channels[0])
        readout = template.copy()
        if idx == 0:
            # Marker 2 on the first element flags the sequence start.
            readout.add_marker(2, 0, marker_pts)
        lo = int(pulse_end_pts - len(envelope))
        hi = int(pulse_end_pts)
        drive.wave[lo:hi] = envelope
        if pulse_mod:
            drive.add_marker(1, pulse_end_pts - mod_pts, mod_pts)
        element.add_waveform(drive)
        element.add_waveform(readout)
        rabi_sequence.add_element(element)
    rabi_sequence.check()
    return rabi_sequence
def make_rabi_gaussianSSB_sequence_points(sigma_cuttoff, pi_amp=1, start=0,
                                          stop=200e-9, step=2e-9,
                                          SSBfreq=100e6, channels=[1, 2, 4],
                                          pulse_mod=True):
    """
    Single-sideband-modulated Gaussian qubit drive (cosine envelope on I,
    negative sine envelope on Q) of varying duration, square readout drive.
    Markers on readout channel (1 for readout start, 2 for seq start).
    channels = [I, Q, readout].
    """
    rabi_sequence = Sequence(name='rabi',
                             variable='gaussian_drive_duration',
                             variable_label='Drive Duration',
                             variable_unit='s',
                             step=step,
                             start=start,
                             stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    # Times (s) -> sample counts.
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    pulse_mod_points = int(sigma_cuttoff * 4 * stop / resolution)
    # Readout pulse + marker are identical in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[2])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    for i, pi_duration in enumerate(rabi_sequence.variable_array):
        pi_pulseI = cos_gaussian_array(pi_duration, sigma_cuttoff,
                                       SSBfreq, pi_amp, p_dict['sample_rate'])
        pi_pulseQ = sin_gaussian_array(pi_duration, sigma_cuttoff,
                                       SSBfreq, pi_amp, p_dict['sample_rate'],
                                       positive=False)
        element = Element()
        qubit_waveformI = Waveform(length=total_points, channel=channels[0])
        qubit_waveformQ = Waveform(length=total_points, channel=channels[1])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        qubit_start = int(pulse_end_points - len(pi_pulseI))
        qubit_end = int(pulse_end_points)
        qubit_waveformI.wave[qubit_start:qubit_end] = pi_pulseI
        qubit_waveformQ.wave[qubit_start:qubit_end] = pi_pulseQ
        if pulse_mod:
            # NOTE(review): only the I waveform gets the pulse_mod marker;
            # make_ssb_qubit_seq marks both I and Q — confirm whether Q
            # should be marked here as well.
            qubit_waveformI.add_marker(1, pulse_end_points - pulse_mod_points,
                                       pulse_mod_points)
        element.add_waveform(qubit_waveformI)
        element.add_waveform(qubit_waveformQ)
        element.add_waveform(readout_waveform)
        rabi_sequence.add_element(element)
    rabi_sequence.check()
    return rabi_sequence
def make_t1_square_seq_points(pi_duration, pi_amp=1, start=0, stop=5e-6,
                              step=50e-9, channels=[1, 4], pulse_mod=False):
    """
    Square qubit drive for pi duration at pi amplitude on qubit with varying
    wait time before readout (square pulse for readout time). Markers on
    readout channel (1 for readout start, 2 for seq start).

    One element per drive-readout delay in [start, stop] with the given
    step.  channels = [qubit, readout].
    """
    t1_sequence = Sequence(name='t1',
                           variable='drive_readout_delay',
                           variable_label='Delay',
                           variable_unit='s',
                           step=step,
                           start=start,
                           stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    qubit_points = pi_duration / resolution
    total_points = round(p_dict['cycle_duration'] / resolution)
    # Readout pulse + marker are identical in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[1])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    delay_array_points = np.round(
        t1_sequence.variable_array / resolution).astype(int)
    # NOTE(review): qubit_points is already a sample count, so dividing the
    # sum by resolution again looks dimensionally off — confirm intent.
    pulse_mod_points = int((qubit_points * 4 + stop) / resolution)
    for i, delay_points in enumerate(delay_array_points):
        element = Element()
        qubit_waveform = Waveform(length=total_points, channel=channels[0])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        # Drive ends `delay_points` before the readout window.
        qubit_start = int(pulse_end_points - delay_points - qubit_points)
        qubit_end = int(qubit_start + qubit_points)
        qubit_waveform.wave[qubit_start:qubit_end] = pi_amp
        if pulse_mod:
            qubit_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                      pulse_mod_points)
        element.add_waveform(qubit_waveform)
        element.add_waveform(readout_waveform)
        t1_sequence.add_element(element)
    t1_sequence.check()
    return t1_sequence
def make_t1_gaussian_seq_points(pi_duration, sigma_cuttoff, pi_amp=1, start=0,
                                stop=5e-6, step=50e-9, channels=[1, 4],
                                pulse_mod=False):
    """
    Gaussian qubit drive for pi duration at pi amplitude on qubit with varying
    wait time before readout (square pulse for readout time). Markers on
    readout channel (1 for readout start, 2 for seq start).

    channels = [qubit, readout].
    """
    t1_sequence = Sequence(name='t1',
                           variable='drive_readout_delay',
                           variable_label='Delay',
                           variable_unit='s',
                           step=step,
                           start=start,
                           stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    # Readout pulse + marker are identical in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[1])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    delay_array_points = np.round(
        t1_sequence.variable_array / resolution).astype(int)
    pulse_mod_points = int((sigma_cuttoff * 4 + stop) / resolution)
    # The pi pulse does not depend on the delay; hoisted out of the loop
    # (it was recomputed identically on every iteration).
    pi_pulse = gaussian_array(pi_duration, sigma_cuttoff, pi_amp,
                              p_dict['sample_rate'])
    for i, delay_points in enumerate(delay_array_points):
        element = Element()
        qubit_waveform = Waveform(length=total_points, channel=channels[0])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        # Drive ends `delay_points` before the readout window.
        qubit_start = int(pulse_end_points - delay_points - len(pi_pulse))
        qubit_end = int(qubit_start + len(pi_pulse))
        qubit_waveform.wave[qubit_start:qubit_end] = pi_pulse
        if pulse_mod:
            qubit_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                      pulse_mod_points)
        element.add_waveform(qubit_waveform)
        element.add_waveform(readout_waveform)
        t1_sequence.add_element(element)
    t1_sequence.check()
    return t1_sequence
################################################################
# Ramsey and T2
################################################################
def make_ramsey_square_sequence_points(pi_duration, pi_half_amp=1 / 2, start=0,
                                       stop=200e-9, step=2e-9, channels=[1, 4],
                                       pulse_mod=False):
    """
    Two square pulses on qubit of pi duration of half pi amplitude separated
    by varying duration. Square readout with markers (1 for readout start,
    2 for seq start).

    channels = [qubit, readout].
    """
    ramsey_sequence = Sequence(name='ramsey',
                               variable='drive_drive_delay',
                               variable_label='Delay',
                               variable_unit='s',
                               step=step,
                               start=start,
                               stop=stop)
    p_dict = get_calibration_dict()
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    qubit_points = round(pi_duration / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    # Readout pulse + marker are identical in every element; build once.
    readout_template = Waveform(length=total_points, channel=channels[1])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    delay_array_points = np.round(
        ramsey_sequence.variable_array / resolution).astype(int)
    pulse_mod_points = int((stop + 4 * pi_duration) / resolution)
    for i, delay_points in enumerate(delay_array_points):
        element = Element()
        qubit_waveform = Waveform(length=total_points, channel=channels[0])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Marker 2 on the first element flags the sequence start.
            readout_waveform.add_marker(2, 0, marker_points)
        # Second pi/2 pulse ends at pulse_end; the first sits delay_points
        # earlier.
        qubit_start_first = int(
            pulse_end_points - delay_points - 2 * qubit_points)
        qubit_end_first = int(qubit_start_first + qubit_points)
        qubit_start_second = int(pulse_end_points - qubit_points)
        qubit_end_second = int(qubit_start_second + qubit_points)
        qubit_waveform.wave[qubit_start_first:qubit_end_first] = pi_half_amp
        qubit_waveform.wave[qubit_start_second:qubit_end_second] = pi_half_amp
        if pulse_mod:
            qubit_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                      pulse_mod_points)
        # BUG FIX: the qubit drive waveform was built but never attached to
        # the element (only the readout was added), so the Ramsey drive was
        # never output; every sibling builder adds both waveforms.
        element.add_waveform(qubit_waveform)
        element.add_waveform(readout_waveform)
        ramsey_sequence.add_element(element)
    ramsey_sequence.check()
    return ramsey_sequence
def make_ramsey_gaussian_sequence(pi_duration, sigma_cuttoff,
                                  pi_half_amp=1 / 2, start=0, stop=200e-9,
                                  step=2e-9, channels=[1, 4], pulse_mod=False):
    """
    Build a Ramsey sequence using Gaussian drive pulses.

    Two Gaussian pulses of ``pi_duration`` at pi/2 amplitude on the qubit
    channel, separated by a swept delay. Square readout pulse with markers
    (marker 1 flags readout start; marker 2 flags sequence start on the
    first element only).

    Args:
        pi_duration: duration of each Gaussian pulse (s).
        sigma_cuttoff: number of sigmas at which the Gaussian is truncated.
        pi_half_amp: nominal pi/2 amplitude. NOTE(review): this argument is
            immediately overwritten by the calibration-dict value below, so
            the caller-supplied value is ignored (kept for backward
            compatibility).
        start: first delay value (s).
        stop: last delay value (s).
        step: delay increment (s).
        channels: [qubit_channel, readout_channel].
        pulse_mod: if True, add a pulse-modulation marker (marker 1) on the
            qubit channel covering the drive window.

    Returns:
        Sequence: the populated and checked Ramsey sequence.
    """
    ramsey_sequence = Sequence(name='ramsey',
                               variable='drive_drive_delay',
                               variable_label='Delay',
                               variable_unit='s',
                               step=step,
                               start=start,
                               stop=stop)
    p_dict = get_calibration_dict()
    # Caller-supplied pi_half_amp is discarded in favour of the calibrated
    # value; preserved to keep existing behaviour.
    pi_half_amp = p_dict['pi_half_pulse_amp']
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    readout_template = Waveform(length=total_points, channel=channels[1])
    readout_template.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_template.add_marker(1, readout_marker_start_points, marker_points)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    delay_array_points = np.round(
        ramsey_sequence.variable_array / resolution).astype(int)
    pi_half_pulse = gaussian_array(pi_duration, sigma_cuttoff, pi_half_amp,
                                   p_dict['sample_rate'])
    pulse_mod_points = int((stop + 4 * pi_duration * sigma_cuttoff) /
                           resolution)
    for i, delay_points in enumerate(delay_array_points):
        element = Element()
        qubit_waveform = Waveform(length=total_points, channel=channels[0])
        readout_waveform = readout_template.copy()
        if i == 0:
            # Sequence-start marker on the first element only.
            readout_waveform.add_marker(2, 0, marker_points)
        qubit_start_first = int(pulse_end_points - delay_points - 2 *
                                len(pi_half_pulse))
        qubit_end_first = int(qubit_start_first + len(pi_half_pulse))
        qubit_start_second = int(pulse_end_points - len(pi_half_pulse))
        qubit_end_second = int(qubit_start_second + len(pi_half_pulse))
        qubit_waveform.wave[qubit_start_first:qubit_end_first] = pi_half_pulse
        qubit_waveform.wave[
            qubit_start_second:qubit_end_second] = pi_half_pulse
        if pulse_mod:
            qubit_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                      pulse_mod_points)
        element.add_waveform(qubit_waveform)
        element.add_waveform(readout_waveform)
        ramsey_sequence.add_element(element)
    ramsey_sequence.check()
    return ramsey_sequence
##################################################################
# AllXY
##################################################################
def make_allxy_seq(pi_duration, sigma_cuttoff, channels=[1, 2, 4],
                   pulse_mod=False):
    """
    Build the 21-element AllXY calibration sequence with Gaussian pulses.

    Each element applies up to two gates drawn from {I, X, Y, X/2, Y/2}
    back-to-back just before readout. Pulse amplitudes come from the
    calibration dict. Square readout pulse carries marker 1 at readout
    start; elements 0 and 1 additionally carry a sequence-start marker
    (marker 2), matching the original implementation.

    FIX: element 0's drive waveforms previously hard-coded channels 1 and 2
    instead of using ``channels[0]``/``channels[1]``; they now respect the
    ``channels`` argument like every other element.

    Args:
        pi_duration: duration of each Gaussian pulse (s).
        sigma_cuttoff: number of sigmas at which the Gaussian is truncated.
        channels: [x_drive_channel, y_drive_channel, readout_channel].
        pulse_mod: if True, add a pulse-modulation marker (marker 1) on the
            x drive waveform of every element.

    Returns:
        Sequence: the populated AllXY sequence (not checked, as before).
    """
    p_dict = get_calibration_dict()
    pi_amp = p_dict['pi_pulse_amp']
    pi_half_amp = p_dict['pi_half_pulse_amp']
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    pi_pulse = gaussian_array(pi_duration, sigma_cuttoff, pi_amp,
                              p_dict['sample_rate'])
    pi_half_pulse = gaussian_array(pi_duration, sigma_cuttoff, pi_half_amp,
                                   p_dict['sample_rate'])
    readout_waveform = Waveform(length=total_points, channel=channels[2])
    readout_waveform.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_waveform.add_marker(1, readout_marker_start_points, marker_points)
    pulse_points = len(pi_pulse)
    pulse_mod_points = int(pi_duration * sigma_cuttoff * 2 / resolution)
    seq = Sequence(name='allxy', variable='operation_combination',
                   variable_label='Operation Combination Id')
    readout_first = readout_waveform.copy()
    readout_first.add_marker(2, 0, marker_points)
    # AllXY gate table: each entry is the (first_gate, second_gate) pair,
    # where a gate is (axis, pulse_array) or None for identity.
    ops = [
        (None, None),                                  # 0:  I,   I
        (('x', pi_pulse), ('x', pi_pulse)),            # 1:  X,   X
        (('y', pi_pulse), ('y', pi_pulse)),            # 2:  Y,   Y
        (('x', pi_pulse), ('y', pi_pulse)),            # 3:  X,   Y
        (('y', pi_pulse), ('x', pi_pulse)),            # 4:  Y,   X
        (('x', pi_half_pulse), None),                  # 5:  X/2, I
        (('y', pi_half_pulse), None),                  # 6:  Y/2, I
        (('x', pi_half_pulse), ('y', pi_half_pulse)),  # 7:  X/2, Y/2
        (('y', pi_half_pulse), ('x', pi_half_pulse)),  # 8:  Y/2, X/2
        (('x', pi_half_pulse), ('y', pi_pulse)),       # 9:  X/2, Y
        (('y', pi_half_pulse), ('x', pi_pulse)),       # 10: Y/2, X
        (('x', pi_pulse), ('y', pi_half_pulse)),       # 11: X,   Y/2
        (('y', pi_pulse), ('x', pi_half_pulse)),       # 12: Y,   X/2
        (('x', pi_half_pulse), ('x', pi_pulse)),       # 13: X/2, X
        (('x', pi_pulse), ('x', pi_half_pulse)),       # 14: X,   X/2
        (('y', pi_half_pulse), ('y', pi_pulse)),       # 15: Y/2, Y
        (('y', pi_pulse), ('y', pi_half_pulse)),       # 16: Y,   Y/2
        (('x', pi_pulse), None),                       # 17: X,   I
        (('y', pi_pulse), None),                       # 18: Y,   I
        (('x', pi_half_pulse), ('x', pi_half_pulse)),  # 19: X/2, X/2
        (('y', pi_half_pulse), ('y', pi_half_pulse)),  # 20: Y/2, Y/2
    ]
    # The two gate slots sit back-to-back, ending at pulse_end_points.
    first_slot = slice(pulse_end_points - 2 * pulse_points,
                       pulse_end_points - pulse_points)
    second_slot = slice(pulse_end_points - pulse_points, pulse_end_points)
    for i, (first, second) in enumerate(ops):
        element = Element()
        x_waveform = Waveform(length=total_points, channel=channels[0])
        y_waveform = Waveform(length=total_points, channel=channels[1])
        drive = {'x': x_waveform, 'y': y_waveform}
        if first is not None:
            axis, pulse = first
            drive[axis].wave[first_slot] = pulse
        if second is not None:
            axis, pulse = second
            drive[axis].wave[second_slot] = pulse
        if pulse_mod:
            x_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                  pulse_mod_points)
        element.add_waveform(x_waveform)
        element.add_waveform(y_waveform)
        # Elements 0 and 1 carry the sequence-start marker, as in the
        # original implementation.
        element.add_waveform(readout_first if i < 2 else readout_waveform)
        seq.add_element(element)
    return seq
def make_allxySSB_seq(pi_duration, pi_amp, SSBfreq, sigma_cuttoff,
                      channels=[1, 2, 4], pulse_mod=False):
    """
    Build the 21-element AllXY calibration sequence with single-sideband
    (SSB) modulated Gaussian pulses.

    Each gate is realised as an I/Q pulse pair: the I envelope goes on the
    x drive channel (``channels[0]``) and the Q envelope on the y drive
    channel (``channels[1]``), as established by the X/Y pulse definitions
    below. Square readout pulse carries marker 1 at readout start; elements
    0 and 1 additionally carry a sequence-start marker (marker 2).

    FIXES relative to the original:
      * Several second-gate (and element 19/20) Q-channel slots were
        assigned the I envelope instead of the Q envelope, which breaks
        SSB modulation; all slots now use the correct I/Q pair.
      * Element 10's second gate applied Y-pulse arrays, whereas the AllXY
        sequence (cf. ``make_allxy_seq``) calls for Y/2 followed by X;
        it now applies the X pulse pair.

    Args:
        pi_duration: duration of each Gaussian envelope (s).
        pi_amp: nominal pi amplitude. NOTE(review): immediately overwritten
            by the calibration-dict value below, so the caller-supplied
            value is ignored (kept for backward compatibility).
        SSBfreq: single-sideband modulation frequency (Hz).
        sigma_cuttoff: number of sigmas at which the Gaussian is truncated.
        channels: [I_drive_channel, Q_drive_channel, readout_channel].
        pulse_mod: if True, add a pulse-modulation marker (marker 1) on the
            I drive waveform of every element.

    Returns:
        Sequence: the populated AllXY sequence (not checked, as before).
    """
    p_dict = get_calibration_dict()
    # Caller-supplied pi_amp is discarded in favour of the calibrated value;
    # preserved to keep existing behaviour.
    pi_amp = p_dict['pi_pulse_amp']
    pi_half_amp = p_dict['pi_half_pulse_amp']
    resolution = 1 / p_dict['sample_rate']
    readout_start = p_dict['pulse_end'] + p_dict['pulse_readout_delay']
    readout_marker_start = readout_start - p_dict['marker_readout_delay']
    readout_start_points = round(readout_start / resolution)
    readout_marker_start_points = round(readout_marker_start / resolution)
    readout_points = round(p_dict['readout_time'] / resolution)
    pulse_end_points = round(p_dict['pulse_end'] / resolution)
    marker_points = round(p_dict['marker_time'] / resolution)
    total_points = round(p_dict['cycle_duration'] / resolution)
    pi_pulseI_x = cos_gaussian_array(pi_duration, sigma_cuttoff, SSBfreq,
                                     pi_amp, p_dict['sample_rate'])
    pi_pulseQ_x = sin_gaussian_array(pi_duration, sigma_cuttoff, SSBfreq,
                                     pi_amp, p_dict['sample_rate'],
                                     positive=False)
    pi_half_pulseI_x = cos_gaussian_array(pi_duration, sigma_cuttoff, SSBfreq,
                                          pi_half_amp, p_dict['sample_rate'])
    pi_half_pulseQ_x = sin_gaussian_array(pi_duration, sigma_cuttoff, SSBfreq,
                                          pi_half_amp, p_dict['sample_rate'],
                                          positive=False)
    # NOTE(review): the *_y pulses below call the generators with a
    # different argument order than the *_x pulses above; preserved as-is —
    # confirm against the signatures of cos/sin_gaussian_array.
    pi_pulseI_y = sin_gaussian_array(p_dict['sample_rate'], pi_duration,
                                     sigma_cuttoff, pi_amp, SSBfreq)
    pi_pulseQ_y = cos_gaussian_array(p_dict['sample_rate'], pi_duration,
                                     sigma_cuttoff, pi_amp, SSBfreq)
    pi_half_pulseI_y = sin_gaussian_array(p_dict['sample_rate'],
                                          pi_duration, sigma_cuttoff,
                                          pi_half_amp, SSBfreq)
    pi_half_pulseQ_y = cos_gaussian_array(p_dict['sample_rate'],
                                          pi_duration, sigma_cuttoff,
                                          pi_half_amp, SSBfreq)
    readout_waveform = Waveform(length=total_points, channel=channels[2])
    readout_waveform.wave[
        readout_start_points:readout_start_points + readout_points] = 1
    readout_waveform.add_marker(1, readout_marker_start_points, marker_points)
    pulse_points = len(pi_pulseI_x)
    pulse_mod_points = int(pi_duration * sigma_cuttoff * 4 / resolution)
    seq = Sequence(name='allxy', variable='operation_combination',
                   variable_label='Operation Combination Id')
    readout_first = readout_waveform.copy()
    # Sequence-start marker set directly on marker_2, as in the original.
    readout_first.marker_2[10:10 + marker_points] = 1
    # Gate name -> (I envelope, Q envelope).
    pulses = {
        'X': (pi_pulseI_x, pi_pulseQ_x),
        'Y': (pi_pulseI_y, pi_pulseQ_y),
        'X/2': (pi_half_pulseI_x, pi_half_pulseQ_x),
        'Y/2': (pi_half_pulseI_y, pi_half_pulseQ_y),
    }
    # Standard AllXY gate table (first_gate, second_gate); None = identity.
    ops = [
        (None, None),      # 0:  I,   I
        ('X', 'X'),        # 1
        ('Y', 'Y'),        # 2
        ('X', 'Y'),        # 3
        ('Y', 'X'),        # 4
        ('X/2', None),     # 5
        ('Y/2', None),     # 6
        ('X/2', 'Y/2'),    # 7
        ('Y/2', 'X/2'),    # 8
        ('X/2', 'Y'),      # 9
        ('Y/2', 'X'),      # 10 (second gate corrected from Y to X)
        ('X', 'Y/2'),      # 11
        ('Y', 'X/2'),      # 12
        ('X/2', 'X'),      # 13
        ('X', 'X/2'),      # 14
        ('Y/2', 'Y'),      # 15
        ('Y', 'Y/2'),      # 16
        ('X', None),       # 17
        ('Y', None),       # 18
        ('X/2', 'X/2'),    # 19
        ('Y/2', 'Y/2'),    # 20
    ]
    # The two gate slots sit back-to-back, ending at pulse_end_points.
    first_slot = slice(pulse_end_points - 2 * pulse_points,
                       pulse_end_points - pulse_points)
    second_slot = slice(pulse_end_points - pulse_points, pulse_end_points)
    for i, (first, second) in enumerate(ops):
        element = Element()
        x_waveform = Waveform(length=total_points, channel=channels[0])
        y_waveform = Waveform(length=total_points, channel=channels[1])
        if first is not None:
            i_env, q_env = pulses[first]
            x_waveform.wave[first_slot] = i_env
            y_waveform.wave[first_slot] = q_env
        if second is not None:
            i_env, q_env = pulses[second]
            x_waveform.wave[second_slot] = i_env
            y_waveform.wave[second_slot] = q_env
        if pulse_mod:
            x_waveform.add_marker(1, pulse_end_points - pulse_mod_points,
                                  pulse_mod_points)
        element.add_waveform(x_waveform)
        element.add_waveform(y_waveform)
        # Elements 0 and 1 carry the sequence-start marker, as in the
        # original implementation.
        element.add_waveform(readout_first if i < 2 else readout_waveform)
        seq.add_element(element)
    return seq
| QCoDeS/Qcodes-contrib | qdev_transmon_helpers/sequencing/legacy.py | Python | mit | 61,861 | [
"Gaussian"
] | d8e97d67590b29efd5f195c730e2c86cb6b64ccda8444f743d002d385dbac537 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.distributions.python.ops import kullback_leibler # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Normal(distribution.Distribution):
  """The scalar Normal distribution with mean and stddev parameters mu, sigma.

  #### Mathematical details

  The PDF of this distribution is:

  ```f(x) = sqrt(1/(2*pi*sigma^2)) exp(-(x-mu)^2/(2*sigma^2))```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar Normal distribution.
  dist = tf.contrib.distributions.Normal(mu=0, sigma=3)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1)

  # Define a batch of two scalar valued Normals.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tf.contrib.distributions.Normal(mu=[1, 2.], sigma=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.pdf([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample(3)
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Normals.
  # Both have mean 1, but different standard deviations.
  dist = tf.contrib.distributions.Normal(mu=1, sigma=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.pdf(3.0)
  ```
  """

  def __init__(
      self, mu, sigma, strict=True, strict_statistics=True, name="Normal"):
    """Construct Normal distributions with mean and stddev `mu` and `sigma`.

    The parameters `mu` and `sigma` must be shaped in a way that supports
    broadcasting (e.g. `mu + sigma` is a valid operation).

    Args:
      mu: `float` or `double` tensor, the means of the distribution(s).
      sigma: `float` or `double` tensor, the stddevs of the distribution(s).
        sigma must contain only positive values.
      strict: Whether to assert that `sigma > 0`. If `strict` is False,
        correct output is not guaranteed when input is invalid.
      strict_statistics: Boolean, default True. If True, raise an exception if
        a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
        If False, batch members with valid parameters leading to undefined
        statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if mu and sigma are different dtypes.
    """
    self._strict_statistics = strict_statistics
    self._strict = strict
    with ops.op_scope([mu, sigma], name):
      mu = ops.convert_to_tensor(mu)
      sigma = ops.convert_to_tensor(sigma)
      # Attaching the assertion as a control dependency ties the `sigma > 0`
      # check to the first use of the parameters when `strict` is on.
      with ops.control_dependencies(
          [check_ops.assert_positive(sigma)] if strict else []):
        self._name = name
        self._mu = array_ops.identity(mu, name="mu")
        self._sigma = array_ops.identity(sigma, name="sigma")
        # Batch shape is the broadcast of mu's and sigma's static shapes.
        self._batch_shape = self._ones().get_shape()
        # Scalar event: each draw from one distribution is a single value.
        self._event_shape = tensor_shape.TensorShape([])
    contrib_tensor_util.assert_same_float_dtype((mu, sigma))

  @property
  def strict_statistics(self):
    """Boolean describing behavior when a stat is undefined for batch member."""
    return self._strict_statistics

  @property
  def strict(self):
    """Boolean describing behavior on invalid input."""
    return self._strict

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._mu.dtype

  def batch_shape(self, name="batch_shape"):
    """Batch dimensions of this instance as a 1-D int32 `Tensor`.

    The product of the dimensions of the `batch_shape` is the number of
    independent distributions of this kind the instance represents.

    Args:
      name: name to give to the op.

    Returns:
      `Tensor` `batch_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return array_ops.shape(self._ones())

  def get_batch_shape(self):
    """`TensorShape` available at graph construction time.

    Same meaning as `batch_shape`. May be only partially defined.

    Returns:
      batch shape
    """
    return self._batch_shape

  def event_shape(self, name="event_shape"):
    """Shape of a sample from a single distribution as a 1-D int32 `Tensor`.

    Args:
      name: name to give to the op.

    Returns:
      `Tensor` `event_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return constant_op.constant([], dtype=dtypes.int32)

  def get_event_shape(self):
    """`TensorShape` available at graph construction time.

    Same meaning as `event_shape`. May be only partially defined.

    Returns:
      event shape
    """
    return self._event_shape

  @property
  def mu(self):
    """Distribution parameter for the mean."""
    return self._mu

  @property
  def sigma(self):
    """Distribution parameter for standard deviation."""
    return self._sigma

  def mean(self, name="mean"):
    """Mean of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._sigma, self._mu], name):
        # Multiply by ones_like(sigma) to broadcast mu up to the batch shape.
        return self._mu * array_ops.ones_like(self._sigma)

  def mode(self, name="mode"):
    """Mode of this distribution."""
    # The Normal density is unimodal and symmetric: mode == mean.
    return self.mean(name="mode")

  def std(self, name="std"):
    """Standard deviation of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._sigma, self._mu], name):
        # Multiply by ones_like(mu) to broadcast sigma up to the batch shape.
        return self._sigma * array_ops.ones_like(self._mu)

  def variance(self, name="variance"):
    """Variance of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return math_ops.square(self.std())

  def log_prob(self, x, name="log_prob"):
    """Log prob of observations in `x` under these Normal distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma, x], name):
        x = ops.convert_to_tensor(x)
        if x.dtype != self.dtype:
          raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                          % (x.dtype, self.dtype))
        log_2_pi = constant_op.constant(math.log(2 * math.pi), dtype=self.dtype)
        # log N(x; mu, sigma) = -0.5 log(2 pi) - log(sigma)
        #                       - 0.5 ((x - mu) / sigma)^2
        return (-0.5*log_2_pi - math_ops.log(self._sigma)
                -0.5*math_ops.square((x - self._mu) / self._sigma))

  def cdf(self, x, name="cdf"):
    """CDF of observations in `x` under these Normal distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      cdf: tensor of dtype `dtype`, the CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma, x], name):
        x = ops.convert_to_tensor(x)
        if x.dtype != self.dtype:
          raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                          % (x.dtype, self.dtype))
        # TODO(ebrevdo): wrap this in a Defun with a custom Defun
        # gradient because the analytic gradient may be faster than
        # automatic differentiation.
        # Phi(x) = 0.5 (1 + erf((x - mu) / (sigma sqrt(2)))).
        return (0.5 + 0.5*math_ops.erf(
            1.0/(math.sqrt(2.0) * self._sigma)*(x - self._mu)))

  def log_cdf(self, x, name="log_cdf"):
    """Log CDF of observations `x` under these Normal distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma, x], name):
        return math_ops.log(self.cdf(x))

  def prob(self, x, name="prob"):
    """The PDF of observations in `x` under these Normal distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `mu` and `sigma`.
      name: The name to give this op.

    Returns:
      prob: tensor of dtype `dtype`, the prob values of `x`.
    """
    # Base class derives prob from log_prob via exp(log_prob(x)).
    return super(Normal, self).prob(x, name=name)

  def entropy(self, name="entropy"):
    """The entropy of Normal distribution(s).

    Args:
      name: The name to give this op.

    Returns:
      entropy: tensor of dtype `dtype`, the entropy.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma], name):
        two_pi_e1 = constant_op.constant(
            2 * math.pi * math.exp(1), dtype=self.dtype)
        # Use broadcasting rules to calculate the full broadcast sigma.
        sigma = self._sigma * array_ops.ones_like(self._mu)
        # H = 0.5 log(2 pi e sigma^2).
        return 0.5 * math_ops.log(two_pi_e1 * math_ops.square(sigma))

  def sample(self, n, seed=None, name="sample"):
    """Sample `n` observations from the Normal Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the hyperparameters.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._mu, self._sigma, n], name):
        broadcast_shape = (self._mu + self._sigma).get_shape()
        n = ops.convert_to_tensor(n)
        shape = array_ops.concat(
            0, [array_ops.pack([n]), array_ops.shape(self.mean())])
        # Reparameterized sampling: draw standard normals, then scale/shift.
        sampled = random_ops.random_normal(
            shape=shape, mean=0, stddev=1, dtype=self._mu.dtype, seed=seed)
        # Provide some hints to shape inference
        n_val = tensor_util.constant_value(n)
        final_shape = tensor_shape.vector(n_val).concatenate(broadcast_shape)
        sampled.set_shape(final_shape)
        return sampled * self._sigma + self._mu

  @property
  def is_reparameterized(self):
    # Samples are an affine function of the parameters (see `sample`), so
    # gradients can flow through mu and sigma.
    return True

  def _ones(self):
    # Ones tensor with the broadcast shape of (mu, sigma).
    return array_ops.ones_like(self._mu + self._sigma)

  def _zeros(self):
    # Zeros tensor with the broadcast shape of (mu, sigma).
    return array_ops.zeros_like(self._mu + self._sigma)

  @property
  def is_continuous(self):
    return True
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.op_scope([n_a.mu, n_b.mu], name, "kl_normal_normal"):
    # Scalar constants in the distributions' dtype.
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    var_a = math_ops.square(n_a.sigma)
    var_b = math_ops.square(n_b.sigma)
    var_ratio = var_a / var_b
    squared_mean_gap = math_ops.square(n_a.mu - n_b.mu)
    # KL(N_a || N_b) = (mu_a - mu_b)^2 / (2 sigma_b^2)
    #                  + 0.5 * (r - 1 - log r),  r = sigma_a^2 / sigma_b^2
    return (squared_mean_gap / (two * var_b)
            + half * (var_ratio - one - math_ops.log(var_ratio)))
| HaebinShin/tensorflow | tensorflow/contrib/distributions/python/ops/normal.py | Python | apache-2.0 | 12,593 | [
"Gaussian"
] | d0c0c04f01a85302539a0ebb61e06fe5e0a1a60d978342397a681265705bcc39 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
import datetime
import random
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from wger.core.demo import create_demo_entries
from wger.core.demo import create_temporary_user
from wger.core.tests.base_testcase import WorkoutManagerTestCase
from wger.manager.models import Day
from wger.manager.models import Schedule
from wger.manager.models import ScheduleStep
from wger.manager.models import Workout
from wger.manager.models import WorkoutLog
from wger.nutrition.models import Meal
from wger.nutrition.models import NutritionPlan
from wger.weight.models import WeightEntry
class DemoUserTestCase(WorkoutManagerTestCase):
    '''
    Tests the demo user
    '''

    @staticmethod
    def count_temp_users():
        '''
        Counts the number of temporary users
        '''
        return User.objects.filter(userprofile__is_temporary=1).count()

    def test_demo_data(self):
        '''
        Tests that the helper function creates demo data (workout, etc.)
        for the demo users
        '''
        # Visiting the dashboard creates a temporary (guest) user.
        self.client.get(reverse('core:dashboard'))
        self.assertEqual(self.count_temp_users(), 2)

        # The freshly created user has no demo data yet.
        user = User.objects.get(pk=User.objects.latest('id').id)
        self.assertEqual(user.userprofile.is_temporary, True)
        self.assertEqual(Workout.objects.filter(user=user).count(), 0)
        create_demo_entries(user)

        # Workout
        self.assertEqual(Workout.objects.filter(user=user).count(), 4)
        self.assertEqual(Day.objects.filter(training__user=user).count(), 2)
        self.assertEqual(WorkoutLog.objects.filter(user=user).count(), 56)

        # Schedule
        self.assertEqual(Schedule.objects.filter(user=user).count(), 3)
        self.assertEqual(ScheduleStep.objects.filter(schedule__user=user).count(), 6)

        # Nutrition
        self.assertEqual(NutritionPlan.objects.filter(user=user).count(), 1)
        self.assertEqual(Meal.objects.filter(plan__user=user).count(), 3)

        # Body weight
        self.assertEqual(WeightEntry.objects.filter(user=user).count(), 19)

    def test_demo_data_body_weight(self):
        '''
        Tests that the helper function that creates demo data filters out
        existing dates for the weight entries
        '''
        self.client.get(reverse('core:dashboard'))
        self.assertEqual(self.count_temp_users(), 2)

        # Pre-create weight entries on the last 4 days so create_demo_entries
        # has to skip those dates instead of duplicating them.
        user = User.objects.get(pk=4)
        temp = []
        for i in range(1, 5):
            creation_date = datetime.date.today() - datetime.timedelta(days=i)
            entry = WeightEntry(user=user,
                                weight=80 + 0.5 * i + random.randint(1, 3),
                                date=creation_date)
            temp.append(entry)
        WeightEntry.objects.bulk_create(temp)
        create_demo_entries(user)

        # Body weight: still 19 total, despite the 4 pre-existing entries.
        self.assertEqual(WeightEntry.objects.filter(user=user).count(), 19)

    def test_demo_user(self):
        '''
        Tests that temporary users are automatically created when visiting
        URLs that need a user present
        '''
        self.assertEqual(self.count_temp_users(), 1)

        # These pages should not create a user
        self.client.get(reverse('core:contact'))
        self.assertEqual(self.count_temp_users(), 1)

        self.client.get(reverse('software:code'))
        self.assertEqual(self.count_temp_users(), 1)

        self.client.get(reverse('exercise:exercise:overview'))
        self.assertEqual(self.count_temp_users(), 1)

        self.client.get(reverse('nutrition:ingredient:list'))
        self.assertEqual(self.count_temp_users(), 1)

        self.user_logout()
        self.client.get(reverse('manager:workout:overview'))
        self.assertEqual(self.count_temp_users(), 1)

        self.user_logout()
        # NOTE(review): the reverse() result is unused here — presumably this
        # was meant to be wrapped in self.client.get(...); confirm intent.
        reverse('weight:overview', kwargs={'username': 'test'})
        self.assertEqual(self.count_temp_users(), 1)

        self.user_logout()
        self.client.get(reverse('nutrition:plan:overview'))
        self.assertEqual(self.count_temp_users(), 1)

        # This page will create one
        self.client.get(reverse('core:dashboard'))
        self.assertEqual(self.count_temp_users(), 2)

        # The new user is automatically logged in, so no new user is created
        # after the first visit
        self.client.get(reverse('core:dashboard'))
        self.assertEqual(self.count_temp_users(), 2)

    def test_demo_user_notice(self):
        '''
        Tests that demo users see a notice on every page
        '''
        demo_notice_text = 'You are using a guest account'
        self.user_login('demo')

        self.assertContains(self.client.get(reverse('core:dashboard')), demo_notice_text)
        self.assertContains(self.client.get(reverse('manager:workout:overview')),
                            demo_notice_text)
        self.assertContains(self.client.get(reverse('exercise:exercise:overview')),
                            demo_notice_text)
        self.assertContains(self.client.get(reverse('exercise:muscle:overview')), demo_notice_text)
        self.assertContains(self.client.get(reverse('nutrition:plan:overview')),
                            demo_notice_text)
        self.assertContains(self.client.get(reverse('software:issues')), demo_notice_text)
        self.assertContains(self.client.get(reverse('software:license')), demo_notice_text)

    def test_command_delete_old_users(self):
        '''
        Tests that old demo users are deleted by the management command
        '''
        # Create some new demo users
        for i in range(0, 15):
            create_temporary_user()
        # Backdate everyone so they qualify as "old".
        User.objects.filter().update(date_joined='2013-01-01 00:00+01:00')

        # These ones keep the date_joined field
        create_temporary_user()
        create_temporary_user()

        # Check and delete
        self.assertEqual(self.count_temp_users(), 18)
        call_command('delete-temp-users')
        self.assertEqual(self.count_temp_users(), 2)
| DeveloperMal/wger | wger/core/tests/test_temporary_users.py | Python | agpl-3.0 | 6,675 | [
"VisIt"
] | 78c7d09c33ea7fd05214b6499defb530c8139335eec4cd48e5504c2354eff522 |
#!/usr/bin/env python
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""
Bio.Wise contains modules for running and processing the output of
some of the models in the Wise2 package by Ewan Birney available from:
ftp://ftp.ebi.ac.uk/pub/software/unix/wise2/
http://www.ebi.ac.uk/Wise2/
Bio.Wise.psw is for protein Smith-Waterman alignments
Bio.Wise.dnal is for Smith-Waterman DNA alignments
"""
from __future__ import print_function
import os
import re
import sys
from Bio import Wise
# Base command line for Wise2's `psw` tool (protein Smith-Waterman).
# The "-l" and "-F" flags control psw's output form — see the Wise2
# documentation for their exact meaning.
_CMDLINE_PSW = ["psw", "-l", "-F"]
# Option flags appended by align() when the corresponding keyword is given.
_OPTION_GAP_START = "-g"
_OPTION_GAP_EXTENSION = "-e"
_OPTION_SCORES = "-m"
class AlignmentColumnFullException(Exception):
    """Raised when appending to an AlignmentColumn that already has both units."""
    pass
class Alignment(list):
    """A list of AlignmentColumns built incrementally from ColumnUnits."""

    def append(self, column_unit):
        """Add *column_unit* to the current column, or start a new column.

        A new AlignmentColumn is opened when the last column is already
        full (AlignmentColumnFullException) or when the alignment is still
        empty (IndexError on self[-1]).
        """
        try:
            self[-1].append(column_unit)
        except (AlignmentColumnFullException, IndexError):
            # Both conditions mean the same thing here: the unit must open
            # a fresh column.  (Previously two identical except branches.)
            list.append(self, AlignmentColumn(column_unit))
class AlignmentColumn(list):
    """One aligned column: a two-slot list [unit-0 column, unit-1 column]."""

    def __init__(self, column_unit):
        """Open a column from unit 0's ColumnUnit; unit 1's slot starts empty."""
        assert column_unit.unit == 0
        self.kind = column_unit.kind
        list.__init__(self, [column_unit.column, None])

    def _set_kind(self, column_unit):
        """A generic SEQUENCE column adopts the kind of the later unit."""
        if self.kind == "SEQUENCE":
            self.kind = column_unit.kind

    def __repr__(self):
        return "{0}({1}, {2})".format(self.kind, self[0], self[1])

    def append(self, column_unit):
        """Fill unit 1's slot; refuse if the column is already complete."""
        if self[1] is not None:
            raise AlignmentColumnFullException
        assert column_unit.unit == 1
        self._set_kind(column_unit)
        self[1] = column_unit.column
class ColumnUnit(object):
    """One endpoint of an alignment column: unit number, column index, kind."""

    def __init__(self, unit, column, kind):
        self.kind = kind
        self.column = column
        self.unit = unit

    def __str__(self):
        return "ColumnUnit(unit={0}, column={1}, {2})".format(
            self.unit, self.column, self.kind)

    __repr__ = __str__
# Matches lines such as "Unit 0- [ -1- 0] [SEQUENCE]", capturing the unit
# number, both column bounds, and the column kind.
_re_unit = re.compile(r"^Unit +([01])- \[ *(-?\d+)- *(-?\d+)\] \[(\w+)\]$")


def parse_line(line):
    """Parse one psw output line into a ColumnUnit, or None for other lines.

    Only the end column (the second bound) is kept.

    >>> print(parse_line("Column 0:"))
    None
    >>> parse_line("Unit 0- [ -1- 0] [SEQUENCE]")
    ColumnUnit(unit=0, column=0, SEQUENCE)
    >>> parse_line("Unit 1- [ 85- 86] [SEQUENCE]")
    ColumnUnit(unit=1, column=86, SEQUENCE)
    """
    hit = _re_unit.match(line.rstrip())
    if hit is None:
        return None
    unit, _, column, kind = hit.groups()
    return ColumnUnit(int(unit), int(column), kind)
def parse(iterable):
    """Build an Alignment from lines of psw output.

    Expected format:

        Column 0:
            Unit 0- [ -1- 0] [SEQUENCE]
            Unit 1- [ 85- 86] [SEQUENCE]

    means that seq1[0] == seq2[86] (0-based).
    """
    alignment = Alignment()
    for raw_line in iterable:
        # Echo raw lines when the WISE_PY_DEBUG environment variable is set.
        if os.environ.get("WISE_PY_DEBUG"):
            print(raw_line)
        unit = parse_line(raw_line)
        if unit:
            alignment.append(unit)
    return alignment
def align(pair,
          scores=None,
          gap_start=None,
          gap_extension=None,
          *args, **keywds):
    """Run psw on a pair of sequence files and parse the alignment it prints.

    Optional score matrix and gap penalties are forwarded as psw options;
    remaining arguments are passed through to Wise.align.
    """
    cmdline = _CMDLINE_PSW + []  # fresh copy; never mutate the module constant
    if scores:
        cmdline += [_OPTION_SCORES, scores]
    if gap_start:
        cmdline += [_OPTION_GAP_START, str(gap_start)]
    if gap_extension:
        cmdline += [_OPTION_GAP_EXTENSION, str(gap_extension)]
    return parse(Wise.align(cmdline, pair, *args, **keywds))
def main():
    """Align the two sequence files named on the command line and print it."""
    result = align(sys.argv[1:3])
    print(result)
def _test(*args, **keywds):
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod(sys.modules[__name__], *args, **keywds)
if __name__ == "__main__":
if __debug__:
_test()
main()
| zjuchenyuan/BioWeb | Lib/Bio/Wise/psw.py | Python | mit | 3,715 | [
"Biopython"
] | f6c3532585a9ddb62195d0267651b0acd2a1ba0940db608b7a48b8b6b171c915 |
# ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
WebsiteExport will export a package as a website of HTML pages
"""
import sys
import logging
import re
import imp
from shutil import rmtree
from exe.engine.path import Path, TempDirPath
from exe.export.pages import uniquifyNames
from exe.export.websitepage import WebsitePage
from exe.engine.resource import Resource
from zipfile import ZipFile, ZIP_DEFLATED
from exe.webui import common
from exe.webui.livepage import allSessionClients
from exe import globals as G
import os
import mimetypes
from tempfile import mkdtemp
from exe.engine.persist import encodeObject
from exe.engine.persistxml import encodeObjectToXML
from twisted.internet import threads
import httplib2
from oauth2client.client import AccessTokenCredentials
from apiclient import errors
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from nevow.livepage import jquote
log = logging.getLogger(__name__)
# ===========================================================================
class WebsiteExport(object):
"""
WebsiteExport will export a package as a website of HTML pages
"""
    def __init__(self, config, styleDir, filename, prefix="", report=False):
        """
        'styleDir' is the directory where we can copy the stylesheets from.
        'filename' is the output target: a directory for export(), a zip
        file for exportZip(), or the CSV report path when 'report' is set.
        'prefix' is prepended to generated page names (e.g. "index").
        'report' switches export() from writing pages to writing a CSV
        resource report.
        """
        self.config = config
        # Directories inside eXe's own web tree that exports copy from.
        self.imagesDir = config.webDir/"images"
        self.scriptsDir = config.webDir/"scripts"
        self.cssDir = config.webDir/"css"
        self.templatesDir = config.webDir/"templates"
        self.stylesDir = Path(styleDir)
        self.filename = Path(filename)
        # Filled in by the export methods with one WebsitePage per node.
        self.pages = []
        self.prefix = prefix
        self.report = report
        # Controls whether a style's own websitepage.py may be loaded.
        self.styleSecureMode = config.styleSecureMode
def gDriveNotificationStatus(self, client, mesg):
client.sendScript("eXe.controller.eXeViewport.prototype.gDriveNotificationStatus('%s');" % (mesg), filter_func=allSessionClients)
def gDriveNotificationNotice(self, client, mesg, type):
client.sendScript("eXe.controller.eXeViewport.prototype.gDriveNotificationNotice('%s', '%s');" % (mesg, type), filter_func=allSessionClients)
    def exportGoogleDrive(self, package, client, auth_token, user_agent):
        """
        Creates an authorized HTTP conexion, exports the current package as
        'webSite' to a temporary directory and uploads the exported files to
        Google Drive
        """
        num_total = 0;
        file_num = 0;

        def finishExport(public_folder, outputDir):
            # Final callback in the deferred chain: clean up the temp export
            # dir and tell the user where the published site lives.
            self.gDriveNotificationStatus(client, _(u'Deleting temporary directory'))
            rmtree(outputDir)
            link_url = 'http://googledrive.com/host/%s'%(public_folder['id'])
            link_text = public_folder['title']
            self.gDriveNotificationStatus(client, _(u'Package exported to <a href="%s" target="_blank" title="Click to visit exported site">%s</a>') %(link_url,link_text))
            return public_folder

        def insertFile(public_folder, drive, upload_file, file_num):
            """
            Creates the deferred that will upload files to the public folder
            once it is created
            """
            upload_content_d = threads.deferToThread(uploadContent, public_folder, drive, upload_file, file_num)
            upload_content_d.addCallbacks(uploadContent_onSuccess, uploadContent_onFail)
            return upload_content_d

        def uploadContent(public_folder, drive, upload_file, file_num):
            """
            Uploads one file to the given GDrive folder
            """
            try :
                filepath = os.path.join(self.filename, upload_file)
                filetype = mimetypes.guess_type(filepath, False)
                if filetype[0] is None :
                    # Hard-coded types for special cases not detected by mimetypes.guess_type()
                    if upload_file == 'content.data' :
                        filetype = ('application/octet-stream', None)
                if filetype[0] is not None :
                    link_url = 'http://googledrive.com/host/%s'%(public_folder['id'])
                    link_text = public_folder['title']
                    #self.gDriveNotificationStatus(client, _(u'Package exported to <a href="%s" target="_blank" title="Click to visit exported site">%s</<a>') %(link_url,link_text))
                    self.gDriveNotificationStatus(client, _(u'Uploading <em>%s</em> to public folder <a href="%s" target="_blank" title="Click to visit exported site">%s</a> (%d/%d)')
                                                  %(upload_file, link_url, link_text, file_num, num_total))
                    mimetype = filetype[0]
                    meta = {
                        'title': upload_file,
                        'mimeType': mimetype,
                        'parents' : [{'id' : public_folder['id']}]
                    }
                    media_body = MediaFileUpload(filepath, mimetype, resumable=True)
                    drive.files().insert(body=meta, media_body=media_body).execute()
                else :
                    # Unknown mime type: skip the file rather than fail the export.
                    self.gDriveNotificationNotice(client, _(u'File <em>%s</em> skipped, unknown filetype (%d/%d)') %(upload_file, file_num, num_total), 'warning')
            except Exception, e :
                # A single failed upload is reported but does not abort the chain.
                log.error(str(e))
                self.gDriveNotificationNotice(client, _(u'Failed upload of file %s (%d/%d)') % (upload_file, file_num, num_total), 'error')
            return public_folder

        def uploadContent_onFail(err):
            log.error(str(err))
            self.gDriveNotificationNotice(client, _(u'Failed exporting to GoogleDrive'), 'error')
            return err

        def uploadContent_onSuccess(public_folder):
            # self.gDriveNotificationStatus(client, _(u'Exported to GoogleDrive: %s') % public_folder['title'])
            return public_folder

        def publicFolder(drive, folder_name):
            """
            Creates a Public Web folder, that can be read as a web site with any
            web browser, and populates it with the content of the given directory
            """
            # Create public folder
            self.gDriveNotificationStatus(client, _(u'Creating public folder to host published web site'))
            body = {
                'title': folder_name,
                'mimeType': 'application/vnd.google-apps.folder'
            }
            public_folder = drive.files().insert(body=body).execute()
            # World-readable permission makes the folder browsable as a site.
            permission = {
                'value': '',
                'type': 'anyone',
                'role': 'reader'
            }
            drive.permissions().insert(fileId=public_folder['id'], body=permission).execute()
            return public_folder

        def publicFolder_onFail(err):
            log.error(str(err))
            self.gDriveNotificationNotice(client, _(u'Failed exporting to GoogleDrive'), 'error')
            return err

        def publicFolder_onSuccess(public_folder):
            self.gDriveNotificationStatus(client, _(u'Created public folder to host published web site: %s') % (public_folder['title']))
            return public_folder

        try:
            # Creates a new temporary dir to export the package to, it will be deleted
            # once the export process is finished
            self.filename = Path(mkdtemp())
            self.gDriveNotificationStatus(client, _(u'Exporting package as web site in: %s') % (jquote(self.filename)))
            outputDir = self.export(package)
            self.gDriveNotificationStatus(client, _(u'Starting authorized connection to Google Drive API'))
            credentials = AccessTokenCredentials(auth_token, user_agent)
            ca_certs = None
            if hasattr(sys, 'frozen'):
                # Frozen (packaged) builds ship their own CA bundle.
                ca_certs = 'cacerts.txt'
            http = httplib2.Http(ca_certs=ca_certs)
            http = credentials.authorize(http)
            drive_service = build('drive', 'v2', http=http)
            publicFolder_d = threads.deferToThread(publicFolder, drive_service, package.name)
            publicFolder_d.addCallbacks(publicFolder_onSuccess, publicFolder_onFail)
            # Loop through files in exportDir, each upload will be called from
            # a chained callback function. These callbacks will return a
            # deferred so the file N upload will start when file N-1 has
            # successfully called its own callback
            # (Deferred chain, see: http://krondo.com/?p=2159#attachment_2196)
            for upload_file in os.listdir(outputDir):
                file_num = file_num + 1
                publicFolder_d.addCallback(insertFile, drive_service, upload_file, file_num)
            num_total = file_num
            publicFolder_d.addCallback(finishExport, outputDir)
            # TODO clean exportDir after uploading has finished
        except Exception, e:
            log.error(str(e))
            self.gDriveNotificationNotice(client, _('EXPORT FAILED!'), 'error')
            raise
        #client.alert(_(u'Exported to %s') % filename)
    def exportZip(self, package):
        """
        Export web site
        Cleans up the previous packages pages and performs the export
        """
        # Pages are rendered into a temp dir, then zipped into self.filename.
        outputDir = TempDirPath()
        # Import the Website Page class , if the secure mode is off. If the style has it's own page class
        # use that, else use the default one.
        if self.styleSecureMode=="0":
            if (self.stylesDir/"websitepage.py").exists():
                global WebsitePage
                module = imp.load_source("websitepage",
                                         self.stylesDir/"websitepage.py")
                WebsitePage = module.WebsitePage
        self.pages = [ WebsitePage("index", 0, package.root) ]
        self.generatePages(package.root, 1)
        uniquifyNames(self.pages)
        # Save each page with links to its previous/next neighbours; the
        # one-behind iteration below leaves the last page for the final save.
        prevPage = None
        thisPage = self.pages[0]
        for nextPage in self.pages[1:]:
            thisPage.save(outputDir, prevPage, nextPage, self.pages)
            prevPage = thisPage
            thisPage = nextPage
        # Last page has no "next".
        thisPage.save(outputDir, prevPage, None, self.pages)
        self.copyFiles(package, outputDir)
        # Zip up the website package
        self.filename.safeSave(self.doZip, _('EXPORT FAILED!\nLast succesful export is %s.'), outputDir)
        # Clean up the temporary dir
        outputDir.rmtree()
def doZip(self, fileObj, outputDir):
"""
Actually saves the zip data. Called by 'Path.safeSave'
"""
zipped = ZipFile(fileObj, "w")
for scormFile in outputDir.files():
zipped.write(scormFile, scormFile.basename().encode('utf8'), ZIP_DEFLATED)
zipped.close()
def appendPageReport(self, page, package):
if not page.node.idevices:self.report += u'"%s","%s",%d,"%s",,,,,,\n' % (package.filename,page.node.title, page.depth, page.name + '.html')
for idevice in page.node.idevices:
if not idevice.userResources:self.report += u'"%s","%s",%d,"%s","%s","%s",,,,\n' % (package.filename,page.node.title, page.depth, page.name + '.html', idevice.klass, idevice.title)
for resource in idevice.userResources:
if type(resource) == Resource:
self.report += u'"%s","%s",%d,"%s","%s","%s","%s","%s","%s","%s"\n' % (package.filename,page.node.title, page.depth, page.name + '.html', idevice.klass, idevice.title, resource.storageName, resource.userName, resource.path, resource.checksum)
else:
self.report += u'"%s",%d,"%s","%s","%s","%s",,,\n' % (package.filename,page.node.title, page.depth, page.name + '.html', idevice.klass, idevice.title, resource)
def export(self, package):
"""
Export web site
Cleans up the previous packages pages and performs the export
"""
if not self.report:
outputDir = self.filename
if not outputDir.exists():
outputDir.mkdir()
# Import the Website Page class. If the style has it's own page class
# use that, else use the default one.
if (self.stylesDir/"websitepage.py").exists():
global WebsitePage
module = imp.load_source("websitepage",
self.stylesDir/"websitepage.py")
WebsitePage = module.WebsitePage
self.pages = [ WebsitePage(self.prefix + "index", 0, package.root) ]
self.generatePages(package.root, 1)
uniquifyNames(self.pages)
prevPage = None
thisPage = self.pages[0]
if self.report:
self.report = u'"%s","%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % ('File','Page Name', 'Level', 'Page File Name', 'Idevice Type', 'Idevice Title', 'Resource File Name', 'Resource User Name', 'Resource Path', 'Resource Checksum')
self.appendPageReport(thisPage,package)
for nextPage in self.pages[1:]:
if self.report:
self.appendPageReport(nextPage,package)
else:
thisPage.save(outputDir, prevPage, nextPage, self.pages)
prevPage = thisPage
thisPage = nextPage
if not self.report:
thisPage.save(outputDir, prevPage, None, self.pages)
if self.prefix == "":
self.copyFiles(package, outputDir)
else:
self.filename.write_text(self.report, 'utf-8')
return outputDir
    def copyFiles(self, package, outputDir):
        """
        Copy all the files used by the website.

        package   -- the Package being exported
        outputDir -- destination directory (a Path-like object)
        """
        if os.path.isdir(self.stylesDir):
            # Copy the style sheet files to the output dir
            styleFiles = [self.stylesDir/'..'/'base.css']
            styleFiles += [self.stylesDir/'..'/'popup_bg.gif']
            styleFiles += self.stylesDir.files("*.css")
            styleFiles += self.stylesDir.files("*.jpg")
            styleFiles += self.stylesDir.files("*.gif")
            styleFiles += self.stylesDir.files("*.png")
            styleFiles += self.stylesDir.files("*.js")
            styleFiles += self.stylesDir.files("*.html")
            styleFiles += self.stylesDir.files("*.ico")
            styleFiles += self.stylesDir.files("*.ttf")
            styleFiles += self.stylesDir.files("*.eot")
            styleFiles += self.stylesDir.files("*.otf")
            styleFiles += self.stylesDir.files("*.woff")
            self.stylesDir.copylist(styleFiles, outputDir)
        # copy the package's resource files
        package.resourceDir.copyfiles(outputDir)
        # copy script files.
        my_style = G.application.config.styleStore.getStyle(package.style)
        # jQuery: a style with a valid config decides for itself whether
        # jQuery is bundled; a style without one always gets it
        if my_style.hasValidConfig:
            if my_style.get_jquery() == True:
                jsFile = (self.scriptsDir/'exe_jquery.js')
                jsFile.copyfile(outputDir/'exe_jquery.js')
        else:
            jsFile = (self.scriptsDir/'exe_jquery.js')
            jsFile.copyfile(outputDir/'exe_jquery.js')
        jsFile = (self.scriptsDir/'common.js')
        jsFile.copyfile(outputDir/'common.js')
        dT=common.getExportDocType();
        if dT == "HTML5":
            jsFile = (self.scriptsDir/'exe_html5.js')
            jsFile.copyfile(outputDir/'exe_html5.js')
        # Include eXe's icon if the Style doesn't have one
        themePath = Path(G.application.config.stylesDir/package.style)
        themeFavicon = themePath.joinpath("favicon.ico")
        if not themeFavicon.exists():
            faviconFile = (self.imagesDir/'favicon.ico')
            faviconFile.copyfile(outputDir/'favicon.ico')
        # copy players for media idevices.
        # One flag per optional asset bundle; all start unset and are
        # switched on while scanning the idevices below.
        hasFlowplayer = False
        hasMagnifier = False
        hasXspfplayer = False
        hasGallery = False
        hasFX = False
        hasGames = False
        hasWikipedia = False
        isBreak = False
        hasInstructions = False
        hasMediaelement = False
        hasTooltips = False
        for page in self.pages:
            # isBreak short-circuits the page scan once every flag is set
            if isBreak:
                break
            for idevice in page.node.idevices:
                if (hasFlowplayer and hasMagnifier and hasXspfplayer and hasGallery and hasFX and hasGames and hasWikipedia and hasInstructions and hasMediaelement and hasTooltips):
                    isBreak = True
                    break
                if not hasFlowplayer:
                    if 'flowPlayer.swf' in idevice.systemResources:
                        hasFlowplayer = True
                if not hasMagnifier:
                    if 'mojomagnify.js' in idevice.systemResources:
                        hasMagnifier = True
                if not hasXspfplayer:
                    if 'xspf_player.swf' in idevice.systemResources:
                        hasXspfplayer = True
                if not hasGallery:
                    hasGallery = common.ideviceHasGallery(idevice)
                if not hasFX:
                    hasFX = common.ideviceHasFX(idevice)
                if not hasGames:
                    hasGames = common.ideviceHasGames(idevice)
                if not hasWikipedia:
                    if 'WikipediaIdevice' == idevice.klass:
                        hasWikipedia = True
                if not hasInstructions:
                    if 'TrueFalseIdevice' == idevice.klass or 'MultichoiceIdevice' == idevice.klass or 'VerdaderofalsofpdIdevice' == idevice.klass or 'EleccionmultiplefpdIdevice' == idevice.klass:
                        hasInstructions = True
                if not hasMediaelement:
                    hasMediaelement = common.ideviceHasMediaelement(idevice)
                if not hasTooltips:
                    hasTooltips = common.ideviceHasTooltips(idevice)
        # Copy only the player/support bundles that are actually needed.
        if hasFlowplayer:
            videofile = (self.templatesDir/'flowPlayer.swf')
            videofile.copyfile(outputDir/'flowPlayer.swf')
            controlsfile = (self.templatesDir/'flowplayer.controls.swf')
            controlsfile.copyfile(outputDir/'flowplayer.controls.swf')
        if hasMagnifier:
            videofile = (self.templatesDir/'mojomagnify.js')
            videofile.copyfile(outputDir/'mojomagnify.js')
        if hasXspfplayer:
            videofile = (self.templatesDir/'xspf_player.swf')
            videofile.copyfile(outputDir/'xspf_player.swf')
        if hasGallery:
            exeLightbox = (self.scriptsDir/'exe_lightbox')
            exeLightbox.copyfiles(outputDir)
        if hasFX:
            exeEffects = (self.scriptsDir/'exe_effects')
            exeEffects.copyfiles(outputDir)
        if hasGames:
            exeGames = (self.scriptsDir/'exe_games')
            exeGames.copyfiles(outputDir)
        if hasWikipedia:
            wikipediaCSS = (self.cssDir/'exe_wikipedia.css')
            wikipediaCSS.copyfile(outputDir/'exe_wikipedia.css')
        if hasInstructions:
            common.copyFileIfNotInStyle('panel-amusements.png', self, outputDir)
            common.copyFileIfNotInStyle('stock-stop.png', self, outputDir)
        if hasMediaelement:
            mediaelement = (self.scriptsDir/'mediaelement')
            mediaelement.copyfiles(outputDir)
            dT = common.getExportDocType()
            # mediaelement also needs the HTML5 shim for non-HTML5 exports
            if dT != "HTML5":
                jsFile = (self.scriptsDir/'exe_html5.js')
                jsFile.copyfile(outputDir/'exe_html5.js')
        if hasTooltips:
            exe_tooltips = (self.scriptsDir/'exe_tooltips')
            exe_tooltips.copyfiles(outputDir)
        # optionally embed the editable source of the package
        if hasattr(package, 'exportSource') and package.exportSource:
            (G.application.config.webDir/'templates'/'content.xsd').copyfile(outputDir/'content.xsd')
            (outputDir/'content.data').write_bytes(encodeObject(package))
            (outputDir/'contentv3.xml').write_bytes(encodeObjectToXML(package))
        if package.license == "license GFDL":
            # include a copy of the GNU Free Documentation Licence
            (self.templatesDir/'fdl.html').copyfile(outputDir/'fdl.html')
def generatePages(self, node, depth):
"""
Recursively generate pages and store in pages member variable
for retrieving later
"""
for child in node.children:
# assure lower pagename, without whitespaces or alphanumeric characters:
pageName = child.titleShort.lower().replace(" ", "_")
pageName = re.sub(r"\W", "", pageName)
if not pageName:
pageName = "__"
self.pages.append(WebsitePage(self.prefix + pageName, depth, child))
self.generatePages(child, depth + 1)
| mclois/iteexe | exe/export/websiteexport.py | Python | gpl-2.0 | 22,456 | [
"VisIt"
] | 3abec6232cf1a5cb234d5c351bdf8b7b7f95abf9b02071eb013432994f84163d |
# setuptools installation of GromacsWrapper
# Copyright (c) 2008-2011 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
#
# See the files INSTALL and README for details or visit
# https://github.com/Becksteinlab/GromacsWrapper
from __future__ import with_statement
from setuptools import setup, find_packages
import versioneer
# Use the README as the PyPI long description.
with open("README.rst") as readme:
    long_description = readme.read()

setup(name="GromacsWrapper",
      # version and build commands are managed by versioneer (git tags)
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description="A Python wrapper around the Gromacs tools.",
      long_description=long_description,
      long_description_content_type="text/x-rst",
      author="Oliver Beckstein",
      author_email="orbeckst@gmail.com",
      license="GPLv3",
      url="https://github.com/Becksteinlab/GromacsWrapper",
      download_url="https://github.com/Becksteinlab/GromacsWrapper/downloads",
      keywords="science Gromacs analysis 'molecular dynamics'",
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'License :: OSI Approved :: BSD License',
          'Operating System :: POSIX',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows ',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Topic :: Scientific/Engineering :: Bio-Informatics',
          'Topic :: Scientific/Engineering :: Chemistry',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      packages=find_packages(
          exclude=['scripts', 'tests', 'tests.*', 'extras', 'doc/examples']),
      scripts=[
          'scripts/gw-join_parts.py',
          'scripts/gw-merge_topologies.py',
          'scripts/gw-forcefield.py',
          'scripts/gw-partial_tempering.py',
      ],
      # non-Python files shipped inside the gromacs package
      package_data={'gromacs': ['templates/*.sge', 'templates/*.pbs',  # template files
                                'templates/*.ll', 'templates/*.sh',
                                'templates/*.mdp', 'templates/*.cfg'
                                ],
                    },
      install_requires=['numpy>=1.0',
                        'six',  # towards py 3 compatibility
                        'numkit',  # numerical helpers
                        'matplotlib',
                        ],
      tests_require=['pytest', 'numpy>=1.0', 'pandas>=0.17'],
      zip_safe=True,
      )
| Becksteinlab/GromacsWrapper | setup.py | Python | gpl-3.0 | 2,912 | [
"Gromacs",
"VisIt"
] | eabbe59a2ff88e4a92f787ae03d53029ca64c516bf3e86ad251afcca59a654e1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scour
#
# Copyright 2010 Jeff Schiller
# Copyright 2010 Louis Simard
#
# This file is part of Scour, http://www.codedread.com/scour/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Notes:
# rubys' path-crunching ideas here: http://intertwingly.net/code/svgtidy/spec.rb
# (and implemented here: http://intertwingly.net/code/svgtidy/svgtidy.rb )
# Yet more ideas here: http://wiki.inkscape.org/wiki/index.php/Save_Cleaned_SVG
#
# * Process Transformations
# * Collapse all group based transformations
# Even more ideas here: http://esw.w3.org/topic/SvgTidy
# * analysis of path elements to see if rect can be used instead? (must also need to look
# at rounded corners)
# Next Up:
# - why are marker-start, -end not removed from the style attribute?
# - why are only overflow style properties considered and not attributes?
# - only remove unreferenced elements if they are not children of a referenced element
# - add an option to remove ids if they match the Inkscape-style of IDs
# - investigate point-reducing algorithms
# - parse transform attribute
# - if a <g> has only one element in it, collapse the <g> (ensure transform, etc are carried down)
# necessary to get true division
from __future__ import division
import os
import sys
import xml.dom.minidom
import re
import math
from svg_regex import svg_parser
from svg_transform import svg_transform_parser
import optparse
from yocto_css import parseCssString
# Python 2.3- did not have Decimal
try:
from decimal import *
except ImportError:
print >>sys.stderr, "Scour requires Python 2.4."
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
pass
# Program identification.
APP = 'scour'
VER = '0.28'
COPYRIGHT = 'Copyright Jeff Schiller, Louis Simard, 2010'

# XML namespace URIs: SVG and XLink are kept in the output; the editor-
# specific namespaces below only carry authoring metadata.
NS = { 'SVG': 'http://www.w3.org/2000/svg',
       'XLINK': 'http://www.w3.org/1999/xlink',
       'SODIPODI': 'http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd',
       'INKSCAPE': 'http://www.inkscape.org/namespaces/inkscape',
       'ADOBE_ILLUSTRATOR': 'http://ns.adobe.com/AdobeIllustrator/10.0/',
       'ADOBE_GRAPHS': 'http://ns.adobe.com/Graphs/1.0/',
       'ADOBE_SVG_VIEWER': 'http://ns.adobe.com/AdobeSVGViewerExtensions/3.0/',
       'ADOBE_VARIABLES': 'http://ns.adobe.com/Variables/1.0/',
       'ADOBE_SFW': 'http://ns.adobe.com/SaveForWeb/1.0/',
       'ADOBE_EXTENSIBILITY': 'http://ns.adobe.com/Extensibility/1.0/',
       'ADOBE_FLOWS': 'http://ns.adobe.com/Flows/1.0/',
       'ADOBE_IMAGE_REPLACEMENT': 'http://ns.adobe.com/ImageReplacement/1.0/',
       'ADOBE_CUSTOM': 'http://ns.adobe.com/GenericCustomNamespace/1.0/',
       'ADOBE_XPATH': 'http://ns.adobe.com/XPath/1.0/'
       }

# Editor-private namespaces whose elements/attributes are stripped.
unwanted_ns = [ NS['SODIPODI'], NS['INKSCAPE'], NS['ADOBE_ILLUSTRATOR'],
                NS['ADOBE_GRAPHS'], NS['ADOBE_SVG_VIEWER'], NS['ADOBE_VARIABLES'],
                NS['ADOBE_SFW'], NS['ADOBE_EXTENSIBILITY'], NS['ADOBE_FLOWS'],
                NS['ADOBE_IMAGE_REPLACEMENT'], NS['ADOBE_CUSTOM'], NS['ADOBE_XPATH'] ]
# Presentation properties that may also appear as XML attributes on SVG
# elements (as opposed to only inside style="..." declarations).
svgAttributes = [
'clip-rule',
'display',
'fill',
'fill-opacity',
'fill-rule',
'filter',
'font-family',
'font-size',
'font-stretch',
'font-style',
'font-variant',
'font-weight',
'line-height',
'marker',
'marker-end',
'marker-mid',
'marker-start',
'opacity',
'overflow',
'stop-color',
'stop-opacity',
'stroke',
'stroke-dasharray',
'stroke-dashoffset',
'stroke-linecap',
'stroke-linejoin',
'stroke-miterlimit',
'stroke-opacity',
'stroke-width',
'visibility'
]
# SVG/CSS color keyword names mapped to their rgb() equivalents, used to
# normalize color values before minimizing them.
colors = {
    'aliceblue': 'rgb(240, 248, 255)',
    'antiquewhite': 'rgb(250, 235, 215)',
    'aqua': 'rgb( 0, 255, 255)',
    'aquamarine': 'rgb(127, 255, 212)',
    'azure': 'rgb(240, 255, 255)',
    'beige': 'rgb(245, 245, 220)',
    'bisque': 'rgb(255, 228, 196)',
    'black': 'rgb( 0, 0, 0)',
    'blanchedalmond': 'rgb(255, 235, 205)',
    'blue': 'rgb( 0, 0, 255)',
    'blueviolet': 'rgb(138, 43, 226)',
    'brown': 'rgb(165, 42, 42)',
    'burlywood': 'rgb(222, 184, 135)',
    'cadetblue': 'rgb( 95, 158, 160)',
    'chartreuse': 'rgb(127, 255, 0)',
    'chocolate': 'rgb(210, 105, 30)',
    'coral': 'rgb(255, 127, 80)',
    'cornflowerblue': 'rgb(100, 149, 237)',
    'cornsilk': 'rgb(255, 248, 220)',
    'crimson': 'rgb(220, 20, 60)',
    'cyan': 'rgb( 0, 255, 255)',
    'darkblue': 'rgb( 0, 0, 139)',
    'darkcyan': 'rgb( 0, 139, 139)',
    'darkgoldenrod': 'rgb(184, 134, 11)',
    'darkgray': 'rgb(169, 169, 169)',
    'darkgreen': 'rgb( 0, 100, 0)',
    'darkgrey': 'rgb(169, 169, 169)',
    'darkkhaki': 'rgb(189, 183, 107)',
    'darkmagenta': 'rgb(139, 0, 139)',
    'darkolivegreen': 'rgb( 85, 107, 47)',
    'darkorange': 'rgb(255, 140, 0)',
    'darkorchid': 'rgb(153, 50, 204)',
    'darkred': 'rgb(139, 0, 0)',
    'darksalmon': 'rgb(233, 150, 122)',
    'darkseagreen': 'rgb(143, 188, 143)',
    'darkslateblue': 'rgb( 72, 61, 139)',
    'darkslategray': 'rgb( 47, 79, 79)',
    'darkslategrey': 'rgb( 47, 79, 79)',
    'darkturquoise': 'rgb( 0, 206, 209)',
    'darkviolet': 'rgb(148, 0, 211)',
    'deeppink': 'rgb(255, 20, 147)',
    'deepskyblue': 'rgb( 0, 191, 255)',
    'dimgray': 'rgb(105, 105, 105)',
    'dimgrey': 'rgb(105, 105, 105)',
    'dodgerblue': 'rgb( 30, 144, 255)',
    'firebrick': 'rgb(178, 34, 34)',
    'floralwhite': 'rgb(255, 250, 240)',
    'forestgreen': 'rgb( 34, 139, 34)',
    'fuchsia': 'rgb(255, 0, 255)',
    'gainsboro': 'rgb(220, 220, 220)',
    'ghostwhite': 'rgb(248, 248, 255)',
    'gold': 'rgb(255, 215, 0)',
    'goldenrod': 'rgb(218, 165, 32)',
    'gray': 'rgb(128, 128, 128)',
    'grey': 'rgb(128, 128, 128)',
    'green': 'rgb( 0, 128, 0)',
    'greenyellow': 'rgb(173, 255, 47)',
    'honeydew': 'rgb(240, 255, 240)',
    'hotpink': 'rgb(255, 105, 180)',
    'indianred': 'rgb(205, 92, 92)',
    'indigo': 'rgb( 75, 0, 130)',
    'ivory': 'rgb(255, 255, 240)',
    'khaki': 'rgb(240, 230, 140)',
    'lavender': 'rgb(230, 230, 250)',
    'lavenderblush': 'rgb(255, 240, 245)',
    'lawngreen': 'rgb(124, 252, 0)',
    'lemonchiffon': 'rgb(255, 250, 205)',
    'lightblue': 'rgb(173, 216, 230)',
    'lightcoral': 'rgb(240, 128, 128)',
    'lightcyan': 'rgb(224, 255, 255)',
    'lightgoldenrodyellow': 'rgb(250, 250, 210)',
    'lightgray': 'rgb(211, 211, 211)',
    'lightgreen': 'rgb(144, 238, 144)',
    'lightgrey': 'rgb(211, 211, 211)',
    'lightpink': 'rgb(255, 182, 193)',
    'lightsalmon': 'rgb(255, 160, 122)',
    'lightseagreen': 'rgb( 32, 178, 170)',
    'lightskyblue': 'rgb(135, 206, 250)',
    'lightslategray': 'rgb(119, 136, 153)',
    'lightslategrey': 'rgb(119, 136, 153)',
    'lightsteelblue': 'rgb(176, 196, 222)',
    'lightyellow': 'rgb(255, 255, 224)',
    'lime': 'rgb( 0, 255, 0)',
    'limegreen': 'rgb( 50, 205, 50)',
    'linen': 'rgb(250, 240, 230)',
    'magenta': 'rgb(255, 0, 255)',
    'maroon': 'rgb(128, 0, 0)',
    'mediumaquamarine': 'rgb(102, 205, 170)',
    'mediumblue': 'rgb( 0, 0, 205)',
    'mediumorchid': 'rgb(186, 85, 211)',
    'mediumpurple': 'rgb(147, 112, 219)',
    'mediumseagreen': 'rgb( 60, 179, 113)',
    'mediumslateblue': 'rgb(123, 104, 238)',
    'mediumspringgreen': 'rgb( 0, 250, 154)',
    'mediumturquoise': 'rgb( 72, 209, 204)',
    'mediumvioletred': 'rgb(199, 21, 133)',
    'midnightblue': 'rgb( 25, 25, 112)',
    'mintcream': 'rgb(245, 255, 250)',
    'mistyrose': 'rgb(255, 228, 225)',
    'moccasin': 'rgb(255, 228, 181)',
    'navajowhite': 'rgb(255, 222, 173)',
    'navy': 'rgb( 0, 0, 128)',
    'oldlace': 'rgb(253, 245, 230)',
    'olive': 'rgb(128, 128, 0)',
    'olivedrab': 'rgb(107, 142, 35)',
    'orange': 'rgb(255, 165, 0)',
    'orangered': 'rgb(255, 69, 0)',
    'orchid': 'rgb(218, 112, 214)',
    'palegoldenrod': 'rgb(238, 232, 170)',
    'palegreen': 'rgb(152, 251, 152)',
    'paleturquoise': 'rgb(175, 238, 238)',
    'palevioletred': 'rgb(219, 112, 147)',
    'papayawhip': 'rgb(255, 239, 213)',
    'peachpuff': 'rgb(255, 218, 185)',
    'peru': 'rgb(205, 133, 63)',
    'pink': 'rgb(255, 192, 203)',
    'plum': 'rgb(221, 160, 221)',
    'powderblue': 'rgb(176, 224, 230)',
    'purple': 'rgb(128, 0, 128)',
    'red': 'rgb(255, 0, 0)',
    'rosybrown': 'rgb(188, 143, 143)',
    'royalblue': 'rgb( 65, 105, 225)',
    'saddlebrown': 'rgb(139, 69, 19)',
    'salmon': 'rgb(250, 128, 114)',
    'sandybrown': 'rgb(244, 164, 96)',
    'seagreen': 'rgb( 46, 139, 87)',
    'seashell': 'rgb(255, 245, 238)',
    'sienna': 'rgb(160, 82, 45)',
    'silver': 'rgb(192, 192, 192)',
    'skyblue': 'rgb(135, 206, 235)',
    'slateblue': 'rgb(106, 90, 205)',
    'slategray': 'rgb(112, 128, 144)',
    'slategrey': 'rgb(112, 128, 144)',
    'snow': 'rgb(255, 250, 250)',
    'springgreen': 'rgb( 0, 255, 127)',
    'steelblue': 'rgb( 70, 130, 180)',
    'tan': 'rgb(210, 180, 140)',
    'teal': 'rgb( 0, 128, 128)',
    'thistle': 'rgb(216, 191, 216)',
    'tomato': 'rgb(255, 99, 71)',
    'turquoise': 'rgb( 64, 224, 208)',
    'violet': 'rgb(238, 130, 238)',
    'wheat': 'rgb(245, 222, 179)',
    'white': 'rgb(255, 255, 255)',
    'whitesmoke': 'rgb(245, 245, 245)',
    'yellow': 'rgb(255, 255, 0)',
    'yellowgreen': 'rgb(154, 205, 50)',
    }
# Default values of SVG presentation properties: an attribute whose value
# equals its default can be removed without changing rendering.
default_attributes = { # excluded all attributes with 'auto' as default
    # SVG 1.1 presentation attributes
    'baseline-shift': 'baseline',
    'clip-path': 'none',
    'clip-rule': 'nonzero',
    'color': '#000',
    'color-interpolation-filters': 'linearRGB',
    'color-interpolation': 'sRGB',
    'direction': 'ltr',
    'display': 'inline',
    'enable-background': 'accumulate',
    'fill': '#000',
    'fill-opacity': '1',
    'fill-rule': 'nonzero',
    'filter': 'none',
    'flood-color': '#000',
    'flood-opacity': '1',
    'font-size-adjust': 'none',
    'font-size': 'medium',
    'font-stretch': 'normal',
    'font-style': 'normal',
    'font-variant': 'normal',
    'font-weight': 'normal',
    'glyph-orientation-horizontal': '0deg',
    'letter-spacing': 'normal',
    'lighting-color': '#fff',
    'marker': 'none',
    'marker-start': 'none',
    'marker-mid': 'none',
    'marker-end': 'none',
    'mask': 'none',
    'opacity': '1',
    'pointer-events': 'visiblePainted',
    'stop-color': '#000',
    'stop-opacity': '1',
    'stroke': 'none',
    'stroke-dasharray': 'none',
    'stroke-dashoffset': '0',
    'stroke-linecap': 'butt',
    'stroke-linejoin': 'miter',
    'stroke-miterlimit': '4',
    'stroke-opacity': '1',
    'stroke-width': '1',
    'text-anchor': 'start',
    'text-decoration': 'none',
    'unicode-bidi': 'normal',
    'visibility': 'visible',
    'word-spacing': 'normal',
    'writing-mode': 'lr-tb',
    # SVG 1.2 tiny properties
    'audio-level': '1',
    'solid-color': '#000',
    'solid-opacity': '1',
    'text-align': 'start',
    'vector-effect': 'none',
    'viewport-fill': 'none',
    'viewport-fill-opacity': '1',
    }
def isSameSign(a, b):
    """True unless a and b have strictly opposite signs (zero matches either)."""
    bothNonNegative = a >= 0 and b >= 0
    bothNonPositive = a <= 0 and b <= 0
    return bothNonNegative or bothNonPositive
# Regexes used when parsing SVG length strings (see SVGLength).
scinumber = re.compile(r"[-+]?(\d*\.?)?\d+[eE][-+]?\d+")  # number in scientific notation
number = re.compile(r"[-+]?(\d*\.?)?\d+")  # plain decimal number
sciExponent = re.compile(r"[eE]([-+]?\d+)")  # captures just the exponent digits
unit = re.compile("(em|ex|px|pt|pc|cm|mm|in|%){1,1}$")  # unit suffix anchored at end of string
class Unit(object):
    """
    Integer constants for the CSS/SVG length units, plus converters
    between the textual unit suffixes and those constants.
    """
    # Integer constants for units.
    INVALID = -1
    NONE = 0
    PCT = 1
    PX = 2
    PT = 3
    PC = 4
    EM = 5
    EX = 6
    CM = 7
    MM = 8
    IN = 9

    # String to Unit: unit suffix -> integer constant.
    s2u = {
        '': NONE,
        '%': PCT,
        'px': PX,
        'pt': PT,
        'pc': PC,
        'em': EM,
        'ex': EX,
        'cm': CM,
        'mm': MM,
        'in': IN,
    }

    # Unit to String: integer constant -> unit suffix.
    u2s = {
        NONE: '',
        PCT: '%',
        PX: 'px',
        PT: 'pt',
        PC: 'pc',
        EM: 'em',
        EX: 'ex',
        CM: 'cm',
        MM: 'mm',
        IN: 'in',
    }

    @staticmethod
    def get(unitstr):
        """Return the Unit constant for a suffix string (INVALID if unknown)."""
        if unitstr is None:
            return Unit.NONE
        return Unit.s2u.get(unitstr, Unit.INVALID)

    @staticmethod
    def str(unitint):
        """Return the suffix for a Unit constant ('INVALID' if unknown)."""
        return Unit.u2s.get(unitint, 'INVALID')
class SVGLength(object):
    """
    A parsed SVG length: a numeric 'value' plus a 'units' constant from
    the Unit class.

    Accepts plain numbers ('5', '-0.5'), scientific notation ('1e-2'),
    and numbers with a unit suffix ('5px', '1cm').  Integral values are
    stored as ints.  Anything unparsable yields value 0 and
    units == Unit.INVALID.
    """
    def __init__(self, str):
        try: # simple unitless and no scientific notation
            self.value = float(str)
            if int(self.value) == self.value:
                self.value = int(self.value)
            self.units = Unit.NONE
        except ValueError:
            # we know that the length string has an exponent, a unit, both or is invalid
            # parse out number, exponent and unit
            self.value = 0
            unitBegin = 0
            scinum = scinumber.match(str)
            if scinum != None:
                # this will always match, no need to check it
                numMatch = number.match(str)
                expMatch = sciExponent.search(str, numMatch.start(0))
                self.value = (float(numMatch.group(0)) *
                    10 ** float(expMatch.group(1)))
                unitBegin = expMatch.end(1)
            else:
                # unit or invalid
                numMatch = number.match(str)
                if numMatch != None:
                    self.value = float(numMatch.group(0))
                    unitBegin = numMatch.end(0)
            if int(self.value) == self.value:
                self.value = int(self.value)
            if unitBegin != 0 :
                unitMatch = unit.search(str, unitBegin)
                if unitMatch != None :
                    self.units = Unit.get(unitMatch.group(0))
                else:
                    # BUGFIX: a number followed by trailing junk that is
                    # not a recognised unit (e.g. '5foo') previously left
                    # self.units unset, so later reads raised
                    # AttributeError.  Treat it as invalid instead.
                    self.value = 0
                    self.units = Unit.INVALID
            # invalid
            else:
                # TODO: this needs to set the default for the given attribute (how?)
                self.value = 0
                self.units = Unit.INVALID
def findElementsWithId(node, elems=None):
    """
    Collect every element at or below 'node' carrying a non-empty id
    attribute, as a dict mapping id -> element.

    An existing dict may be passed via 'elems' to accumulate into.
    """
    if elems is None:
        elems = {}
    # Explicit stack instead of recursion; children are pushed in reverse
    # so elements are still visited in document (preorder) order.
    stack = [node]
    while stack:
        current = stack.pop()
        currentId = current.getAttribute('id')
        if currentId != '':
            elems[currentId] = current
        children = current.childNodes
        for position in range(len(children) - 1, -1, -1):
            child = children[position]
            # only Element nodes (nodeType 1) can carry an id attribute
            if child.nodeType == 1:
                stack.append(child)
    return elems
# Presentation properties whose values may reference another element via
# url(#id); scanned by findReferencedElements/findReferencingProperty.
referencingProps = ['fill', 'stroke', 'filter', 'clip-path', 'mask', 'marker-start',
                    'marker-end', 'marker-mid']
def findReferencedElements(node, ids=None):
    """
    Returns the number of times an ID is referenced as well as all elements
    that reference it. node is the node at which to start the search. The
    return value is a map which has the id as key and each value is an array
    where the first value is a count and the second value is a list of nodes
    that referenced it.

    Currently looks at fill, stroke, clip-path, mask, marker, and
    xlink:href attributes.
    """
    global referencingProps
    if ids is None:
        ids = {}
    # TODO: input argument ids is clunky here (see below how it is called)
    # GZ: alternative to passing dict, use **kwargs

    # if this node is a style element, parse its text into CSS
    if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
        # one stretch of text, please! (we could use node.normalize(), but
        # this actually modifies the node, and we don't want to keep
        # whitespace around if there's any)
        stylesheet = "".join([child.nodeValue for child in node.childNodes])
        if stylesheet != '':
            cssRules = parseCssString(stylesheet)
            for rule in cssRules:
                for propname in rule['properties']:
                    propval = rule['properties'][propname]
                    findReferencingProperty(node, propname, propval, ids)
        # a <style> element carries no other references; stop here
        return ids

    # else if xlink:href is set, then grab the id
    href = node.getAttributeNS(NS['XLINK'],'href')
    if href != '' and len(href) > 1 and href[0] == '#':
        # we remove the hash mark from the beginning of the id
        id = href[1:]
        if id in ids:
            ids[id][0] += 1
            ids[id][1].append(node)
        else:
            ids[id] = [1,[node]]

    # now get all style properties and the fill, stroke, filter attributes
    # (each XML presentation attribute is rewritten as a "prop:val" string
    # so one loop below can handle both sources)
    styles = node.getAttribute('style').split(';')
    for attr in referencingProps:
        styles.append(':'.join([attr, node.getAttribute(attr)]))

    for style in styles:
        propval = style.split(':')
        if len(propval) == 2 :
            prop = propval[0].strip()
            val = propval[1].strip()
            findReferencingProperty(node, prop, val, ids)

    # recurse into element children
    if node.hasChildNodes() :
        for child in node.childNodes:
            if child.nodeType == 1 :
                findReferencedElements(child, ids)
    return ids
def findReferencingProperty(node, prop, val, ids):
    """
    If property 'prop' with value 'val' (appearing on 'node') references
    another element via url(#id), url('#id') or url("#id"), record the
    reference in 'ids' (same structure as findReferencedElements:
    id -> [count, [referencing nodes]]).
    """
    global referencingProps
    if prop in referencingProps and val != '' :
        id = None
        if len(val) >= 7 and val[0:5] == 'url(#' :
            id = val[5:val.find(')')]
        # if the url has a quote in it, we need to compensate
        elif len(val) >= 8 :
            # double-quote
            if val[0:6] == 'url("#' :
                id = val[6:val.find('")')]
            # single-quote
            elif val[0:6] == "url('#" :
                id = val[6:val.find("')")]
        if id != None:
            # 'in' test instead of the Python-2-only dict.has_key(),
            # matching the style used in findReferencedElements
            if id in ids :
                ids[id][0] += 1
                ids[id][1].append(node)
            else:
                ids[id] = [1,[node]]
# Statistics accumulated over a run; the removal/minification helpers
# below increment these module-level counters as they work.
numIDsRemoved = 0
numElemsRemoved = 0
numAttrsRemoved = 0
numRastersEmbedded = 0
numPathSegmentsReduced = 0
numCurvesStraightened = 0
numBytesSavedInPathData = 0
numBytesSavedInColors = 0
numBytesSavedInIDs = 0
numBytesSavedInLengths = 0
numBytesSavedInTransforms = 0
numPointsRemovedFromPolygon = 0
numCommentBytes = 0
def removeUnusedDefs(doc, defElem, elemsToRemove=None):
    """
    Recursively collect the children of defElem (a <defs> or a nested <g>)
    that nothing in the document references and that may be deleted.

    Unreferenced <g> children are descended into rather than removed
    wholesale; font/style/metadata/script/title/desc elements are always
    kept.  Returns elemsToRemove with the removable elements appended.
    """
    if elemsToRemove is None:
        elemsToRemove = []

    # (a previously computed findElementsWithId() result was never used
    # here and has been dropped)
    referencedIDs = findReferencedElements(doc.documentElement)

    keepTags = ['font', 'style', 'metadata', 'script', 'title', 'desc']
    for elem in defElem.childNodes:
        # only look at it if an element and not referenced anywhere else
        if elem.nodeType == 1 and (elem.getAttribute('id') == '' or \
                (not elem.getAttribute('id') in referencedIDs)):
            # we only inspect the children of a group in a defs if the group
            # is not referenced anywhere else
            if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                elemsToRemove = removeUnusedDefs(doc, elem, elemsToRemove)
            # we only remove if it is not one of our tags we always keep (see above)
            elif not elem.nodeName in keepTags:
                elemsToRemove.append(elem)
    return elemsToRemove
def removeUnreferencedElements(doc):
    """
    Removes all unreferenced elements except for <svg>, <font>, <metadata>, <title>, and <desc>.
    Also vacuums the defs of any non-referenced renderable elements.
    Returns the number of unreferenced elements removed from the document.
    """
    global numElemsRemoved
    num = 0

    # Remove certain unreferenced elements outside of defs
    removeTags = ['linearGradient', 'radialGradient', 'pattern']

    identifiedElements = findElementsWithId(doc.documentElement)
    referencedIDs = findReferencedElements(doc.documentElement)

    for id in identifiedElements:
        if not id in referencedIDs:
            goner = identifiedElements[id]
            # a None parentNode means the element was already detached
            if goner != None and goner.parentNode != None and goner.nodeName in removeTags:
                goner.parentNode.removeChild(goner)
                num += 1
                numElemsRemoved += 1

    # Remove most unreferenced elements inside defs
    defs = doc.documentElement.getElementsByTagName('defs')
    for aDef in defs:
        elemsToRemove = removeUnusedDefs(doc, aDef)
        for elem in elemsToRemove:
            elem.parentNode.removeChild(elem)
            numElemsRemoved += 1
            num += 1
    return num
def shortenIDs(doc, prefix, unprotectedElements=None):
    """
    Shortens ID names used in the document. ID names referenced the most often are assigned the
    shortest ID names.
    If the list unprotectedElements is provided, only IDs from this list will be shortened.

    Returns the number of bytes saved by shortening ID names in the document.
    """
    num = 0

    identifiedElements = findElementsWithId(doc.documentElement)
    if unprotectedElements is None:
        unprotectedElements = identifiedElements
    referencedIDs = findReferencedElements(doc.documentElement)

    # Make idList (list of idnames) sorted by reference count
    # descending, so the highest reference count is first.
    # First check that there's actually a defining element for the current ID name.
    # (Cyn: I've seen documents with #id references but no element with that ID!)
    idList = [(referencedIDs[rid][0], rid) for rid in referencedIDs
              if rid in unprotectedElements]
    # note: ties on the count are broken by the id name itself
    # (descending), keeping the assignment deterministic
    idList.sort(reverse=True)
    idList = [rid for count, rid in idList]

    curIdNum = 1

    for rid in idList:
        curId = intToID(curIdNum, prefix)
        # First make sure that *this* element isn't already using
        # the ID name we want to give it.
        if curId != rid:
            # Then, skip ahead if the new ID is already in identifiedElement.
            while curId in identifiedElements:
                curIdNum += 1
                curId = intToID(curIdNum, prefix)
            # Then go rename it.
            num += renameID(doc, rid, curId, identifiedElements, referencedIDs)
        curIdNum += 1

    return num
def intToID(idnum, prefix):
    """
    Return the spreadsheet-style ID name for the given 1-based number:
    1..26 -> a..z, 27 -> aa, 28 -> ab, ... prefixed with 'prefix'.
    """
    letters = []
    remaining = idnum
    while remaining > 0:
        # shift to 0-based so 26 maps to 'z' rather than rolling over
        remaining -= 1
        letters.append(chr(ord('a') + remaining % 26))
        remaining //= 26
    return prefix + ''.join(reversed(letters))
def renameID(doc, idFrom, idTo, identifiedElements, referencedIDs):
    """
    Changes the ID name from idFrom to idTo, on the declaring element
    as well as all references in the document doc.

    Updates identifiedElements and referencedIDs.
    Does not handle the case where idTo is already the ID name
    of another element in doc.

    Returns the number of bytes saved by this replacement.
    """
    num = 0

    definingNode = identifiedElements[idFrom]
    definingNode.setAttribute("id", idTo)
    del identifiedElements[idFrom]
    identifiedElements[idTo] = definingNode

    referringNodes = referencedIDs[idFrom]

    # Look for the idFrom ID name in each of the referencing elements,
    # exactly like findReferencedElements would.
    # Cyn: Duplicated processing!

    for node in referringNodes[1]:
        # if this node is a style element, parse its text into CSS
        if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
            # node.firstChild will be either a CDATA or a Text node now
            if node.firstChild != None:
                # concatenate the value of all children, in case
                # there's a CDATASection node surrounded by whitespace
                # nodes
                # (node.normalize() will NOT work here, it only acts on Text nodes)
                oldValue = "".join([child.nodeValue for child in node.childNodes])
                # not going to reparse the whole thing
                newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                # BUGFIX: quoted CSS urls are written url('#id')/url("#id")
                # -- the quote precedes the hash mark.  These patterns were
                # previously built as url(#'id')/url(#"id") and therefore
                # never matched (compare the attribute branches below and
                # the parsing in findReferencingProperty).
                newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                # and now replace all the children with this new stylesheet.
                # again, this is in case the stylesheet was a CDATASection
                node.childNodes[:] = [node.ownerDocument.createTextNode(newValue)]
                num += len(oldValue) - len(newValue)

        # if xlink:href is set to #idFrom, then change the id
        href = node.getAttributeNS(NS['XLINK'],'href')
        if href == '#' + idFrom:
            node.setAttributeNS(NS['XLINK'],'href', '#' + idTo)
            num += len(idFrom) - len(idTo)

        # if the style has url(#idFrom), then change the id
        styles = node.getAttribute('style')
        if styles != '':
            newValue = styles.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
            newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
            newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
            node.setAttribute('style', newValue)
            num += len(styles) - len(newValue)

        # now try the fill, stroke, filter attributes
        for attr in referencingProps:
            oldValue = node.getAttribute(attr)
            if oldValue != '':
                newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                node.setAttribute(attr, newValue)
                num += len(oldValue) - len(newValue)

    del referencedIDs[idFrom]
    referencedIDs[idTo] = referringNodes

    return num
def unprotected_ids(doc, options):
    u"""
    Returns a dict (id -> element) of the IDs within the document doc
    that are NOT protected by any of the protect_ids_* options.
    """
    identifiedElements = findElementsWithId(doc.documentElement)
    if not (options.protect_ids_noninkscape or
            options.protect_ids_list or
            options.protect_ids_prefix):
        return identifiedElements
    if options.protect_ids_list:
        protect_ids_list = options.protect_ids_list.split(",")
    if options.protect_ids_prefix:
        protect_ids_prefixes = options.protect_ids_prefix.split(",")
    # iterate over a snapshot of the keys because entries are deleted
    # during the loop (list() is a no-op on Python 2 where keys() already
    # copies, and keeps this safe under Python 3)
    for id in list(identifiedElements.keys()):
        protected = False
        if options.protect_ids_noninkscape and not id[-1].isdigit():
            protected = True
        if options.protect_ids_list and id in protect_ids_list:
            protected = True
        if options.protect_ids_prefix:
            for prefix in protect_ids_prefixes:
                if id.startswith(prefix):
                    protected = True
        if protected:
            del identifiedElements[id]
    return identifiedElements
def removeUnreferencedIDs(referencedIDs, identifiedElements):
    """
    Removes the id attribute of every identified element whose id is not
    in referencedIDs.  <font> elements always keep their ids.

    referencedIDs:      map of id -> [count, [referencing nodes]]
    identifiedElements: map of id -> defining element

    Returns the number of ID attributes removed.
    """
    global numIDsRemoved
    keepTags = ['font']
    num = 0
    for id in identifiedElements.keys():
        node = identifiedElements[id]
        # 'not in' test instead of the Python-2-only dict.has_key()
        if id not in referencedIDs and not node.nodeName in keepTags:
            node.removeAttribute('id')
            numIDsRemoved += 1
            num += 1
    return num
def removeNamespacedAttributes(node, namespaces):
    """
    Recursively strip from node (and its element descendants) every
    attribute whose namespace URI is in the given list.
    Returns the number of attributes removed.
    """
    global numAttrsRemoved
    num = 0
    if node.nodeType == 1 :
        # remove all namespace'd attributes from this element
        attrList = node.attributes
        attrsToRemove = []
        # collect names first: removing while walking the NamedNodeMap
        # would shift the remaining items
        for attrNum in xrange(attrList.length):
            attr = attrList.item(attrNum)
            if attr != None and attr.namespaceURI in namespaces:
                attrsToRemove.append(attr.nodeName)
        for attrName in attrsToRemove :
            num += 1
            numAttrsRemoved += 1
            node.removeAttribute(attrName)

        # now recurse for children
        for child in node.childNodes:
            num += removeNamespacedAttributes(child, namespaces)
    return num
def removeNamespacedElements(node, namespaces):
    """Removes every element whose namespace URI is in 'namespaces' from the
    tree rooted at 'node'.

    Returns the number of elements removed."""
    global numElemsRemoved
    num = 0
    if node.nodeType == 1:
        # two passes: mark first, then detach, to avoid mutating the live
        # childNodes list while walking it
        doomed = [child for child in node.childNodes
                  if child is not None and child.namespaceURI in namespaces]
        for child in doomed:
            node.removeChild(child)
            numElemsRemoved += 1
            num += 1
    # depth-first over the surviving children
    for child in node.childNodes:
        num += removeNamespacedElements(child, namespaces)
    return num
def removeMetadataElements(doc):
    """Removes all <metadata> elements from the document.

    Returns the number of elements removed."""
    global numElemsRemoved
    # snapshot the live DOM list before detaching anything from the tree
    doomed = list(doc.documentElement.getElementsByTagName('metadata'))
    for element in doomed:
        element.parentNode.removeChild(element)
        numElemsRemoved += 1
    return len(doomed)
def removeNestedGroups(node):
    """
    This walks further and further down the tree, removing groups
    which do not have any attributes or a title/desc child and
    promoting their children up one level
    """
    global numElemsRemoved
    num = 0
    groupsToRemove = []
    # Never promote the children of a <switch>: its rendering depends on its
    # direct children (partial fix for bug 594930, required by the SVG spec).
    isSwitch = node.nodeType == 1 and node.nodeName == 'switch'
    if not isSwitch:
        for child in node.childNodes:
            if (child.nodeName == 'g' and child.namespaceURI == NS['SVG']
                    and len(child.attributes) == 0):
                # only collapse the group if no direct title/desc child exists
                hasTitleOrDesc = False
                for grandchild in child.childNodes:
                    if (grandchild.nodeType == 1
                            and grandchild.namespaceURI == NS['SVG']
                            and grandchild.nodeName in ['title', 'desc']):
                        hasTitleOrDesc = True
                        break
                if not hasTitleOrDesc:
                    groupsToRemove.append(child)
    for g in groupsToRemove:
        # hoist each child to just before the group, then drop the group
        while g.childNodes.length > 0:
            g.parentNode.insertBefore(g.firstChild, g)
        g.parentNode.removeChild(g)
        numElemsRemoved += 1
        num += 1
    # recurse into the (possibly rearranged) children
    for child in node.childNodes:
        if child.nodeType == 1:
            num += removeNestedGroups(child)
    return num
def moveCommonAttributesToParentGroup(elem, referencedElements):
    """
    This recursively calls this function on all children of the passed in element
    and then iterates over all child elements and removes common inheritable attributes
    from the children and places them in the parent group.  But only if the parent contains
    nothing but element children and whitespace.  The attributes are only removed from the
    children if the children are not referenced by other elements in the document.

    Returns the number of attributes removed, counting each attribute once per
    child it was stripped from, minus the copies added back onto the parent.
    """
    num = 0
    childElements = []
    # recurse first into the children (depth-first)
    for child in elem.childNodes:
        if child.nodeType == 1:
            # only add and recurse if the child is not referenced elsewhere
            if not child.getAttribute('id') in referencedElements:
                childElements.append(child)
                num += moveCommonAttributesToParentGroup(child, referencedElements)
        # else if the parent has non-whitespace text children, do not
        # try to move common attributes
        elif child.nodeType == 3 and child.nodeValue.strip():
            return num
    # only process the children if there is more than one element
    if len(childElements) <= 1: return num
    commonAttrs = {}
    # add all inheritable properties of the first child element
    # FIXME: Note there is a chance that the first child is a set/animate in which case
    # its fill attribute is not what we want to look at, we should look for the first
    # non-animate/set element
    attrList = childElements[0].attributes
    # BUGFIX: use a dedicated loop index; the original reused 'num' as the
    # loop variable here, which corrupted the returned statistic.
    for attrIndex in range(attrList.length):
        attr = attrList.item(attrIndex)
        # this is most of the inheritable properties from http://www.w3.org/TR/SVG11/propidx.html
        # and http://www.w3.org/TR/SVGTiny12/attributeTable.html
        if attr.nodeName in ['clip-rule',
                             'display-align',
                             'fill', 'fill-opacity', 'fill-rule',
                             'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                             'font-style', 'font-variant', 'font-weight',
                             'letter-spacing',
                             'pointer-events', 'shape-rendering',
                             'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                             'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                             'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                             'word-spacing', 'writing-mode']:
            # we just add all the attributes from the first child
            commonAttrs[attr.nodeName] = attr.nodeValue
    # for each subsequent child element (the first one seeded commonAttrs)
    for child in childElements[1:]:
        # if we are on an animateXXX/set element, ignore it (due to the 'fill' attribute)
        if child.localName in ['set', 'animate', 'animateColor', 'animateTransform', 'animateMotion']:
            continue
        # drop every 'common' attribute this child does not share
        distinctAttrs = [name for name in commonAttrs.keys()
                         if child.getAttribute(name) != commonAttrs[name]]
        for name in distinctAttrs:
            del commonAttrs[name]
    # commonAttrs now has all the inheritable attributes which are common among all child elements
    for name in commonAttrs.keys():
        for child in childElements:
            child.removeAttribute(name)
        elem.setAttribute(name, commonAttrs[name])
    # update our statistic (we remove N*M attributes and add back in M attributes)
    num += (len(childElements) - 1) * len(commonAttrs)
    return num
def createGroupsForCommonAttributes(elem):
    """
    Creates <g> elements to contain runs of 3 or more
    consecutive child elements having at least one common attribute.

    Common attributes are not promoted to the <g> by this function.
    This is handled by moveCommonAttributesToParentGroup.
    If all children have a common attribute, an extra <g> is not created.
    This function acts recursively on the given element.

    Returns the number of groups created.
    """
    num = 0
    global numElemsRemoved
    # TODO perhaps all of the Presentation attributes in http://www.w3.org/TR/SVG/struct.html#GElement
    # could be added here
    # Cyn: These attributes are the same as in moveAttributesToParentGroup, and must always be
    for curAttr in ['clip-rule',
                    'display-align',
                    'fill', 'fill-opacity', 'fill-rule',
                    'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                    'font-style', 'font-variant', 'font-weight',
                    'letter-spacing',
                    'pointer-events', 'shape-rendering',
                    'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                    'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                    'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                    'word-spacing', 'writing-mode']:
        # Iterate through the children in reverse order, so item(i) for
        # items we have yet to visit still returns the correct nodes.
        curChild = elem.childNodes.length - 1
        while curChild >= 0:
            childNode = elem.childNodes.item(curChild)
            if childNode.nodeType == 1 and childNode.getAttribute(curAttr) != '':
                # We're in a possible run! Track the value and run length.
                value = childNode.getAttribute(curAttr)
                runStart, runEnd = curChild, curChild
                # Run elements includes only element tags, no whitespace/comments/etc.
                # Later, we calculate a run length which includes these.
                runElements = 1
                # Backtrack to get all the nodes having the same
                # attribute value, preserving any nodes in-between.
                while runStart > 0:
                    nextNode = elem.childNodes.item(runStart - 1)
                    if nextNode.nodeType == 1:
                        if nextNode.getAttribute(curAttr) != value: break
                        else:
                            runElements += 1
                            runStart -= 1
                    else: runStart -= 1
                # runs shorter than 3 elements are not worth the <g> overhead
                if runElements >= 3:
                    # Include whitespace/comment/etc. nodes in the run.
                    while runEnd < elem.childNodes.length - 1:
                        if elem.childNodes.item(runEnd + 1).nodeType == 1: break
                        else: runEnd += 1
                    runLength = runEnd - runStart + 1
                    if runLength == elem.childNodes.length: # Every child has this
                        # If the current parent is a <g> already,
                        if elem.nodeName == 'g' and elem.namespaceURI == NS['SVG']:
                            # do not act altogether on this attribute; all the
                            # children have it in common.
                            # Let moveCommonAttributesToParentGroup do it.
                            curChild = -1
                            continue
                        # otherwise, it might be an <svg> element, and
                        # even if all children have the same attribute value,
                        # it's going to be worth making the <g> since
                        # <svg> doesn't support attributes like 'stroke'.
                        # Fall through.
                    # Create a <g> element from scratch.
                    # We need the Document for this.
                    document = elem.ownerDocument
                    group = document.createElementNS(NS['SVG'], 'g')
                    # NOTE(review): the reparenting below writes xml.dom.minidom
                    # internals (the childNodes list and parentNode) directly
                    # instead of using insertBefore/removeChild; this works with
                    # minidom but bypasses the DOM API — confirm before porting
                    # to another DOM implementation.
                    # Move the run of elements to the group.
                    # a) ADD the nodes to the new group.
                    group.childNodes[:] = elem.childNodes[runStart:runEnd + 1]
                    for child in group.childNodes:
                        child.parentNode = group
                    # b) REMOVE the nodes from the element.
                    elem.childNodes[runStart:runEnd + 1] = []
                    # Include the group in elem's children.
                    elem.childNodes.insert(runStart, group)
                    group.parentNode = elem
                    num += 1
                    curChild = runStart - 1
                    # the net element count went up by one <g>; offset the stat
                    numElemsRemoved -= 1
                else:
                    curChild -= 1
            else:
                curChild -= 1
    # each child gets the same treatment, recursively
    for childNode in elem.childNodes:
        if childNode.nodeType == 1:
            num += createGroupsForCommonAttributes(childNode)
    return num
def removeUnusedAttributesOnParent(elem):
    """
    This recursively calls this function on all children of the element passed in,
    then removes any unused attributes on this elem if none of the children inherit it.

    Returns the number of attributes removed.
    """
    num = 0
    childElements = []
    # recurse first into the children (depth-first)
    for child in elem.childNodes:
        if child.nodeType == 1:
            childElements.append(child)
            num += removeUnusedAttributesOnParent(child)
    # only process the children if there is more than one element
    if len(childElements) <= 1: return num
    # gather the parent's inheritable attribute values
    attrList = elem.attributes
    unusedAttrs = {}
    # BUGFIX: use a dedicated loop index; the original reused 'num' as the
    # loop variable here, which corrupted the returned statistic.
    for attrIndex in range(attrList.length):
        attr = attrList.item(attrIndex)
        if attr.nodeName in ['clip-rule',
                             'display-align',
                             'fill', 'fill-opacity', 'fill-rule',
                             'font', 'font-family', 'font-size', 'font-size-adjust', 'font-stretch',
                             'font-style', 'font-variant', 'font-weight',
                             'letter-spacing',
                             'pointer-events', 'shape-rendering',
                             'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
                             'stroke-miterlimit', 'stroke-opacity', 'stroke-width',
                             'text-anchor', 'text-decoration', 'text-rendering', 'visibility',
                             'word-spacing', 'writing-mode']:
            unusedAttrs[attr.nodeName] = attr.nodeValue
    # if at least one child inherits the parent's attribute, the parent's
    # value is in use: drop it from the 'unused' set
    for child in childElements:
        inheritedAttrs = []
        for name in unusedAttrs.keys():
            val = child.getAttribute(name)
            if val == '' or val is None or val == 'inherit':
                inheritedAttrs.append(name)
        for a in inheritedAttrs:
            del unusedAttrs[a]
    # unusedAttrs now has all the parent attributes that every child overrides
    for name in unusedAttrs.keys():
        elem.removeAttribute(name)
        num += 1
    return num
def removeDuplicateGradientStops(doc):
    """
    Removes <stop> elements that duplicate an earlier stop at the same offset
    (same offset, stop-color, stop-opacity and style) within each linear or
    radial gradient.  Also normalizes offset values to plain numbers.

    Returns the number of stops removed.
    """
    global numElemsRemoved
    num = 0
    for gradType in ['linearGradient', 'radialGradient']:
        for grad in doc.getElementsByTagName(gradType):
            stops = {}
            stopsToRemove = []
            for stop in grad.getElementsByTagName('stop'):
                # convert percentages into a floating point number
                offsetU = SVGLength(stop.getAttribute('offset'))
                if offsetU.units == Unit.PCT:
                    offset = offsetU.value / 100.0
                elif offsetU.units == Unit.NONE:
                    offset = offsetU.value
                else:
                    offset = 0
                # set the stop offset value to the integer or floating point equivalent
                if int(offset) == offset: stop.setAttribute('offset', str(int(offset)))
                else: stop.setAttribute('offset', str(offset))
                color = stop.getAttribute('stop-color')
                opacity = stop.getAttribute('stop-opacity')
                style = stop.getAttribute('style')
                # dict.has_key() was removed in Python 3; 'in' is equivalent
                if offset in stops:
                    oldStop = stops[offset]
                    if oldStop[0] == color and oldStop[1] == opacity and oldStop[2] == style:
                        stopsToRemove.append(stop)
                stops[offset] = [color, opacity, style]
            for stop in stopsToRemove:
                stop.parentNode.removeChild(stop)
                num += 1
                numElemsRemoved += 1
    return num
def collapseSinglyReferencedGradients(doc):
    """
    Inlines gradients that are referenced by exactly one other gradient.

    When gradient A is referenced (via xlink:href) only by gradient B, A's
    stops and any positioning attributes that B does not override are copied
    onto B, the xlink:href is dropped, and A is removed from the document.

    Returns the number of gradients collapsed.
    """
    global numElemsRemoved
    num = 0
    identifiedElements = findElementsWithId(doc.documentElement)
    # make sure to reset the ref'ed ids for when we are running this in testscour
    # NOTE: dict.iteritems() does not exist in Python 3; items() behaves the
    # same for this read-only traversal
    for rid, nodeCount in findReferencedElements(doc.documentElement).items():
        count = nodeCount[0]
        nodes = nodeCount[1]
        # Make sure that there's actually a defining element for the current ID name.
        # (Cyn: I've seen documents with #id references but no element with that ID!)
        if count == 1 and rid in identifiedElements:
            elem = identifiedElements[rid]
            if elem != None and elem.nodeType == 1 and elem.nodeName in ['linearGradient', 'radialGradient'] \
                    and elem.namespaceURI == NS['SVG']:
                # found a gradient that is referenced by only 1 other element
                refElem = nodes[0]
                if refElem.nodeType == 1 and refElem.nodeName in ['linearGradient', 'radialGradient'] \
                        and refElem.namespaceURI == NS['SVG']:
                    # elem is a gradient referenced by only one other gradient (refElem)
                    # add the stops to the referencing gradient (this removes them from elem)
                    if len(refElem.getElementsByTagName('stop')) == 0:
                        stopsToAdd = elem.getElementsByTagName('stop')
                        for stop in stopsToAdd:
                            refElem.appendChild(stop)
                    # adopt the gradientUnits, spreadMethod, gradientTransform attributes if
                    # they are unspecified on refElem
                    for attr in ['gradientUnits', 'spreadMethod', 'gradientTransform']:
                        if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                            refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    # if both are radialGradients, adopt elem's fx,fy,cx,cy,r attributes if
                    # they are unspecified on refElem
                    if elem.nodeName == 'radialGradient' and refElem.nodeName == 'radialGradient':
                        for attr in ['fx', 'fy', 'cx', 'cy', 'r']:
                            if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                                refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    # if both are linearGradients, adopt elem's x1,y1,x2,y2 attributes if
                    # they are unspecified on refElem
                    if elem.nodeName == 'linearGradient' and refElem.nodeName == 'linearGradient':
                        for attr in ['x1', 'y1', 'x2', 'y2']:
                            if refElem.getAttribute(attr) == '' and not elem.getAttribute(attr) == '':
                                refElem.setAttributeNS(None, attr, elem.getAttribute(attr))
                    # now remove the xlink:href from refElem
                    refElem.removeAttributeNS(NS['XLINK'], 'href')
                    # now delete elem
                    elem.parentNode.removeChild(elem)
                    numElemsRemoved += 1
                    num += 1
    return num
def removeDuplicateGradients(doc):
    """
    Collapses duplicate gradient definitions.

    Two gradients are duplicates when all their positioning attributes, their
    xlink:href values, and their stop children match.  Every reference to a
    duplicate (fill/stroke attributes, style properties, xlink:href) is
    re-pointed at its 'master' gradient, then the duplicate is removed.

    Returns the number of gradients removed.
    """
    global numElemsRemoved
    num = 0
    gradientsToRemove = {}
    duplicateToMaster = {}
    for gradType in ['linearGradient', 'radialGradient']:
        grads = doc.getElementsByTagName(gradType)
        for grad in grads:
            # TODO: should slice grads from 'grad' here to optimize
            for ograd in grads:
                # do not compare gradient to itself
                if grad == ograd: continue
                # compare grad to ograd (all properties, then all stops)
                # if attributes do not match, go to next gradient
                someGradAttrsDoNotMatch = False
                for attr in ['gradientUnits', 'spreadMethod', 'gradientTransform', 'x1', 'y1', 'x2', 'y2', 'cx', 'cy', 'fx', 'fy', 'r']:
                    if grad.getAttribute(attr) != ograd.getAttribute(attr):
                        someGradAttrsDoNotMatch = True
                        break
                if someGradAttrsDoNotMatch: continue
                # compare xlink:href values too
                if grad.getAttributeNS(NS['XLINK'], 'href') != ograd.getAttributeNS(NS['XLINK'], 'href'):
                    continue
                # all gradient properties match, now time to compare stops
                stops = grad.getElementsByTagName('stop')
                ostops = ograd.getElementsByTagName('stop')
                if stops.length != ostops.length: continue
                # now compare stops pairwise
                stopsNotEqual = False
                for i in range(stops.length):
                    if stopsNotEqual: break
                    stop = stops.item(i)
                    ostop = ostops.item(i)
                    for attr in ['offset', 'stop-color', 'stop-opacity', 'style']:
                        if stop.getAttribute(attr) != ostop.getAttribute(attr):
                            stopsNotEqual = True
                            break
                if stopsNotEqual: continue
                # ograd is a duplicate of grad, we schedule it to be removed UNLESS
                # ograd is ALREADY considered a 'master' element
                # (dict.has_key() was removed in Python 3; 'in' is equivalent)
                if ograd not in gradientsToRemove:
                    if ograd not in duplicateToMaster:
                        if grad not in gradientsToRemove:
                            gradientsToRemove[grad] = []
                        gradientsToRemove[grad].append(ograd)
                        duplicateToMaster[ograd] = grad
    # get a collection of all elements that are referenced and their referencing elements
    referencedIDs = findReferencedElements(doc.documentElement)
    for masterGrad in gradientsToRemove.keys():
        master_id = masterGrad.getAttribute('id')
        for dupGrad in gradientsToRemove[masterGrad]:
            # if the duplicate gradient no longer has a parent that means it was
            # already re-mapped to another master gradient
            if not dupGrad.parentNode: continue
            dup_id = dupGrad.getAttribute('id')
            # for each element that referenced the gradient we are going to remove
            for elem in referencedIDs[dup_id][1]:
                # find out which attribute referenced the duplicate gradient
                for attr in ['fill', 'stroke']:
                    v = elem.getAttribute(attr)
                    if v == 'url(#' + dup_id + ')' or v == 'url("#' + dup_id + '")' or v == "url('#" + dup_id + "')":
                        elem.setAttribute(attr, 'url(#' + master_id + ')')
                if elem.getAttributeNS(NS['XLINK'], 'href') == '#' + dup_id:
                    elem.setAttributeNS(NS['XLINK'], 'href', '#' + master_id)
                # the reference might also hide inside the style attribute
                styles = _getStyle(elem)
                for style in styles:
                    v = styles[style]
                    if v == 'url(#' + dup_id + ')' or v == 'url("#' + dup_id + '")' or v == "url('#" + dup_id + "')":
                        styles[style] = 'url(#' + master_id + ')'
                _setStyle(elem, styles)
            # now that all referencing elements have been re-mapped to the master
            # it is safe to remove this gradient from the document
            dupGrad.parentNode.removeChild(dupGrad)
            numElemsRemoved += 1
            num += 1
    return num
def _getStyle(node):
    u"""Returns the style attribute of a node as a dictionary.

    Only simple "prop:value" declarations are kept; malformed declarations
    (no colon, or more than one) are silently dropped.  Non-element nodes
    and elements without a style attribute yield an empty dictionary."""
    if node.nodeType != 1:
        return {}
    rawStyle = node.getAttribute('style')
    if len(rawStyle) == 0:
        return {}
    styleMap = {}
    for declaration in rawStyle.split(';'):
        parts = declaration.split(':')
        if len(parts) == 2:
            styleMap[parts[0].strip()] = parts[1].strip()
    return styleMap
def _setStyle(node, styleMap):
    u"""Writes the property dictionary ``styleMap`` back into the node's
    style attribute; an empty dictionary removes the attribute entirely."""
    serialized = ';'.join([name + ':' + styleMap[name] for name in styleMap.keys()])
    if serialized:
        node.setAttribute('style', serialized)
    elif node.getAttribute('style'):
        node.removeAttribute('style')
    return node
def repairStyle(node, options):
    """
    Normalizes and prunes the style attribute of 'node' and all descendants.

    Removes style properties that can have no rendering effect (e.g. every
    stroke-* property when the stroke is 'none' or invisible), strips
    Inkscape-specific properties and, when options.style_to_xml is set,
    promotes recognized style properties to XML attributes.

    Returns the number of style properties fixed or removed.
    """
    num = 0
    styleMap = _getStyle(node)
    if styleMap:
        # I've seen this enough to know that I need to correct it:
        # fill: url(#linearGradient4918) rgb(0, 0, 0);
        for prop in ['fill', 'stroke']:
            if prop in styleMap:
                chunk = styleMap[prop].split(') ')
                if len(chunk) == 2 and (chunk[0][:5] == 'url(#' or chunk[0][:6] == 'url("#' or chunk[0][:6] == "url('#") and chunk[1] == 'rgb(0, 0, 0)':
                    styleMap[prop] = chunk[0] + ')'
                    num += 1
        # Here is where we can weed out unnecessary styles like:
        #  opacity:1
        if 'opacity' in styleMap:
            opacity = float(styleMap['opacity'])
            # if opacity='0' then all fill and stroke properties are useless, remove them
            # ('stroke-opacity' appeared twice in the original list; listed once here)
            if opacity == 0.0:
                for uselessStyle in ['fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-linejoin',
                                     'stroke-opacity', 'stroke-miterlimit', 'stroke-linecap', 'stroke-dasharray',
                                     'stroke-dashoffset']:
                    if uselessStyle in styleMap:
                        del styleMap[uselessStyle]
                        num += 1
        # if stroke:none, then remove all stroke-related properties (stroke-width, etc)
        # TODO: should also detect if the computed value of this element is stroke="none"
        if 'stroke' in styleMap and styleMap['stroke'] == 'none':
            for strokestyle in ['stroke-width', 'stroke-linejoin', 'stroke-miterlimit',
                                'stroke-linecap', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-opacity']:
                if strokestyle in styleMap:
                    del styleMap[strokestyle]
                    num += 1
            # TODO: This is actually a problem if a parent element has a specified stroke
            # we need to properly calculate computed values
            del styleMap['stroke']
        # if fill:none, then remove all fill-related properties (fill-rule, etc)
        if 'fill' in styleMap and styleMap['fill'] == 'none':
            for fillstyle in ['fill-rule', 'fill-opacity']:
                if fillstyle in styleMap:
                    del styleMap[fillstyle]
                    num += 1
        # fill-opacity: 0
        if 'fill-opacity' in styleMap:
            fillOpacity = float(styleMap['fill-opacity'])
            if fillOpacity == 0.0:
                for uselessFillStyle in ['fill', 'fill-rule']:
                    if uselessFillStyle in styleMap:
                        del styleMap[uselessFillStyle]
                        num += 1
        # stroke-opacity: 0
        if 'stroke-opacity' in styleMap:
            strokeOpacity = float(styleMap['stroke-opacity'])
            if strokeOpacity == 0.0:
                for uselessStrokeStyle in ['stroke', 'stroke-width', 'stroke-linejoin', 'stroke-linecap',
                                           'stroke-dasharray', 'stroke-dashoffset']:
                    if uselessStrokeStyle in styleMap:
                        del styleMap[uselessStrokeStyle]
                        num += 1
        # stroke-width: 0
        if 'stroke-width' in styleMap:
            strokeWidth = SVGLength(styleMap['stroke-width'])
            if strokeWidth.value == 0.0:
                for uselessStrokeStyle in ['stroke', 'stroke-linejoin', 'stroke-linecap',
                                           'stroke-dasharray', 'stroke-dashoffset', 'stroke-opacity']:
                    if uselessStrokeStyle in styleMap:
                        del styleMap[uselessStrokeStyle]
                        num += 1
        # remove font properties for non-text elements
        # I've actually observed this in real SVG content
        if not mayContainTextNodes(node):
            for fontstyle in ['font-family', 'font-size', 'font-stretch', 'font-size-adjust',
                              'font-style', 'font-variant', 'font-weight',
                              'letter-spacing', 'line-height', 'kerning',
                              'text-align', 'text-anchor', 'text-decoration',
                              'text-rendering', 'unicode-bidi',
                              'word-spacing', 'writing-mode']:
                if fontstyle in styleMap:
                    del styleMap[fontstyle]
                    num += 1
        # remove inkscape-specific styles
        # TODO: need to get a full list of these
        for inkscapeStyle in ['-inkscape-font-specification']:
            if inkscapeStyle in styleMap:
                del styleMap[inkscapeStyle]
                num += 1
        if 'overflow' in styleMap:
            # overflow specified on element other than svg, marker, pattern
            if not node.nodeName in ['svg', 'marker', 'pattern']:
                del styleMap['overflow']
                num += 1
            # it is a marker, pattern or svg
            # as long as this node is not the document <svg>, then only
            # remove overflow='hidden'.  See
            # http://www.w3.org/TR/2010/WD-SVG11-20100622/masking.html#OverflowProperty
            elif node != node.ownerDocument.documentElement:
                if styleMap['overflow'] == 'hidden':
                    del styleMap['overflow']
                    num += 1
            # else if outer svg has a overflow="visible", we can remove it
            elif styleMap['overflow'] == 'visible':
                del styleMap['overflow']
                num += 1
        # now if any of the properties match known SVG attributes we prefer attributes
        # over style so emit them and remove them from the style map
        if options.style_to_xml:
            # iterate over a snapshot: entries are deleted as they are promoted
            for propName in list(styleMap.keys()):
                if propName in svgAttributes:
                    node.setAttribute(propName, styleMap[propName])
                    del styleMap[propName]
        _setStyle(node, styleMap)
    # recurse for our child elements
    for child in node.childNodes:
        num += repairStyle(child, options)
    return num
def mayContainTextNodes(node):
    """
    Returns True if the passed-in node is probably a text element, or at least
    one of its descendants is probably a text element.

    If False is returned, it is guaranteed that the passed-in node has no
    business having text-based attributes.
    If True is returned, the passed-in node should not have its text-based
    attributes removed.
    """
    # Return the memoized answer from an earlier call, if present.
    try:
        return node.mayContainTextNodes
    except AttributeError:
        pass

    # Elements that are guaranteed never to be (or contain) text.
    shapeElements = ['rect', 'circle', 'ellipse', 'line', 'polygon',
                     'polyline', 'path', 'image', 'stop']
    # Container elements: text-free themselves, but their subtrees may not be.
    containerElements = ['g', 'clipPath', 'marker', 'mask', 'pattern',
                         'linearGradient', 'radialGradient', 'symbol']

    if node.nodeType != 1:
        # Comment, text and CDATA nodes don't have attributes and aren't containers
        result = False
    elif node.namespaceURI != NS['SVG']:
        # Non-SVG elements? Unknown elements!
        result = True
    elif node.nodeName in shapeElements:
        result = False
    elif node.nodeName in containerElements:
        # Evaluate (and thereby memoize) every child, then combine;
        # a full list comprehension keeps the original's caching of all
        # children, unlike a short-circuiting any() over a generator.
        result = any([mayContainTextNodes(child) for child in node.childNodes])
    else:
        # Everything else should be considered a future SVG-version text
        # element at best, or an unknown element at worst.
        result = True

    # Cache this result before returning it.
    node.mayContainTextNodes = result
    return result
def taint(taintedSet, taintedAttribute):
    u"""Adds an attribute to a set of attributes.

    Related attributes are also included: tainting 'marker' taints the three
    marker-* attributes, and tainting any marker-* attribute taints 'marker'.
    The set is modified in place and also returned."""
    taintedSet.add(taintedAttribute)
    markerParts = ['marker-start', 'marker-mid', 'marker-end']
    if taintedAttribute == 'marker':
        taintedSet.update(markerParts)
    if taintedAttribute in markerParts:
        taintedSet.add('marker')
    return taintedSet
def removeDefaultAttributeValues(node, options, tainted=None):
    u"""Removes attributes whose value is identical to the SVG default.

    'tainted' keeps a set of attributes defined in parent nodes.
    For such attributes, we don't delete attributes with default values.

    Returns the number of attributes and style properties removed."""
    # BUGFIX: the original used a mutable default argument (tainted=set());
    # taint() mutates the set in place, so state leaked between independent
    # top-level calls.  A fresh set per call preserves the intended behavior.
    if tainted is None:
        tainted = set()
    num = 0
    if node.nodeType != 1: return 0
    # gradientUnits: objectBoundingBox
    if node.getAttribute('gradientUnits') == 'objectBoundingBox':
        node.removeAttribute('gradientUnits')
        num += 1
    # spreadMethod: pad
    if node.getAttribute('spreadMethod') == 'pad':
        node.removeAttribute('spreadMethod')
        num += 1
    # x1: 0%
    if node.getAttribute('x1') != '':
        x1 = SVGLength(node.getAttribute('x1'))
        if x1.value == 0:
            node.removeAttribute('x1')
            num += 1
    # y1: 0%
    if node.getAttribute('y1') != '':
        y1 = SVGLength(node.getAttribute('y1'))
        if y1.value == 0:
            node.removeAttribute('y1')
            num += 1
    # x2: 100%
    if node.getAttribute('x2') != '':
        x2 = SVGLength(node.getAttribute('x2'))
        if (x2.value == 100 and x2.units == Unit.PCT) or (x2.value == 1 and x2.units == Unit.NONE):
            node.removeAttribute('x2')
            num += 1
    # y2: 0%
    if node.getAttribute('y2') != '':
        y2 = SVGLength(node.getAttribute('y2'))
        if y2.value == 0:
            node.removeAttribute('y2')
            num += 1
    # fx: defaults to the value of cx
    if node.getAttribute('fx') != '':
        if node.getAttribute('fx') == node.getAttribute('cx'):
            node.removeAttribute('fx')
            num += 1
    # fy: defaults to the value of cy
    if node.getAttribute('fy') != '':
        if node.getAttribute('fy') == node.getAttribute('cy'):
            node.removeAttribute('fy')
            num += 1
    # cx: 50%
    if node.getAttribute('cx') != '':
        cx = SVGLength(node.getAttribute('cx'))
        if (cx.value == 50 and cx.units == Unit.PCT) or (cx.value == 0.5 and cx.units == Unit.NONE):
            node.removeAttribute('cx')
            num += 1
    # cy: 50%
    if node.getAttribute('cy') != '':
        cy = SVGLength(node.getAttribute('cy'))
        if (cy.value == 50 and cy.units == Unit.PCT) or (cy.value == 0.5 and cy.units == Unit.NONE):
            node.removeAttribute('cy')
            num += 1
    # r: 50%
    if node.getAttribute('r') != '':
        r = SVGLength(node.getAttribute('r'))
        if (r.value == 50 and r.units == Unit.PCT) or (r.value == 0.5 and r.units == Unit.NONE):
            node.removeAttribute('r')
            num += 1
    # Summarily get rid of some more attributes
    attributes = [node.attributes.item(i).nodeName
                  for i in range(node.attributes.length)]
    for attribute in attributes:
        if attribute not in tainted:
            if attribute in default_attributes.keys():
                if node.getAttribute(attribute) == default_attributes[attribute]:
                    node.removeAttribute(attribute)
                    num += 1
            else:
                tainted = taint(tainted, attribute)
    # These attributes might also occur as styles
    styles = _getStyle(node)
    # iterate over a snapshot of the keys; entries are deleted inside the loop
    for attribute in list(styles.keys()):
        if attribute not in tainted:
            if attribute in default_attributes.keys():
                if styles[attribute] == default_attributes[attribute]:
                    del styles[attribute]
                    num += 1
            else:
                tainted = taint(tainted, attribute)
    _setStyle(node, styles)
    # recurse for our child elements; each child gets its own copy of the set
    for child in node.childNodes:
        num += removeDefaultAttributeValues(child, options, tainted.copy())
    return num
# matches "rgb(int, int, int)" and "rgb(float%, float%, float%)" color forms
rgb = re.compile(r"\s*rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)\s*")
rgbp = re.compile(r"\s*rgb\(\s*(\d*\.?\d+)%\s*,\s*(\d*\.?\d+)%\s*,\s*(\d*\.?\d+)%\s*\)\s*")
def convertColor(value):
    """
    Converts the input color string and returns a #RRGGBB (or #RGB if possible) string
    """
    s = value
    # named colors: the 'colors' table maps keyword -> hex.  Test membership
    # on the dict directly; the original called colors.keys(), which builds
    # a throwaway O(n) list on Python 2
    if s in colors:
        s = colors[s]
    # rgb(r%, g%, b%) -> #RRGGBB (percentages scaled to 0-255, truncated)
    rgbpMatch = rgbp.match(s)
    if rgbpMatch != None:
        r = int(float(rgbpMatch.group(1)) * 255.0 / 100.0)
        g = int(float(rgbpMatch.group(2)) * 255.0 / 100.0)
        b = int(float(rgbpMatch.group(3)) * 255.0 / 100.0)
        s = '#%02x%02x%02x' % (r, g, b)
    else:
        # rgb(r, g, b) -> #RRGGBB
        rgbMatch = rgb.match(s)
        if rgbMatch != None:
            r = int(rgbMatch.group(1))
            g = int(rgbMatch.group(2))
            b = int(rgbMatch.group(3))
            s = '#%02x%02x%02x' % (r, g, b)
    # collapse #RRGGBB to #RGB when each channel repeats its nibble
    if s[0] == '#':
        s = s.lower()
        if len(s) == 7 and s[1] == s[2] and s[3] == s[4] and s[5] == s[6]:
            s = '#' + s[1] + s[3] + s[5]
    return s
def convertColors(element):
    """
    Recursively converts all color properties into #RRGGBB format if shorter

    Returns the number of bytes saved by the conversions.
    """
    numBytes = 0
    if element.nodeType != 1: return 0
    # set up list of color attributes for each element type
    attrsToConvert = []
    if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
                            'line', 'polyline', 'path', 'g', 'a']:
        attrsToConvert = ['fill', 'stroke']
    elif element.nodeName in ['stop']:
        attrsToConvert = ['stop-color']
    elif element.nodeName in ['solidColor']:
        attrsToConvert = ['solid-color']
    # now convert all the color formats
    styles = _getStyle(element)
    for attr in attrsToConvert:
        oldColorValue = element.getAttribute(attr)
        if oldColorValue != '':
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            # only rewrite when the new form is strictly shorter
            if oldBytes > newBytes:
                element.setAttribute(attr, newColorValue)
                numBytes += (oldBytes - len(element.getAttribute(attr)))
        # colors might also hide in styles
        if attr in styles.keys():
            oldColorValue = styles[attr]
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            if oldBytes > newBytes:
                styles[attr] = newColorValue
                # BUGFIX: the original credited oldBytes minus the length of
                # the *attribute* value here (usually the empty string),
                # overstating the savings; the new style value is what was
                # actually written
                numBytes += (oldBytes - newBytes)
    _setStyle(element, styles)
    # now recurse for our child elements
    for child in element.childNodes:
        numBytes += convertColors(child)
    return numBytes
# TODO: go over what this method does and see if there is a way to optimize it
# TODO: go over the performance of this method and see if I can save memory/speed by
# reusing data structures, etc
def cleanPath(element, options) :
 """
 Cleans the path string (d attribute) of the element.

 Passes performed, in order:
 1. convert absolute coordinates to relative ones;
 2. remove empty segments (skipped when stroke-linecap is 'round',
    since empty segments are then actually rendered as dots);
 3. convert straight cubic curves into lines;
 4. collapse consecutive commands of the same type into one command;
 5. convert l/c/q segments into their shorthands (h, v, s, t);
 6. merge consecutive h/v coordinates running in the same direction.
 Updates the global statistics counters as a side effect.
 """
 global numBytesSavedInPathData
 global numPathSegmentsReduced
 global numCurvesStraightened
 # this gets the parser object from svg_regex.py
 oldPathStr = element.getAttribute('d')
 path = svg_parser.parse(oldPathStr)
 # This determines whether the stroke has round linecaps. If it does,
 # we do not want to collapse empty segments, as they are actually rendered.
 withRoundLineCaps = element.getAttribute('stroke-linecap') == 'round'
 # The first command must be a moveto, and whether it's relative (m)
 # or absolute (M), the first set of coordinates *is* absolute. So
 # the first iteration of the loop below will get x,y and startx,starty.
 # convert absolute coordinates into relative ones.
 # Reuse the data structure 'path', since we're not adding or removing subcommands.
 # Also reuse the coordinate lists since we're not adding or removing any.
 for pathIndex in xrange(0, len(path)):
  cmd, data = path[pathIndex] # Changes to cmd don't get through to the data structure
  i = 0
  # adjust abs to rel
  # only the A command has some values that we don't want to adjust (radii, rotation, flags)
  if cmd == 'A':
   for i in xrange(i, len(data), 7):
    data[i+5] -= x
    data[i+6] -= y
    x += data[i+5]
    y += data[i+6]
   path[pathIndex] = ('a', data)
  elif cmd == 'a':
   x += sum(data[5::7])
   y += sum(data[6::7])
  elif cmd == 'H':
   for i in xrange(i, len(data)):
    data[i] -= x
    x += data[i]
   path[pathIndex] = ('h', data)
  elif cmd == 'h':
   x += sum(data)
  elif cmd == 'V':
   for i in xrange(i, len(data)):
    data[i] -= y
    y += data[i]
   path[pathIndex] = ('v', data)
  elif cmd == 'v':
   y += sum(data)
  elif cmd == 'M':
   startx, starty = data[0], data[1]
   # If this is a path starter, don't convert its first
   # coordinate to relative; that would just make it (0, 0)
   if pathIndex != 0:
    data[0] -= x
    data[1] -= y
   x, y = startx, starty
   i = 2
   for i in xrange(i, len(data), 2):
    data[i] -= x
    data[i+1] -= y
    x += data[i]
    y += data[i+1]
   path[pathIndex] = ('m', data)
  elif cmd in ['L','T']:
   for i in xrange(i, len(data), 2):
    data[i] -= x
    data[i+1] -= y
    x += data[i]
    y += data[i+1]
   path[pathIndex] = (cmd.lower(), data)
  elif cmd in ['m']:
   if pathIndex == 0:
    # START OF PATH - this is an absolute moveto
    # followed by relative linetos
    startx, starty = data[0], data[1]
    x, y = startx, starty
    i = 2
   else:
    startx = x + data[0]
    starty = y + data[1]
   for i in xrange(i, len(data), 2):
    x += data[i]
    y += data[i+1]
  elif cmd in ['l','t']:
   x += sum(data[0::2])
   y += sum(data[1::2])
  elif cmd in ['S','Q']:
   for i in xrange(i, len(data), 4):
    data[i] -= x
    data[i+1] -= y
    data[i+2] -= x
    data[i+3] -= y
    x += data[i+2]
    y += data[i+3]
   path[pathIndex] = (cmd.lower(), data)
  elif cmd in ['s','q']:
   x += sum(data[2::4])
   y += sum(data[3::4])
  elif cmd == 'C':
   for i in xrange(i, len(data), 6):
    data[i] -= x
    data[i+1] -= y
    data[i+2] -= x
    data[i+3] -= y
    data[i+4] -= x
    data[i+5] -= y
    x += data[i+4]
    y += data[i+5]
   path[pathIndex] = ('c', data)
  elif cmd == 'c':
   x += sum(data[4::6])
   y += sum(data[5::6])
  elif cmd in ['z','Z']:
   x, y = startx, starty
   path[pathIndex] = ('z', data)
 # remove empty segments
 # Reuse the data structure 'path' and the coordinate lists, even if we're
 # deleting items, because these deletions are relatively cheap.
 if not withRoundLineCaps:
  for pathIndex in xrange(0, len(path)):
   cmd, data = path[pathIndex]
   i = 0
   if cmd in ['m','l','t']:
    if cmd == 'm':
     # remove m0,0 segments
     if pathIndex > 0 and data[0] == data[i+1] == 0:
      # 'm0,0 x,y' can be replaces with 'lx,y',
      # except the first m which is a required absolute moveto
      path[pathIndex] = ('l', data[2:])
      numPathSegmentsReduced += 1
     else: # else skip move coordinate
      i = 2
    while i < len(data):
     if data[i] == data[i+1] == 0:
      del data[i:i+2]
      numPathSegmentsReduced += 1
     else:
      i += 2
   elif cmd == 'c':
    while i < len(data):
     if data[i] == data[i+1] == data[i+2] == data[i+3] == data[i+4] == data[i+5] == 0:
      del data[i:i+6]
      numPathSegmentsReduced += 1
     else:
      i += 6
   elif cmd == 'a':
    while i < len(data):
     if data[i+5] == data[i+6] == 0:
      del data[i:i+7]
      numPathSegmentsReduced += 1
     else:
      i += 7
   elif cmd == 'q':
    while i < len(data):
     if data[i] == data[i+1] == data[i+2] == data[i+3] == 0:
      del data[i:i+4]
      numPathSegmentsReduced += 1
     else:
      i += 4
   elif cmd in ['h','v']:
    oldLen = len(data)
    path[pathIndex] = (cmd, [coord for coord in data if coord != 0])
    # BUG FIX: count the number of coordinates *removed* (old - new);
    # the previous 'new - old' decremented the statistics counter
    numPathSegmentsReduced += oldLen - len(path[pathIndex][1])
  # fixup: Delete subcommands having no coordinates.
  path = [elem for elem in path if len(elem[1]) > 0 or elem[0] == 'z']
 # convert straight curves into lines
 newPath = [path[0]]
 for (cmd,data) in path[1:]:
  i = 0
  newData = data
  if cmd == 'c':
   newData = []
   while i < len(data):
    # since all commands are now relative, we can think of previous point as (0,0)
    # and new point (dx,dy) is (data[i+4],data[i+5])
    # eqn of line will be y = (dy/dx)*x or if dx=0 then eqn of line is x=0
    (p1x,p1y) = (data[i],data[i+1])
    (p2x,p2y) = (data[i+2],data[i+3])
    dx = data[i+4]
    dy = data[i+5]
    foundStraightCurve = False
    if dx == 0:
     if p1x == 0 and p2x == 0:
      foundStraightCurve = True
    else:
     m = dy/dx
     if p1y == m*p1x and p2y == m*p2x:
      foundStraightCurve = True
    if foundStraightCurve:
     # flush any existing curve coords first
     if newData:
      newPath.append( (cmd,newData) )
      newData = []
     # now create a straight line segment
     newPath.append( ('l', [dx,dy]) )
     numCurvesStraightened += 1
    else:
     newData.extend(data[i:i+6])
    i += 6
  if newData or cmd == 'z' or cmd == 'Z':
   newPath.append( (cmd,newData) )
 path = newPath
 # collapse all consecutive commands of the same type into one command
 prevCmd = ''
 prevData = []
 newPath = []
 for (cmd,data) in path:
  # flush the previous command if it is not the same type as the current command
  if prevCmd != '':
   if cmd != prevCmd or cmd == 'm':
    newPath.append( (prevCmd, prevData) )
    prevCmd = ''
    prevData = []
  # if the previous and current commands are the same type,
  # or the previous command is moveto and the current is lineto, collapse,
  # but only if they are not move commands (since move can contain implicit lineto commands)
  if (cmd == prevCmd or (cmd == 'l' and prevCmd == 'm')) and cmd != 'm':
   prevData.extend(data)
  # save last command and data
  else:
   prevCmd = cmd
   prevData = data
 # flush last command and data
 if prevCmd != '':
  newPath.append( (prevCmd, prevData) )
 path = newPath
 # convert to shorthand path segments where possible
 newPath = []
 for (cmd,data) in path:
  # convert line segments into h,v where possible
  if cmd == 'l':
   i = 0
   lineTuples = []
   while i < len(data):
    if data[i] == 0:
     # vertical
     if lineTuples:
      # flush the existing line command
      newPath.append( ('l', lineTuples) )
      lineTuples = []
     # append the v and then the remaining line coords
     newPath.append( ('v', [data[i+1]]) )
     numPathSegmentsReduced += 1
    elif data[i+1] == 0:
     if lineTuples:
      # flush the line command, then append the h and then the remaining line coords
      newPath.append( ('l', lineTuples) )
      lineTuples = []
     newPath.append( ('h', [data[i]]) )
     numPathSegmentsReduced += 1
    else:
     lineTuples.extend(data[i:i+2])
    i += 2
   if lineTuples:
    newPath.append( ('l', lineTuples) )
  # also handle implied relative linetos
  elif cmd == 'm':
   i = 2
   lineTuples = [data[0], data[1]]
   while i < len(data):
    if data[i] == 0:
     # vertical
     if lineTuples:
      # flush the existing m/l command
      newPath.append( (cmd, lineTuples) )
      lineTuples = []
      cmd = 'l' # dealing with linetos now
     # append the v and then the remaining line coords
     newPath.append( ('v', [data[i+1]]) )
     numPathSegmentsReduced += 1
    elif data[i+1] == 0:
     if lineTuples:
      # flush the m/l command, then append the h and then the remaining line coords
      newPath.append( (cmd, lineTuples) )
      lineTuples = []
      cmd = 'l' # dealing with linetos now
     newPath.append( ('h', [data[i]]) )
     numPathSegmentsReduced += 1
    else:
     lineTuples.extend(data[i:i+2])
    i += 2
   if lineTuples:
    newPath.append( (cmd, lineTuples) )
  # convert Bézier curve segments into s where possible
  elif cmd == 'c':
   bez_ctl_pt = (0,0)
   i = 0
   curveTuples = []
   while i < len(data):
    # rotate by 180deg means negate both coordinates
    # if the previous control point is equal then we can substitute a
    # shorthand bezier command
    if bez_ctl_pt[0] == data[i] and bez_ctl_pt[1] == data[i+1]:
     if curveTuples:
      newPath.append( ('c', curveTuples) )
      curveTuples = []
     # append the s command
     newPath.append( ('s', [data[i+2], data[i+3], data[i+4], data[i+5]]) )
     numPathSegmentsReduced += 1
    else:
     j = 0
     while j <= 5:
      curveTuples.append(data[i+j])
      j += 1
    # set up control point for next curve segment
    bez_ctl_pt = (data[i+4]-data[i+2], data[i+5]-data[i+3])
    i += 6
   if curveTuples:
    newPath.append( ('c', curveTuples) )
  # convert quadratic curve segments into t where possible
  elif cmd == 'q':
   quad_ctl_pt = (0,0)
   i = 0
   curveTuples = []
   while i < len(data):
    if quad_ctl_pt[0] == data[i] and quad_ctl_pt[1] == data[i+1]:
     if curveTuples:
      newPath.append( ('q', curveTuples) )
      curveTuples = []
     # append the t command
     newPath.append( ('t', [data[i+2], data[i+3]]) )
     numPathSegmentsReduced += 1
    else:
     j = 0
     while j <= 3:
      curveTuples.append(data[i+j])
      j += 1
    quad_ctl_pt = (data[i+2]-data[i], data[i+3]-data[i+1])
    i += 4
   if curveTuples:
    newPath.append( ('q', curveTuples) )
  else:
   newPath.append( (cmd, data) )
 path = newPath
 # for each h or v, collapse unnecessary coordinates that run in the same direction
 # i.e. "h-100-100" becomes "h-200" but "h300-100" does not change
 # Reuse the data structure 'path', since we're not adding or removing subcommands.
 # Also reuse the coordinate lists, even if we're deleting items, because these
 # deletions are relatively cheap.
 for pathIndex in xrange(1, len(path)):
  cmd, data = path[pathIndex]
  if cmd in ['h','v'] and len(data) > 1:
   coordIndex = 1
   while coordIndex < len(data):
    if isSameSign(data[coordIndex - 1], data[coordIndex]):
     data[coordIndex - 1] += data[coordIndex]
     del data[coordIndex]
     numPathSegmentsReduced += 1
    else:
     coordIndex += 1
 # it is possible that we have consecutive h, v, c, t commands now
 # so again collapse all consecutive commands of the same type into one command
 prevCmd = ''
 prevData = []
 newPath = [path[0]]
 for (cmd,data) in path[1:]:
  # flush the previous command if it is not the same type as the current command
  if prevCmd != '':
   if cmd != prevCmd or cmd == 'm':
    newPath.append( (prevCmd, prevData) )
    prevCmd = ''
    prevData = []
  # if the previous and current commands are the same type, collapse
  if cmd == prevCmd and cmd != 'm':
   prevData.extend(data)
  # save last command and data
  else:
   prevCmd = cmd
   prevData = data
 # flush last command and data
 if prevCmd != '':
  newPath.append( (prevCmd, prevData) )
 path = newPath
 newPathStr = serializePath(path, options)
 numBytesSavedInPathData += ( len(oldPathStr) - len(newPathStr) )
 element.setAttribute('d', newPathStr)
def parseListOfPoints(s):
 """
 Parse a points attribute string into a list of coordinate values.

 Returns a list containing an even number of Decimal coordinates,
 or an empty list if the string is malformed (odd coordinate count,
 unit suffixes, or invalid numbers).
 """
 # (wsp)? comma-or-wsp-separated coordinate pairs (wsp)?
 # coordinate-pair = coordinate comma-or-wsp coordinate
 # coordinate = sign? integer
 # Split on a comma (with optional surrounding whitespace) or a run of
 # whitespace. Unlike r"\s*,?\s*", this pattern can never match the empty
 # string; zero-width matches would split between every character on
 # Python >= 3.7.
 ws_nums = re.split(r"\s*,\s*|\s+", s.strip())
 nums = []
 # also, if 100-100 is found, split it into two also
 # <polygon points="100,-100,100-100,100-100-100,-100-100" />
 for ws_num in ws_nums:
  negcoords = ws_num.split("-")
  # this string didn't have any negative coordinates
  if len(negcoords) == 1:
   nums.append(negcoords[0])
  # we got negative coords
  else:
   for j in range(len(negcoords)):
    # first number could be positive
    if j == 0:
     if negcoords[0] != '':
      nums.append(negcoords[0])
    # otherwise all other strings will be negative
    else:
     # unless we accidentally split a number that was in scientific notation
     # and had a negative exponent (500.00e-1); guard against an empty nums
     # list so a leading negative coordinate ("-100,...") doesn't crash
     if nums and nums[-1][-1:] in ('e', 'E'):
      nums[-1] = nums[-1] + '-' + negcoords[j]
     else:
      nums.append('-' + negcoords[j])
 # if we have an odd number of points, return empty
 if len(nums) % 2 != 0: return []
 # now resolve into Decimal values
 i = 0
 while i < len(nums):
  try:
   nums[i] = getcontext().create_decimal(nums[i])
   nums[i + 1] = getcontext().create_decimal(nums[i + 1])
  except decimal.InvalidOperation: # one of the lengths had a unit or is an invalid number
   return []
  i += 2
 return nums
def cleanPolygon(elem, options):
 """
 Remove unnecessary closing point of polygon points attribute
 """
 global numPointsRemovedFromPolygon
 pts = parseListOfPoints(elem.getAttribute('points'))
 # a polygon closes itself, so an explicit final point that repeats the
 # first point is redundant (pts always holds an even number of values)
 if len(pts) >= 4:
  if pts[0] == pts[-2] and pts[1] == pts[-1]:
   del pts[-2:]
   numPointsRemovedFromPolygon += 1
 elem.setAttribute('points', scourCoordinates(pts, options, True))
def cleanPolyline(elem, options):
 """
 Scour the polyline points attribute
 """
 coords = parseListOfPoints(elem.getAttribute('points'))
 elem.setAttribute('points', scourCoordinates(coords, options, True))
def serializePath(pathObj, options):
 """
 Reserializes the path data with some cleanups.
 """
 # elliptical arc commands must have comma/wsp separating the coordinates
 # this fixes an issue outlined in Fix https://bugs.launchpad.net/scour/+bug/412754
 parts = []
 for cmd, data in pathObj:
  parts.append(cmd)
  parts.append(scourCoordinates(data, options, (cmd == 'a')))
 return ''.join(parts)
def serializeTransform(transformObj):
 """
 Reserializes the transform data with some cleanups.
 """
 parts = []
 for command, numbers in transformObj:
  # each transform is "name(arg arg ...)"
  args = ' '.join(scourUnitlessLength(number) for number in numbers)
  parts.append(command + '(' + args + ')')
 return ' '.join(parts)
def scourCoordinates(data, options, forceCommaWsp = False):
 """
 Serializes coordinate data with some cleanups:
  - removes all trailing zeros after the decimal
  - integerize coordinates if possible
  - removes extraneous whitespace
  - adds spaces between values in a subcommand if required (or if forceCommaWsp is True)
 Returns '' for a None input.
 """
 # guard clause: nothing to serialize (idiomatic 'is None' instead of '!= None')
 if data is None:
  return ''
 newData = []
 c = 0
 previousCoord = ''
 for coord in data:
  scouredCoord = scourUnitlessLength(coord, needsRendererWorkaround=options.renderer_workaround)
  # only need the comma if the current number starts with a digit
  # (numbers can start with - without needing a comma before)
  # or if forceCommaWsp is True
  # or if this number starts with a dot and the previous number
  # had *no* dot or exponent (so we can go like -5.5.5 for -5.5,0.5
  # and 4e4.5 for 40000,0.5)
  if c > 0 and (forceCommaWsp
    or scouredCoord[0].isdigit()
    or (scouredCoord[0] == '.' and not ('.' in previousCoord or 'e' in previousCoord))
   ):
   newData.append( ' ' )
  # add the scoured coordinate to the path string
  newData.append( scouredCoord )
  previousCoord = scouredCoord
  c += 1
 # What we need to do to work around GNOME bugs 548494, 563933 and
 # 620565, which are being fixed and unfixed in Ubuntu, is
 # to make sure that a dot doesn't immediately follow a command
 # (so 'h50' and 'h0.5' are allowed, but not 'h.5').
 # Then, we need to add a space character after any coordinates
 # having an 'e' (scientific notation), so as to have the exponent
 # separate from the next number.
 if options.renderer_workaround:
  for i in xrange(1, len(newData)):
   if newData[i][0] == '-' and 'e' in newData[i - 1]:
    newData[i - 1] += ' '
 # single join shared by both paths (the original duplicated this return
 # in each branch of the workaround test)
 return ''.join(newData)
def scourLength(length):
 """
 Scours a length. Accepts units.
 """
 parsed = SVGLength(length)
 # scour the numeric part, then re-attach the unit suffix
 return scourUnitlessLength(parsed.value) + Unit.str(parsed.units)
def scourUnitlessLength(length, needsRendererWorkaround=False): # length is of a numeric type
 """
 Scours the numeric part of a length only. Does not accept units.
 This is faster than scourLength on elements guaranteed not to
 contain units.
 """
 # reduce to the proper number of digits
 if not isinstance(length, Decimal):
  length = getcontext().create_decimal(str(length))
 # if the value is an integer, it may still have .0[...] attached to it for some reason
 # remove those
 if int(length) == length:
  length = getcontext().create_decimal(int(length))
 # gather the non-scientific notation version of the coordinate.
 # this may actually be in scientific notation if the value is
 # sufficiently large or small, so this is a misnomer.
 # str() replaces the Python 2-only unicode() builtin; for Decimal both
 # produce the same ASCII digits, and str() also works on Python 3.
 nonsci = str(length).lower().replace("e+", "e")
 if not needsRendererWorkaround:
  if len(nonsci) > 2 and nonsci[:2] == '0.':
   nonsci = nonsci[1:] # remove the 0, leave the dot
  elif len(nonsci) > 3 and nonsci[:3] == '-0.':
   nonsci = '-' + nonsci[2:] # remove the 0, leave the minus and dot
 if len(nonsci) > 3: # avoid calling normalize unless strictly necessary
  # and then the scientific notation version, with E+NUMBER replaced with
  # just eNUMBER, since SVG accepts this.
  sci = str(length.normalize()).lower().replace("e+", "e")
  if len(sci) < len(nonsci): return sci
  else: return nonsci
 else: return nonsci
def reducePrecision(element) :
 """
 Because opacities, letter spacings, stroke widths and all that don't need
 to be preserved in SVG files with 9 digits of precision.
 Takes all of these attributes, in the given element node and its children,
 and reduces their precision to the current Decimal context's precision.
 Also checks for the attributes actually being lengths, not 'inherit', 'none'
 or anything that isn't an SVGLength.
 Returns the number of bytes saved after performing these reductions.
 """
 bytesSaved = 0
 styles = _getStyle(element)
 for attrName in ('opacity', 'flood-opacity', 'fill-opacity',
     'stroke-opacity', 'stop-opacity', 'stroke-miterlimit',
     'stroke-dashoffset', 'letter-spacing', 'word-spacing',
     'kerning', 'font-size-adjust', 'font-size',
     'stroke-width'):
  attrValue = element.getAttribute(attrName)
  if attrValue != '':
   parsed = SVGLength(attrValue)
   # not an absolute/relative size or inherit, can be % though
   if parsed.units != Unit.INVALID:
    shorter = scourLength(attrValue)
    if len(shorter) < len(attrValue):
     bytesSaved += len(attrValue) - len(shorter)
     element.setAttribute(attrName, shorter)
  # repeat for attributes hidden in styles
  if attrName in styles.keys():
   styleValue = styles[attrName]
   parsed = SVGLength(styleValue)
   if parsed.units != Unit.INVALID:
    shorter = scourLength(styleValue)
    if len(shorter) < len(styleValue):
     bytesSaved += len(styleValue) - len(shorter)
     styles[attrName] = shorter
 _setStyle(element, styles)
 # recurse into child elements
 for child in element.childNodes:
  if child.nodeType == 1:
   bytesSaved += reducePrecision(child)
 return bytesSaved
def optimizeAngle(angle):
 """
 Because any rotation can be expressed within 360 degrees
 of any given number, and since negative angles sometimes
 are one character longer than corresponding positive angle,
 we shorten the number to one in the range to [-90, 270[.
 """
 # Reduce to the range ]-360, 360[; Python's modulo takes the sign of
 # the divisor, so using -360 for negative angles preserves their sign.
 angle = angle % -360 if angle < 0 else angle % 360
 # "-x" is shorter than "35x", while "-xxx" is one character longer than
 # positive angles <= 260, so constrain to [-90, 270[ (]-100, 260] would
 # be equally valid).
 if angle >= 270:
  return angle - 360
 if angle < -90:
  return angle + 360
 return angle
def optimizeTransform(transform):
 """
 Optimises a series of transformations parsed from a single
 transform="" attribute.
 The transformation list is modified in-place.

 transform is a list of (name, args) pairs as produced by the transform
 parser. Three optimizations are applied: a lone matrix() is rewritten as
 a shorter primitive where possible; optional arguments are dropped; and
 consecutive runs of the same transformation type are coalesced.
 """
 # FIXME: reordering these would optimize even more cases:
 # first: Fold consecutive runs of the same transformation
 # extra: Attempt to cast between types to create sameness:
 # "matrix(0 1 -1 0 0 0) rotate(180) scale(-1)" all
 # are rotations (90, 180, 180) -- thus "rotate(90)"
 # second: Simplify transforms where numbers are optional.
 # third: Attempt to simplify any single remaining matrix()
 #
 # if there's only one transformation and it's a matrix,
 # try to make it a shorter non-matrix transformation
 # NOTE: as matrix(a b c d e f) in SVG means the matrix:
 # |¯ a c e ¯| make constants |¯ A1 A2 A3 ¯|
 # | b d f | translating them | B1 B2 B3 |
 # |_ 0 0 1 _| to more readable |_ 0 0 1 _|
 # NOTE(review): args appear to be numeric values from the transform
 # parser (Decimal comparison below suggests Decimals) — confirm upstream.
 if len(transform) == 1 and transform[0][0] == 'matrix':
  matrix = A1, B1, A2, B2, A3, B3 = transform[0][1]
  # |¯ 1 0 0 ¯|
  # | 0 1 0 | Identity matrix (no transformation)
  # |_ 0 0 1 _|
  if matrix == [1, 0, 0, 1, 0, 0]:
   del transform[0]
  # |¯ 1 0 X ¯|
  # | 0 1 Y | Translation by (X, Y).
  # |_ 0 0 1 _|
  elif (A1 == 1 and A2 == 0
    and B1 == 0 and B2 == 1):
   transform[0] = ('translate', [A3, B3])
  # |¯ X 0 0 ¯|
  # | 0 Y 0 | Scaling by (X, Y).
  # |_ 0 0 1 _|
  elif ( A2 == 0 and A3 == 0
    and B1 == 0 and B3 == 0):
   transform[0] = ('scale', [A1, B2])
  # |¯ cos(A) -sin(A) 0 ¯| Rotation by angle A,
  # | sin(A) cos(A) 0 | clockwise, about the origin.
  # |_ 0 0 1 _| A is in degrees, [-180...180].
  elif (A1 == B2 and -1 <= A1 <= 1 and A3 == 0
    and -B1 == A2 and -1 <= B1 <= 1 and B3 == 0
    # as cos² A + sin² A == 1 and as decimal trig is approximate:
    # FIXME: the "epsilon" term here should really be some function
    # of the precision of the (sin|cos)_A terms, not 1e-15:
    and abs((B1 ** 2) + (A1 ** 2) - 1) < Decimal("1e-15")):
   sin_A, cos_A = B1, A1
   # while asin(A) and acos(A) both only have an 180° range
   # the sign of sin(A) and cos(A) varies across quadrants,
   # letting us hone in on the angle the matrix represents:
   # -- => < -90 | -+ => -90..0 | ++ => 0..90 | +- => >= 90
   #
   # http://en.wikipedia.org/wiki/File:Sine_cosine_plot.svg
   # shows asin has the correct angle the middle quadrants:
   A = Decimal(str(math.degrees(math.asin(float(sin_A)))))
   if cos_A < 0: # otherwise needs adjusting from the edges
    if sin_A < 0:
     A = -180 - A
    else:
     A = 180 - A
   transform[0] = ('rotate', [A])
 # Simplify transformations where numbers are optional.
 for type, args in transform:
  if type == 'translate':
   # Only the X coordinate is required for translations.
   # If the Y coordinate is unspecified, it's 0.
   if len(args) == 2 and args[1] == 0:
    del args[1]
  elif type == 'rotate':
   args[0] = optimizeAngle(args[0]) # angle
   # Only the angle is required for rotations.
   # If the coordinates are unspecified, it's the origin (0, 0).
   if len(args) == 3 and args[1] == args[2] == 0:
    del args[1:]
  elif type == 'scale':
   # Only the X scaling factor is required.
   # If the Y factor is unspecified, it's the same as X.
   if len(args) == 2 and args[0] == args[1]:
    del args[1]
 # Attempt to coalesce runs of the same transformation.
 # Translations followed immediately by other translations,
 # rotations followed immediately by other rotations,
 # scaling followed immediately by other scaling,
 # are safe to add.
 # Identity skewX/skewY are safe to remove, but how do they accrete?
 # |¯ 1 0 0 ¯|
 # | tan(A) 1 0 | skews X coordinates by angle A
 # |_ 0 0 1 _|
 #
 # |¯ 1 tan(A) 0 ¯|
 # | 0 1 0 | skews Y coordinates by angle A
 # |_ 0 0 1 _|
 #
 # FIXME: A matrix followed immediately by another matrix
 # would be safe to multiply together, too.
 # NOTE: i is only advanced when nothing was deleted, so the same index is
 # re-examined after each merge.
 i = 1
 while i < len(transform):
  currType, currArgs = transform[i]
  prevType, prevArgs = transform[i - 1]
  if currType == prevType == 'translate':
   prevArgs[0] += currArgs[0] # x
   # for y, only add if the second translation has an explicit y
   if len(currArgs) == 2:
    if len(prevArgs) == 2:
     prevArgs[1] += currArgs[1] # y
    elif len(prevArgs) == 1:
     prevArgs.append(currArgs[1]) # y
   del transform[i]
   if prevArgs[0] == prevArgs[1] == 0:
    # Identity translation!
    i -= 1
    del transform[i]
  elif (currType == prevType == 'rotate'
    and len(prevArgs) == len(currArgs) == 1):
   # Only coalesce if both rotations are from the origin.
   prevArgs[0] = optimizeAngle(prevArgs[0] + currArgs[0])
   del transform[i]
  elif currType == prevType == 'scale':
   prevArgs[0] *= currArgs[0] # x
   # handle an implicit y
   if len(prevArgs) == 2 and len(currArgs) == 2:
    # y1 * y2
    prevArgs[1] *= currArgs[1]
   elif len(prevArgs) == 1 and len(currArgs) == 2:
    # create y2 = uniformscalefactor1 * y2
    prevArgs.append(prevArgs[0] * currArgs[1])
   elif len(prevArgs) == 2 and len(currArgs) == 1:
    # y1 * uniformscalefactor2
    prevArgs[1] *= currArgs[0]
   del transform[i]
   if prevArgs[0] == prevArgs[1] == 1:
    # Identity scale!
    i -= 1
    del transform[i]
  else:
   i += 1
 # Some fixups are needed for single-element transformation lists, since
 # the loop above was to coalesce elements with their predecessors in the
 # list, and thus it required 2 elements.
 i = 0
 while i < len(transform):
  currType, currArgs = transform[i]
  if ((currType == 'skewX' or currType == 'skewY')
    and len(currArgs) == 1 and currArgs[0] == 0):
   # Identity skew!
   del transform[i]
  elif ((currType == 'rotate')
    and len(currArgs) == 1 and currArgs[0] == 0):
   # Identity rotation!
   del transform[i]
  else:
   i += 1
def optimizeTransforms(element, options) :
 """
 Attempts to optimise transform specifications on the given node and its children.
 Returns the number of bytes saved after performing these reductions.
 """
 bytesSaved = 0
 for attrName in ('transform', 'patternTransform', 'gradientTransform'):
  attrValue = element.getAttribute(attrName)
  if attrValue == '':
   continue
  transform = svg_transform_parser.parse(attrValue)
  optimizeTransform(transform)
  optimized = serializeTransform(transform)
  if len(optimized) < len(attrValue):
   # an empty serialization means the attribute can be dropped entirely
   if len(optimized):
    element.setAttribute(attrName, optimized)
   else:
    element.removeAttribute(attrName)
   bytesSaved += len(attrValue) - len(optimized)
 for child in element.childNodes:
  if child.nodeType == 1:
   bytesSaved += optimizeTransforms(child, options)
 return bytesSaved
def removeComments(element) :
 """
 Removes comments from the element and its children.
 Updates the global numCommentBytes counter with the bytes removed.
 """
 global numCommentBytes
 if isinstance(element, xml.dom.minidom.Document):
  # must process the document object separately, because its
  # documentElement's nodes have None as their parentNode
  # (iterate over a copy: removing children mutates childNodes)
  for subelement in list(element.childNodes):
   # BUG FIX: the original tested isinstance(element, Comment) here, so
   # document-level comments were never counted or removed; also remove
   # the comment from the document itself, its actual parent
   if isinstance(subelement, xml.dom.minidom.Comment):
    numCommentBytes += len(subelement.data)
    element.removeChild(subelement)
   else:
    removeComments(subelement)
 elif isinstance(element, xml.dom.minidom.Comment):
  numCommentBytes += len(element.data)
  element.parentNode.removeChild(element)
 else:
  # iterate over a copy, since recursion may remove comment children
  for subelement in list(element.childNodes):
   removeComments(subelement)
def embedRasters(element, options) :
 """
 Converts raster references to inline images.
 NOTE: there are size limits to base64-encoding handling in browsers
 """
 # (docstring moved before the imports; previously it was a plain string
 # expression, not an actual docstring)
 import base64
 import urllib
 global numRastersEmbedded
 href = element.getAttributeNS(NS['XLINK'],'href')
 # if xlink:href is set, then grab the id
 if href != '' and len(href) > 1:
  # find if href value has filename ext
  ext = os.path.splitext(os.path.basename(href))[1].lower()[1:]
  # look for 'png', 'jpg', and 'gif' extensions
  if ext in ('png', 'jpg', 'gif'):
   # file:// URLs denote files on the local system too
   if href[:7] == 'file://':
    href = href[7:]
   # does the file exist?
   if os.path.isfile(href):
    # if this is not an absolute path, set path relative
    # to script file based on input arg
    infilename = '.'
    if options.infilename: infilename = options.infilename
    href = os.path.join(os.path.dirname(infilename), href)
   rasterdata = ''
   # test if file exists locally
   if os.path.isfile(href):
    # open raster file as raw binary; 'with' guarantees the handle is
    # closed (the original leaked it)
    with open( href, "rb") as raster:
     rasterdata = raster.read()
   elif href[:7] == 'http://':
    webFile = urllib.urlopen( href )
    try:
     rasterdata = webFile.read()
    finally:
     # close even if the read fails
     webFile.close()
   # ... should we remove all images which don't resolve?
   if rasterdata != '' :
    # base64-encode raster
    b64eRaster = base64.b64encode( rasterdata )
    # set href attribute to base64-encoded equivalent
    if b64eRaster != '':
     # PNG and GIF both have MIME Type 'image/[ext]', but
     # JPEG has MIME Type 'image/jpeg'
     if ext == 'jpg':
      ext = 'jpeg'
     element.setAttributeNS(NS['XLINK'], 'href', 'data:image/' + ext + ';base64,' + b64eRaster)
     numRastersEmbedded += 1
def properlySizeDoc(docElement, options):
 """
 Replace a statically sized document (width/height attributes on the root)
 with an equivalent viewBox, when it is safe to do so.
 """
 width = SVGLength(docElement.getAttribute('width'))
 height = SVGLength(docElement.getAttribute('height'))
 # if width/height are not unitless or px then it is not ok to rewrite them
 # into a viewBox -- fine for Web browsers and vector editors, but not librsvg
 if options.renderer_workaround:
  widthOk = width.units == Unit.NONE or width.units == Unit.PX
  heightOk = height.units == Unit.NONE or height.units == Unit.PX
  if not (widthOk and heightOk):
   return
 # parse any existing viewBox attribute
 vbParts = re.split("\\s*\\,?\\s*", docElement.getAttribute('viewBox'), 3)
 # a well-formed viewBox has four values that must be checked
 if len(vbParts) == 4:
  try:
   # if x or y are specified and non-zero then it is not ok to overwrite it
   vbX = float(vbParts[0])
   vbY = float(vbParts[1])
   if vbX != 0 or vbY != 0:
    return
   # if width or height differ from the doc width/height, keep the viewBox
   vbWidth = float(vbParts[2])
   vbHeight = float(vbParts[3])
   if vbWidth != width.value or vbHeight != height.value:
    return
  except ValueError:
   # the viewBox did not parse properly: it is invalid and ok to overwrite
   pass
 # at this point it's safe to set the viewBox and remove width/height
 docElement.setAttribute('viewBox', '0 0 %s %s' % (width.value, height.value))
 docElement.removeAttribute('width')
 docElement.removeAttribute('height')
def remapNamespacePrefix(node, oldprefix, newprefix):
 """
 Recursively rewrites elements carrying namespace prefix *oldprefix* to use
 *newprefix* instead (or no prefix, if newprefix is ''). Each affected
 element is rebuilt, its attributes and children copied over, and swapped
 into the tree in place.
 """
 # only elements can carry a prefix ('is None' instead of '== None')
 if node is None or node.nodeType != 1: return
 if node.prefix == oldprefix:
  localName = node.localName
  namespace = node.namespaceURI
  doc = node.ownerDocument
  parent = node.parentNode
  # create a replacement node in the target prefix
  if newprefix != '':
   newNode = doc.createElementNS(namespace, newprefix+":"+localName)
  else:
   newNode = doc.createElement(localName)
  # add all the attributes
  attrList = node.attributes
  for i in range(attrList.length):
   attr = attrList.item(i)
   newNode.setAttributeNS( attr.namespaceURI, attr.localName, attr.nodeValue)
  # clone and add all the child nodes
  for child in node.childNodes:
   newNode.appendChild(child.cloneNode(True))
  # replace old node with new node
  parent.replaceChild( newNode, node )
  # set the node to the new node in the remapped namespace prefix
  node = newNode
 # now do all child nodes
 for child in node.childNodes :
  remapNamespacePrefix(child, oldprefix, newprefix)
def makeWellFormed(str):
 """
 Escape the XML special characters in *str*, returning the escaped string.
 """
 xml_ents = { '<':'&lt;', '>':'&gt;', '&':'&amp;', "'":'&apos;', '"':'&quot;'}
 # substitute each character for its entity where one exists
 return ''.join(xml_ents.get(c, c) for c in str)
# hand-rolled serialization function that has the following benefits:
# - pretty printing
# - somewhat judicious use of whitespace
# - ensure id attributes are first
def serializeXML(element, options, ind = 0, preserveWhitespace = False):
 """
 Serialize *element* and its subtree to a string.

 Advantages over minidom's serializer: pretty printing with the configured
 indent type (options.indent_type: 'tab' or 'space'), whitespace handling
 that honours xml:space="preserve"/"default", and id/xml:id attributes
 always emitted first. ind is the current indent depth; preserveWhitespace
 is inherited from the nearest xml:space declaration.
 """
 outParts = []
 indent = ind
 I=''
 if options.indent_type == 'tab': I='\t'
 elif options.indent_type == 'space': I=' '
 outParts.extend([(I * ind), '<', element.nodeName])
 # always serialize the id or xml:id attributes first
 if element.getAttribute('id') != '':
  id = element.getAttribute('id')
  quot = '"'
  if id.find('"') != -1:
   quot = "'"
  outParts.extend([' id=', quot, id, quot])
 if element.getAttribute('xml:id') != '':
  id = element.getAttribute('xml:id')
  quot = '"'
  if id.find('"') != -1:
   quot = "'"
  outParts.extend([' xml:id=', quot, id, quot])
 # now serialize the other attributes
 attrList = element.attributes
 for num in xrange(attrList.length) :
  attr = attrList.item(num)
  if attr.nodeName == 'id' or attr.nodeName == 'xml:id': continue
  # if the attribute value contains a double-quote, use single-quotes
  quot = '"'
  if attr.nodeValue.find('"') != -1:
   quot = "'"
  attrValue = makeWellFormed( attr.nodeValue )
  outParts.append(' ')
  # preserve xmlns: if it is a namespace prefix declaration
  if attr.prefix != None:
   outParts.extend([attr.prefix, ':'])
  elif attr.namespaceURI != None:
   if attr.namespaceURI == 'http://www.w3.org/2000/xmlns/' and attr.nodeName.find('xmlns') == -1:
    outParts.append('xmlns:')
   elif attr.namespaceURI == 'http://www.w3.org/1999/xlink':
    outParts.append('xlink:')
  outParts.extend([attr.localName, '=', quot, attrValue, quot])
  # xml:space switches whitespace handling for this subtree
  if attr.nodeName == 'xml:space':
   if attrValue == 'preserve':
    preserveWhitespace = True
   elif attrValue == 'default':
    preserveWhitespace = False
 # if no children, self-close
 children = element.childNodes
 if children.length > 0:
  outParts.append('>')
  onNewLine = False
  for child in element.childNodes:
   # element node (nodeType 1)
   if child.nodeType == 1:
    if preserveWhitespace:
     outParts.append(serializeXML(child, options, 0, preserveWhitespace))
    else:
     outParts.extend(['\n', serializeXML(child, options, indent + 1, preserveWhitespace)])
     onNewLine = True
   # text node (nodeType 3)
   elif child.nodeType == 3:
    # trim it only in the case of not being a child of an element
    # where whitespace might be important
    if preserveWhitespace:
     outParts.append(makeWellFormed(child.nodeValue))
    else:
     outParts.append(makeWellFormed(child.nodeValue.strip()))
   # CDATA node (nodeType 4)
   elif child.nodeType == 4:
    outParts.extend(['<![CDATA[', child.nodeValue, ']]>'])
   # Comment node (nodeType 8)
   elif child.nodeType == 8:
    outParts.extend(['<!--', child.nodeValue, '-->'])
   # TODO: entities, processing instructions, what else?
   else: # ignore the rest
    pass
  if onNewLine: outParts.append(I * ind)
  outParts.extend(['</', element.nodeName, '>'])
  if indent > 0: outParts.append('\n')
 else:
  outParts.append('/>')
  if indent > 0: outParts.append('\n')
 return "".join(outParts)
# this is the main method
# input is a string representation of the input XML
# returns a string representation of the output XML
def scourString(in_string, options=None):
    """Scour an SVG document.

    This is the main entry point of the optimizer pipeline.
    in_string -- string representation of the input XML
    options   -- optparse Values object; defaults from _options_parser if None
    Returns a string representation of the output XML.

    NOTE: updates the module-level statistics globals as a side effect.
    """
    if options is None:
        options = _options_parser.get_default_values()
    # decimal precision for all numeric scouring done downstream
    getcontext().prec = options.digits
    global numAttrsRemoved
    global numStylePropsFixed
    global numElemsRemoved
    global numBytesSavedInColors
    global numCommentsRemoved
    global numBytesSavedInIDs
    global numBytesSavedInLengths
    global numBytesSavedInTransforms
    doc = xml.dom.minidom.parseString(in_string)

    # for whatever reason this does not always remove all inkscape/sodipodi attributes/elements
    # on the first pass, so we do it multiple times
    # does it have to do with removal of children affecting the childlist?
    if options.keep_editor_data == False:
        while removeNamespacedElements( doc.documentElement, unwanted_ns ) > 0 :
            pass
        while removeNamespacedAttributes( doc.documentElement, unwanted_ns ) > 0 :
            pass

        # remove the xmlns: declarations now
        xmlnsDeclsToRemove = []
        attrList = doc.documentElement.attributes
        for num in xrange(attrList.length) :
            if attrList.item(num).nodeValue in unwanted_ns :
                xmlnsDeclsToRemove.append(attrList.item(num).nodeName)
        for attr in xmlnsDeclsToRemove :
            doc.documentElement.removeAttribute(attr)
            numAttrsRemoved += 1

    # ensure namespace for SVG is declared
    # TODO: what if the default namespace is something else (i.e. some valid namespace)?
    if doc.documentElement.getAttribute('xmlns') != 'http://www.w3.org/2000/svg':
        doc.documentElement.setAttribute('xmlns', 'http://www.w3.org/2000/svg')
        # TODO: throw error or warning?

    # check for redundant SVG namespace declaration
    attrList = doc.documentElement.attributes
    xmlnsDeclsToRemove = []
    redundantPrefixes = []
    for i in xrange(attrList.length):
        attr = attrList.item(i)
        name = attr.nodeName
        val = attr.nodeValue
        if name[0:6] == 'xmlns:' and val == 'http://www.w3.org/2000/svg':
            redundantPrefixes.append(name[6:])
            xmlnsDeclsToRemove.append(name)

    for attrName in xmlnsDeclsToRemove:
        doc.documentElement.removeAttribute(attrName)

    # any prefix that pointed at the SVG namespace is remapped to the default namespace
    for prefix in redundantPrefixes:
        remapNamespacePrefix(doc.documentElement, prefix, '')

    if options.strip_comments:
        numCommentsRemoved = removeComments(doc)

    # repair style (remove unnecessary style properties and change them into XML attributes)
    numStylePropsFixed = repairStyle(doc.documentElement, options)

    # convert colors to #RRGGBB format
    if options.simple_colors:
        numBytesSavedInColors = convertColors(doc.documentElement)

    # remove <metadata> if the user wants to
    if options.remove_metadata:
        removeMetadataElements(doc)

    # remove unreferenced gradients/patterns outside of defs
    # and most unreferenced elements inside of defs
    while removeUnreferencedElements(doc) > 0:
        pass

    # remove empty defs, metadata, g
    # NOTE: these elements will be removed if they just have whitespace-only text nodes
    for tag in ['defs', 'metadata', 'g'] :
        for elem in doc.documentElement.getElementsByTagName(tag) :
            removeElem = not elem.hasChildNodes()
            if removeElem == False :
                # for-else: element is removable only when no child breaks the loop
                for child in elem.childNodes :
                    if child.nodeType in [1, 4, 8]:
                        break
                    elif child.nodeType == 3 and not child.nodeValue.isspace():
                        break
                else:
                    removeElem = True
            if removeElem :
                elem.parentNode.removeChild(elem)
                numElemsRemoved += 1

    if options.strip_ids:
        bContinueLooping = True
        while bContinueLooping:
            identifiedElements = unprotected_ids(doc, options)
            referencedIDs = findReferencedElements(doc.documentElement)
            bContinueLooping = (removeUnreferencedIDs(referencedIDs, identifiedElements) > 0)

    while removeDuplicateGradientStops(doc) > 0:
        pass

    # remove gradients that are only referenced by one other gradient
    while collapseSinglyReferencedGradients(doc) > 0:
        pass

    # remove duplicate gradients
    while removeDuplicateGradients(doc) > 0:
        pass

    # create <g> elements if there are runs of elements with the same attributes.
    # this MUST be before moveCommonAttributesToParentGroup.
    if options.group_create:
        createGroupsForCommonAttributes(doc.documentElement)

    # move common attributes to parent group
    # NOTE: the if the <svg> element's immediate children
    # all have the same value for an attribute, it must not
    # get moved to the <svg> element. The <svg> element
    # doesn't accept fill=, stroke= etc.!
    referencedIds = findReferencedElements(doc.documentElement)
    for child in doc.documentElement.childNodes:
        numAttrsRemoved += moveCommonAttributesToParentGroup(child, referencedIds)

    # remove unused attributes from parent
    numAttrsRemoved += removeUnusedAttributesOnParent(doc.documentElement)

    # Collapse groups LAST, because we've created groups. If done before
    # moveAttributesToParentGroup, empty <g>'s may remain.
    if options.group_collapse:
        while removeNestedGroups(doc.documentElement) > 0:
            pass

    # remove unnecessary closing point of polygons and scour points
    for polygon in doc.documentElement.getElementsByTagName('polygon') :
        cleanPolygon(polygon, options)

    # scour points of polyline
    for polyline in doc.documentElement.getElementsByTagName('polyline') :
        cleanPolyline(polyline, options)

    # clean path data (paths with no data are dropped entirely)
    for elem in doc.documentElement.getElementsByTagName('path') :
        if elem.getAttribute('d') == '':
            elem.parentNode.removeChild(elem)
        else:
            cleanPath(elem, options)

    # shorten ID names as much as possible
    if options.shorten_ids:
        numBytesSavedInIDs += shortenIDs(doc, options.shorten_ids_prefix, unprotected_ids(doc, options))

    # scour lengths (including coordinates)
    # NOTE: the loop variable 'type' shadows the builtin of the same name
    for type in ['svg', 'image', 'rect', 'circle', 'ellipse', 'line', 'linearGradient', 'radialGradient', 'stop', 'filter']:
        for elem in doc.getElementsByTagName(type):
            for attr in ['x', 'y', 'width', 'height', 'cx', 'cy', 'r', 'rx', 'ry',
                         'x1', 'y1', 'x2', 'y2', 'fx', 'fy', 'offset']:
                if elem.getAttribute(attr) != '':
                    elem.setAttribute(attr, scourLength(elem.getAttribute(attr)))

    # more length scouring in this function
    numBytesSavedInLengths = reducePrecision(doc.documentElement)

    # remove default values of attributes
    numAttrsRemoved += removeDefaultAttributeValues(doc.documentElement, options)

    # reduce the length of transformation attributes
    numBytesSavedInTransforms = optimizeTransforms(doc.documentElement, options)

    # convert rasters references to base64-encoded strings
    if options.embed_rasters:
        for elem in doc.documentElement.getElementsByTagName('image') :
            embedRasters(elem, options)

    # properly size the SVG document (ideally width/height should be 100% with a viewBox)
    if options.enable_viewboxing:
        properlySizeDoc(doc.documentElement, options)

    # output the document as a pretty string with a single space for indent
    # NOTE: removed pretty printing because of this problem:
    # http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
    # rolled our own serialize function here to save on space, put id first, customize indentation, etc
    # out_string = doc.documentElement.toprettyxml(' ')
    out_string = serializeXML(doc.documentElement, options) + '\n'

    # now strip out empty lines
    lines = []
    # Get rid of empty lines
    for line in out_string.splitlines(True):
        if line.strip():
            lines.append(line)

    # return the string with its XML prolog and surrounding comments
    if options.strip_xml_prolog == False:
        total_output = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
    else:
        total_output = ""

    for child in doc.childNodes:
        if child.nodeType == 1:
            total_output += "".join(lines)
        else: # doctypes, entities, comments
            total_output += child.toxml() + '\n'

    return total_output
# used mostly by unit tests
# input is a filename
# returns the minidom doc representation of the SVG
def scourXmlFile(filename, options=None):
    """Scour the SVG file at *filename* and return the scoured minidom document.

    Used mostly by unit tests.
    filename -- path to the input SVG file
    options  -- optparse Values object, or None for defaults
    """
    # read via a context manager so the handle is closed deterministically;
    # the old open(filename).read() leaked the handle until GC
    with open(filename) as svg_file:
        in_string = svg_file.read()
    out_string = scourString(in_string, options)
    return xml.dom.minidom.parseString(out_string.encode('utf-8'))
# GZ: Seems most other commandline tools don't do this, is it really wanted?
class HeaderedFormatter(optparse.IndentedHelpFormatter):
    """
    Show application name, version number, and copyright statement
    above usage information.
    """
    def format_usage(self, usage):
        # prepend "APP VER" and the copyright banner (module-level globals)
        # to the usage text produced by the stock optparse formatter
        return "%s %s\n%s\n%s" % (APP, VER, COPYRIGHT,
                                  optparse.IndentedHelpFormatter.format_usage(self, usage))
# GZ: would prefer this to be in a function or class scope, but tests etc need
# access to the defaults anyway
# command-line interface definition; module-level so tests can reach the defaults
_options_parser = optparse.OptionParser(
    usage="%prog [-i input.svg] [-o output.svg] [OPTIONS]",
    description=("If the input/output files are specified with a svgz"
    " extension, then compressed SVG is assumed. If the input file is not"
    " specified, stdin is used. If the output file is not specified, "
    " stdout is used."),
    formatter=HeaderedFormatter(max_help_position=30),
    version=VER)

# feature toggles (mostly "disable" switches for behavior that is on by default)
_options_parser.add_option("--disable-simplify-colors",
    action="store_false", dest="simple_colors", default=True,
    help="won't convert all colors to #RRGGBB format")
_options_parser.add_option("--disable-style-to-xml",
    action="store_false", dest="style_to_xml", default=True,
    help="won't convert styles into XML attributes")
_options_parser.add_option("--disable-group-collapsing",
    action="store_false", dest="group_collapse", default=True,
    help="won't collapse <g> elements")
_options_parser.add_option("--create-groups",
    action="store_true", dest="group_create", default=False,
    help="create <g> elements for runs of elements with identical attributes")
_options_parser.add_option("--enable-id-stripping",
    action="store_true", dest="strip_ids", default=False,
    help="remove all un-referenced ID attributes")
_options_parser.add_option("--enable-comment-stripping",
    action="store_true", dest="strip_comments", default=False,
    help="remove all <!-- --> comments")
_options_parser.add_option("--shorten-ids",
    action="store_true", dest="shorten_ids", default=False,
    help="shorten all ID attributes to the least number of letters possible")
_options_parser.add_option("--shorten-ids-prefix",
    action="store", type="string", dest="shorten_ids_prefix", default="",
    help="shorten all ID attributes with a custom prefix")
_options_parser.add_option("--disable-embed-rasters",
    action="store_false", dest="embed_rasters", default=True,
    help="won't embed rasters as base64-encoded data")
_options_parser.add_option("--keep-editor-data",
    action="store_true", dest="keep_editor_data", default=False,
    help="won't remove Inkscape, Sodipodi or Adobe Illustrator elements and attributes")
_options_parser.add_option("--remove-metadata",
    action="store_true", dest="remove_metadata", default=False,
    help="remove <metadata> elements (which may contain license metadata etc.)")
_options_parser.add_option("--renderer-workaround",
    action="store_true", dest="renderer_workaround", default=True,
    help="work around various renderer bugs (currently only librsvg) (default)")
_options_parser.add_option("--no-renderer-workaround",
    action="store_false", dest="renderer_workaround", default=True,
    help="do not work around various renderer bugs (currently only librsvg)")
_options_parser.add_option("--strip-xml-prolog",
    action="store_true", dest="strip_xml_prolog", default=False,
    help="won't output the <?xml ?> prolog")
_options_parser.add_option("--enable-viewboxing",
    action="store_true", dest="enable_viewboxing", default=False,
    help="changes document width/height to 100%/100% and creates viewbox coordinates")

# GZ: this is confusing, most people will be thinking in terms of
# decimal places, which is not what decimal precision is doing
_options_parser.add_option("-p", "--set-precision",
    action="store", type=int, dest="digits", default=5,
    help="set number of significant digits (default: %default)")
_options_parser.add_option("-i",
    action="store", dest="infilename", help=optparse.SUPPRESS_HELP)
_options_parser.add_option("-o",
    action="store", dest="outfilename", help=optparse.SUPPRESS_HELP)
_options_parser.add_option("-q", "--quiet",
    action="store_true", dest="quiet", default=False,
    help="suppress non-error output")
_options_parser.add_option("--indent",
    action="store", type="string", dest="indent_type", default="space",
    help="indentation of the output: none, space, tab (default: %default)")
_options_parser.add_option("--protect-ids-noninkscape",
    action="store_true", dest="protect_ids_noninkscape", default=False,
    help="Don't change IDs not ending with a digit")
_options_parser.add_option("--protect-ids-list",
    action="store", type="string", dest="protect_ids_list", default=None,
    help="Don't change IDs given in a comma-separated list")
_options_parser.add_option("--protect-ids-prefix",
    action="store", type="string", dest="protect_ids_prefix", default=None,
    help="Don't change IDs starting with the given prefix")
def maybe_gziped_file(filename, mode="r"):
    """Open *filename*, transparently decompressing when the extension
    (.svgz or .gz, case-insensitive) indicates gzip-compressed data.

    Returns an open file-like object.
    """
    if os.path.splitext(filename)[1].lower() in (".svgz", ".gz"):
        import gzip
        return gzip.GzipFile(filename, mode)
    # use the open() builtin: the file() constructor only exists in Python 2
    return open(filename, mode)
def parse_args(args=None):
    """Parse and validate command-line arguments, then open the I/O streams.

    Returns (options, [input_stream, output_stream]).
    """
    options, leftover = _options_parser.parse_args(args)

    # reject anything the parser did not consume
    if leftover:
        _options_parser.error("Additional arguments not handled: %r, see --help" % leftover)
    if options.digits < 0:
        _options_parser.error("Can't have negative significant digits, see --help")
    if options.indent_type not in ("tab", "space", "none"):
        _options_parser.error("Invalid value for --indent, see --help")
    if options.infilename and options.outfilename and options.infilename == options.outfilename:
        _options_parser.error("Input filename is the same as output filename")

    # fall back to the standard streams when no file name was given
    # GZ: could catch a raised IOError here and report
    # GZ: could sniff for gzip compression here
    infile = maybe_gziped_file(options.infilename) if options.infilename else sys.stdin
    outfile = maybe_gziped_file(options.outfilename, "wb") if options.outfilename else sys.stdout

    return options, [infile, outfile]
def getReport():
    """Assemble the human-readable statistics report, one metric per line.

    Reads the module-level counters that the scouring passes update.
    """
    stats = [
        (' Number of elements removed: ', numElemsRemoved),
        (' Number of attributes removed: ', numAttrsRemoved),
        (' Number of unreferenced id attributes removed: ', numIDsRemoved),
        (' Number of style properties fixed: ', numStylePropsFixed),
        (' Number of raster images embedded inline: ', numRastersEmbedded),
        (' Number of path segments reduced/removed: ', numPathSegmentsReduced),
        (' Number of bytes saved in path data: ', numBytesSavedInPathData),
        (' Number of bytes saved in colors: ', numBytesSavedInColors),
        (' Number of points removed from polygons: ', numPointsRemovedFromPolygon),
        (' Number of bytes saved in comments: ', numCommentBytes),
        (' Number of bytes saved in id attributes: ', numBytesSavedInIDs),
        (' Number of bytes saved in lengths: ', numBytesSavedInLengths),
        (' Number of bytes saved in transformations: ', numBytesSavedInTransforms),
    ]
    return os.linesep.join(label + str(value) for label, value in stats)
def generateDefaultOptions():
    """Return an object exposing the parsed option values as plain attributes."""
    ## FIXME: clean up this mess/hack and refactor arg parsing to argparse
    class Struct:
        def __init__(self, **entries):
            self.__dict__.update(entries)

    defaults = parse_args()[0].__dict__.copy()
    return Struct(**defaults)
def start(options, input, output):
    """Run one scouring job: read input, scour it, write output, report stats.

    options -- parsed option values
    input   -- open input stream (closed on completion)
    output  -- open output stream (closed on completion)
    """
    if sys.platform == "win32":
        from time import clock as get_tick
    else:
        # GZ: is this different from time.time() in any way?
        def get_tick():
            return os.times()[0]

    # renamed from 'start' so the local no longer shadows this function
    start_time = get_tick()

    if not options.quiet:
        print >>sys.stderr, "%s %s\n%s" % (APP, VER, COPYRIGHT)

    # do the work
    in_string = input.read()
    out_string = scourString(in_string, options).encode("UTF-8")
    output.write(out_string)

    # Close input and output files
    input.close()
    output.close()

    end_time = get_tick()

    # GZ: not using globals would be good too
    if not options.quiet:
        print >>sys.stderr, ' File:', input.name, \
            os.linesep + ' Time taken:', str(end_time-start_time) + 's' + os.linesep, \
            getReport()

        oldsize = len(in_string)
        newsize = len(out_string)
        # force true division: with plain ints the old expression
        # (newsize / oldsize) * 100 floor-divided under Python 2 and
        # reported 0% (or 100%) for every file
        sizediff = (newsize * 100.0) / oldsize
        print >>sys.stderr, ' Original file size:', oldsize, 'bytes;', \
            'new file size:', newsize, 'bytes (' + str(sizediff)[:5] + '%)'
def run():
    """Command-line entry point: parse the arguments and execute the job."""
    opts, streams = parse_args()
    start(opts, streams[0], streams[1])
# run only when executed as a script, not on import
if __name__ == '__main__':
    run()
| flosse/scour | scour/scour.py | Python | apache-2.0 | 122,876 | [
"VisIt"
] | 4b93ae2367a856fc703354dc80cfe0e8dcfe85c2275d25f447d7fe8eb3418a51 |
#!/usr/bin/env python3
"""This module implements an interface for reading LOFAR TBB data.
This module is strongly based on pyCRtools module tbb.py by Pim Schellart, Tobias Winchen, and others.
However, it has been completely re-written for use with LOFAR-LIM
Author: Brian Hare
Definitions:
LOFAR is split into a number of different stations. There are three main types: Core Stations (CS), Remote Stations (RS), and international stations
Each station contains 96 low band antennas (LBA) and 48 high band antennas (HBA). Each antenna is dual polarized.
Each station is referred to by its name (e.g. "CS001"), which is a string, or its ID (e.g. 1), which is an integer. In general, these are different!
The mapping, however, is unique and is given in utilities.py
There are a few complications with reading the data.
1) The data from each station is often spread over multiple files
There is a class below that can combine multiple files (even from different stations)
2) It is entirely possible that one file could contain multiple stations
This feature is not used, so I assume that it isn't a problem (for now)
3) Each Station has unknown clock offsets. Technically the core stations are all on one clock, but there are some unknown cable delays
This is a difficult problem, not handled here
4) Each Antenna doesn't necessarily start reading data at precisely the same time.
The code below picks the latest start time so this problem can be ignored by the end user
5) The software that inserts metadata (namely antenna positions and calibrations) sometimes "forgets" to do its job
The code below will automatically read the metadata from other files when necessary
6) LOFAR is constantly changing
So..keeping code up-to-date and still backwards compatible will be an interesting challange
7) LOFAR only has 96 RCUs (receiver control units) per station (at the moment).
Each RCU is essentially one digitizer. Each antenna needs two RCUs to record both polarizations. The result is only 1/3 of the antennas can
be read out each time.
LOFAR keeps track of things with two ways. First, the data is all refered to by its RCUid. 0 is the 0th RCU, ect... However, which antenna
corresponds to which RCU depends on the antennaSet. For LOFAR-LIM the antenna set will generally be "LBA_OUTER". This could change, and sometimes
the antenna set is spelled wrong in the data files. (This should be handled here though)
In the code below each RCU is referred to by ANTENNA_NAME or antennaID. These are the same thing (I think). They are, however, a misnomer, as they
actually refer to the RCU, not antenna. The specific antenna depends on the antenna set. For the same antenna set, however, the ANTENNA_NAME will
always refer to the same antenna.
Each ANTENNA_NAME is a string of 9 digits. First three is the station ID (not name!), next three is the group (no idea, don't ask), final 3 is the RCU id
For LBA_INNER data set, even RCU ids refer to X-polarized dipoles and odd RCU ids refer to Y-polarized dipoles. This is flipped for LBA_OUTER antenna set.
X-polarization is NE-SW, and Y-polarization is NW-SE. antenna_responce.py, which handles the antenna function, assumes the data is LBA_OUTER.
This whole file suffers from being the oldest and most important file in LoLIM. As such, it maintains backwards compatibility with many 'old' ways of doing things.
"""
##### TODO:
## add a way to combine event that is spread across close timeID (is this necessary?)
## add proper fitting of phase vs frequency to lines, and a function to return the frequency-independent phase offset
import os
import datetime
import numpy as np
import h5py
import LoLIM.IO.metadata as md
import LoLIM.utilities as util
#nyquist_zone = {'LBA_10_90' : 1, 'LBA_30_90' : 1, 'HBA_110_190' : 2, 'HBA_170_230' : 3, 'HBA_210_250' : 3}
# multipliers that convert a sample-frequency unit string (from the HDF5 attrs) into Hz
conversiondict = {"": 1.0, "kHz": 1000.0, "MHz": 10.0 ** 6, "GHz": 10.0 ** 9, "THz": 10.0 ** 12}
#### helper functions ####
def filePaths_by_stationName(timeID, raw_data_loc=None):
    """Given a timeID, and a location of raw data (default set in utilities.py), return a dictionary.
    The keys of the dictionary are station names, the values are lists of file paths to data files that contain that station."""
    data_file_path = util.raw_data_dir(timeID, raw_data_loc)
    # keep only the TBB HDF5 files; endswith is clearer than slicing f[-6:]
    h5_files = [f for f in os.listdir(data_file_path) if f.endswith('tbb.h5')]

    ret = {}
    for fname in h5_files:
        Fpath = data_file_path + '/' + fname
        # Fname_data extracts (junk, station_name, junk, junk) from the file name
        junk, sname, junk, junk = util.Fname_data(Fpath)
        # group every file belonging to the same station under a single key
        ret.setdefault(sname, []).append(Fpath)

    return ret
def eventData_filePaths(timeID, raw_data_loc=None):
    """Given a timeID, and a location of raw data (default set in utilities.py), return a list of file paths of data files"""
    folder = util.raw_data_dir(timeID, raw_data_loc)
    return [name for name in os.listdir(folder) if name.endswith('tbb.h5')]
######## The following four functions read what I call "correction files" these are corrections made to improve the data ##########
def read_antenna_pol_flips(fname):
    """Read a polarization-flip correction file.

    Each line's first whitespace-separated token is an antenna name;
    returns the list of those names.
    """
    with open(fname) as fin:
        return [ln.split()[0] for ln in fin]
def read_bad_antennas(fname):
    """Read a bad-antenna correction file and return a list of bad antenna names.

    Two formats are supported:
      v1 (default): each line is "<even_ant_name> <pol>"; pol 1 means the odd
          antenna of the pair is bad, pol 0 the even one.
      v2: first line starts with 'v2'; each following line's first token is
          the (even or odd) bad antenna name itself.
    """
    bad_antenna_data = []

    def parse_line_v1(line):
        ant_name, pol = line.split()[0:2]
        # pol is still a *string* here; it must be converted before testing,
        # otherwise "0" is truthy and every entry is treated as odd-polarized
        if int(pol):
            bad_antenna_data.append( util.even_antName_to_odd( ant_name ) )
        else:
            bad_antenna_data.append( ant_name )

    def parse_line_v2(line):
        ant_name = line.split()[0]
        bad_antenna_data.append( ant_name )

    version = 1
    with open(fname) as fin:
        is_line_0 = True
        for line in fin:
            # the v2 header may only appear on the first line
            if is_line_0 and line[:2] == 'v2':
                version = 2
            else:
                if version == 1:
                    parse_line_v1( line )
                elif version == 2:
                    parse_line_v2( line )
            if is_line_0:
                is_line_0 = False

    return bad_antenna_data
def read_antenna_delays(fname):
    """Read an additional-antenna-delay correction file.

    Returns a dict mapping antenna name to [even_pol_delay, odd_pol_delay].
    v1 lines carry both delays; v2 lines carry one delay whose polarization
    is deduced from the antenna name. An optional 'v1'/'v2' header on the
    first line selects the format (v1 is the default).
    """
    additional_ant_delays = {}

    def _parse_v1(ln):
        name, even_delay, odd_delay = ln.split()[0:3]
        additional_ant_delays[name] = [float(even_delay), float(odd_delay)]

    def _parse_v2(ln):
        name, delay = ln.split()[0:2]
        polarity = 0
        if not util.antName_is_even(name):
            # store odd-polarization delays under the odd antenna name
            name = util.even_antName_to_odd(name)
            polarity = 1
        if name not in additional_ant_delays:
            additional_ant_delays[name] = [0.0, 0.0]
        additional_ant_delays[name][polarity] = float(delay)

    parser = _parse_v1
    with open(fname) as fin:
        first = True
        for ln in fin:
            if first and ln[0] == 'v':
                # 'v2' switches parsers; 'v1' (or any other v-header) keeps the default
                if ln[:2] == 'v2':
                    parser = _parse_v2
            else:
                parser(ln)
            if first:
                first = False

    return additional_ant_delays
def read_station_delays(fname):
    """Read a station-delay correction file.

    Each line is "<station_name> <delay>"; returns {station_name: float_delay}.
    """
    with open(fname) as fin:
        pairs = (ln.split()[0:2] for ln in fin)
        return {name: float(delay) for name, delay in pairs}
### this replaces teh above four, and reads from one "ultimate" cal file
def read_cal_file(fname, pol_flips_are_bad):
station_delays = {}
bad_antenna_data = []
ant_delays = {}
antennas_to_flip = []
with open(fname) as fin:
fin.readline() ## version of type of file. Presently unused as we only have one version
mode = 1 ## 1 = bad_antennas 2 = pol_flips 3 = station_delays 4 = antenna_delays
for line in fin:
line_data = line.split()
if line_data[0][0] == '#':
## comment!
continue
## first check mode
if line_data[0] == "bad_antennas":
mode = 1
elif line_data[0] == "pol_flips":
mode = 2
elif line_data[0] == "station_delays":
mode = 3
elif line_data[0] == "antenna_delays":
mode = 4
### now we parse
elif mode == 1 : ## bad antennas
bad_antenna_data.append( line_data[0] )
elif mode == 2:
if pol_flips_are_bad:
bad_antenna_data.append( util.antName_to_even( line_data[0] ) )
bad_antenna_data.append( util.antName_to_odd( line_data[0] ) )
else:
antennas_to_flip.append( util.antName_to_even( line_data[0] ) )
elif mode == 3:
station_delays[ line_data[0] ] = line_data[1]
elif mode == 4:
ant_delays[ line_data[0] ] = line_data[1]
return bad_antenna_data, antennas_to_flip, station_delays, ant_delays
#### data reading class ####
# Note: ASTRON will "soon" release a new DAL (data )
def decode_if_needed(IN):
    """Return IN as a str, decoding it first when it is a bytes-like object.

    h5py attribute values may arrive as bytes or str depending on version.
    """
    if isinstance(IN, str):
        return IN
    return IN.decode()
class TBBData_Dal1:
"""a class for reading one station from one file. However, since one station is often spread between different files,
use filePaths_by_stationName combined with MultiFile_Dal1 below"""
def __init__(self, filename, force_metadata_ant_pos=False, forcemetadata_delays=True):
self.filename = filename
self.force_metadata_ant_pos = force_metadata_ant_pos
self.forcemetadata_delays = forcemetadata_delays
#### open file and set some basic info####
self.file = h5py.File(filename, "r")
stationKeys = [s for s in self.file.keys() if s.startswith('Station')]
## assume there is only one station in the file
if len(stationKeys) != 1:
print("WARNING! file", self.filename, "has more then one station")
self.stationKey = stationKeys[0]
self.antennaSet = decode_if_needed( self.file.attrs['ANTENNA_SET'][0] )
self.dipoleNames = list( self.file[ self.stationKey ].keys() )
self.StationID = self.file[ self.stationKey ][ self.dipoleNames[0] ].attrs["STATION_ID"][0]
self.StationName = util.SId_to_Sname[ self.StationID ]
## assume all antennas have the same sample frequency
self.SampleFrequency = self.file[ self.stationKey ][ self.dipoleNames[0] ].attrs["SAMPLE_FREQUENCY_VALUE"
][0]*conversiondict[ decode_if_needed( self.file[ self.stationKey ][ self.dipoleNames[0] ].attrs["SAMPLE_FREQUENCY_UNIT"][0] ) ]
## filter selection is typically "LBA_10_90"
self.FilterSelection = decode_if_needed( self.file.attrs['FILTER_SELECTION'][0] )
#### check that all antennas start in the same second, and record the same number of samples ####
self.Time = None
self.DataLengths = np.zeros(len(self.dipoleNames), dtype=int)
self.SampleNumbers = np.zeros(len(self.dipoleNames), dtype=int)
for dipole_i, dipole in enumerate(self.dipoleNames):
if self.Time is None:
self.Time = self.file[ self.stationKey ][ dipole ].attrs["TIME"][0]
else:
if self.Time != self.file[ self.stationKey ][ dipole ].attrs["TIME"][0]:
raise IOError("antennas do not start at same time in "+self.filename)
self.DataLengths[dipole_i] = self.file[ self.stationKey ][ dipole ].attrs["DATA_LENGTH"][0]
self.SampleNumbers[dipole_i] = self.file[ self.stationKey ][ dipole ].attrs["SAMPLE_NUMBER"][0]
#### get position and delay metadata...maybe####
self.have_metadata = 'DIPOLE_CALIBRATION_DELAY_VALUE' in self.file[ self.stationKey ][ self.dipoleNames[0] ].attrs
self.antenna_filter = md.make_antennaID_filter(self.dipoleNames)
# load antenna locations from metadata and from file. IF they are too far apart, then give warning, and use metadata
self.ITRF_dipole_positions = md.getItrfAntennaPosition(self.StationName, self.antennaSet)[ self.antenna_filter ] ## load positions from metadata file
if self.have_metadata and not self.force_metadata_ant_pos:
use_TBB_positions = True
TBB_ITRF_dipole_positions = np.empty((len(self.dipoleNames), 3), dtype=np.double)
for i,dipole in enumerate(self.dipoleNames):
TBB_ITRF_dipole_positions[i] = self.file[ self.stationKey ][dipole].attrs['ANTENNA_POSITION_VALUE']
dif = np.linalg.norm( TBB_ITRF_dipole_positions[i]-self.ITRF_dipole_positions[i] )
if dif > 1:
print("WARNING: station", self.StationName, "has suspicious antenna locations. Using metadata instead")
use_TBB_positions = False
if use_TBB_positions:
self.ITRF_dipole_positions = TBB_ITRF_dipole_positions
self.calibrationDelays = np.zeros( len(self.dipoleNames), dtype=np.double ) ## defined as callibration values in file. Never from external metadata!
if self.have_metadata:# and not self.forcemetadata_delays:
for i,dipole in enumerate(self.dipoleNames):
self.calibrationDelays[i] = self.file[ self.stationKey ][dipole].attrs['DIPOLE_CALIBRATION_DELAY_VALUE']
#### get the offset, in number of samples, needed so that each antenna starts at the same time ####
self.nominal_sample_number = np.max( self.SampleNumbers )
self.sample_offsets = self.nominal_sample_number - self.SampleNumbers
self.nominal_DataLengths = self.DataLengths - self.sample_offsets
#### PICKLING ####
## this is for multiprocessing. Note that doing this KILLS this file. Otherwise bugs result (I think?)
# it doesn't work
# def __getstate__(self):
# if self.file is not None:
# self.file.close()
# self.file = None
# d = dict( self.__dict__ )
# # d['file'] = None
# return d
# def __setstate__(self, d):
# self.__dict__ = d
# self.file = h5py.File(self.filename, "r")
#### GETTERS ####
def needs_metadata(self):
"""return true if this file does not have metadata"""
return not self.have_metadata
def get_station_name(self):
"""returns the name of the station, as a string"""
return self.StationName
def get_station_ID(self):
"""returns the ID of the station, as an integer. This is not the same as StationName. Mapping is givin in utilities"""
return self.StationID
def get_antenna_names(self):
"""return name of antenna as a list of strings. This is really the RCU id, and the physical antenna depends on the antennaSet"""
return self.dipoleNames
def get_antenna_set(self):
"""return the antenna set as a string. Typically "LBA_OUTER" """
return self.antennaSet
def get_sample_frequency(self):
"""gets samples per second. Typically 200 MHz."""
return self.SampleFrequency
def get_filter_selection(self):
"""return a string that represents the frequency filter used. Typically "LBA_10_90"""
return self.FilterSelection
def get_timestamp(self):
"""return the POSIX timestamp of the first data point"""
return self.Time
def get_full_data_lengths(self):
"""get the number of samples stored for each antenna. Note that due to the fact that the antennas do not start recording
at the exact same instant (in general), this full data length is not all usable
returns array of ints"""
return self.DataLengths
def get_all_sample_numbers(self):
"""return numpy array that contains the sample numbers of each antenna. Divide this by the sample frequency to get time
since the timestame of the first data point. Note that since these are, in general, different, they do NOT refer to sample
0 of "get_data" in general """
return self.SampleNumbers
def get_nominal_sample_number(self):
"""return the sample number of the 0th data sample returned by get_data.
Divide by sample_frequency to get time from timestamp of the 0th data sample"""
return self.nominal_sample_number
def get_nominal_data_lengths(self):
"""return the number of data samples that are usable for each antenna, accounting for different starting sample numbers.
returns array of ints"""
return self.nominal_DataLengths
def get_ITRF_antenna_positions(self, copy=False):
"""returns the ITRF positions of the antennas. Returns a 2D numpy array. If copy is False, then this just returns the internal array of values"""
if copy:
return np.array( self.ITRF_dipole_positions )
else:
return self.ITRF_dipole_positions
def get_LOFAR_centered_positions(self, out=None):
"""returns the positions (as a 2D numpy array) of the antennas with respect to CS002.
if out is a numpy array, it is used to store the antenna positions, otherwise a new array is allocated"""
return md.convertITRFToLocal(self.ITRF_dipole_positions, out=out)
def get_timing_callibration_phases(self):
"""only a test function for the moment, do not use"""
fpath = os.path.dirname(self.filename) + '/'+self.StationName
phase_calibration = md.getStationPhaseCalibration(self.StationName, self.antennaSet,self.FilterSelection, file_location=fpath )
phase_calibration = phase_calibration[ self.antenna_filter ]
return phase_calibration
def get_timing_callibration_delays(self, force_file_delays=False):
"""return the timing callibration of the anntennas, as a 1D np array. If not included in the metadata, will look
for a data file in the same directory as this file. Otherwise returns None"""
if (self.have_metadata and not self.forcemetadata_delays) or force_file_delays:
return self.calibrationDelays
else:
fpath = os.path.dirname(self.filename) + '/'+self.StationName
phase_calibration = md.getStationPhaseCalibration(self.StationName, self.antennaSet,self.FilterSelection, file_location=fpath )
phase_calibration = phase_calibration[ self.antenna_filter ]
return md.convertPhase_to_Timing(phase_calibration, 1.0/self.SampleFrequency)
    def get_data(self, start_index, num_points, antenna_index=None, antenna_ID=None):
        """return the raw data for a specific antenna, as an 1D int16 numpy array, of length num_points. First point returned is
        start_index past get_nominal_sample_number(). Specify the antenna by giving the antenna_ID (which is a string, same
        as output from get_antenna_names(), or as an integer antenna_index. An antenna_index of 0 is the first antenna in
        get_antenna_names()."""
        if antenna_index is None:
            if antenna_ID is None:
                raise LookupError("need either antenna_ID or antenna_index")
            antenna_index = self.dipoleNames.index(antenna_ID)
        else:
            antenna_ID = self.dipoleNames[ antenna_index ]
        # shift by this antenna's offset so that sample 0 lines up across antennas
        initial_point = self.sample_offsets[ antenna_index ] + start_index
        final_point = initial_point+num_points
        # slice the raw samples straight out of the underlying HDF5-style dataset
        return self.file[ self.stationKey ][ antenna_ID ][initial_point:final_point]
class MultiFile_Dal1:
    """A class for reading the data from one station from multiple files"""
    def __init__(self, filename_list, force_metadata_ant_pos=False, total_cal=None,
                 polarization_flips=None, bad_antennas=None, additional_ant_delays=None, station_delay=0.0,
                 only_complete_pairs=True, pol_flips_are_bad=False):
        """filename_list: list of filenames for this station for this event.
        force_metadata_ant_pos -if True, then load antenna positions from a metadata file and not the raw data file. Default False
        polarization_flips -list of even antennas where it is known that even and odd antenna names are flipped in file. This is assumed to apply both to data and timing calibration
        bad_antennas -list of antennas that should not be used. Assumed to be BEFORE antenna flips are accounted for.
            Defaults to no bad antennas. (The old default of a literal [] was a shared mutable default that this
            method mutated when pol_flips_are_bad was True; passing None is now the no-bad-antennas default.)
        additional_ant_delays -a dictionary. Each key is name of even antenna, each value is a tuple with additional even and odd antenna delays. This should rarely be needed.
            assumed to be found BEFORE antenna flips are accounted for
        station_delay -a single number that represents the clock offset of this station, as a delay
        NOTE: polarization_flips, bad_antennas, additional_ant_delays, and station_delay can now be strings that are file names. If this is the case, they will be read automatically
        only_complete_pairs -if True, discards antenna if the other in pair is not present or is bad. If False, keeps all good antennas with a 'none' value if other antenna in pair is missing
        pol_flips_are_bad -if True, antennas that are in pol-flips are included in 'bad_antennas'
        NOTE: This always defaults to using antenna timing calibration from metadata."""
        if bad_antennas is None: ## avoid the mutable-default-argument pitfall
            bad_antennas = []
        self.files = [TBBData_Dal1(fname, force_metadata_ant_pos) for fname in filename_list]
        #### get some data that should be constant #### TODO: change code to make use of getters
        self.antennaSet = self.files[0].antennaSet
        self.StationID = self.files[0].StationID
        self.StationName = self.files[0].StationName
        self.SampleFrequency = self.files[0].SampleFrequency
        self.FilterSelection = self.files[0].FilterSelection
        self.Time = self.files[0].Time
        if total_cal is not None:
            self.using_total_cal = True
            if isinstance(total_cal, str): ## a calibration file to read
                bad_antennas, polarization_flips, station_delays, ant_delays = read_cal_file(total_cal, pol_flips_are_bad)
            elif isinstance(total_cal, list): ## pre-parsed calibration data
                bad_antennas, polarization_flips, station_delays, ant_delays = total_cal
            else:
                ## bad_antennas and polarization_flips come from the normal arguments
                station_delays = {self.StationName:station_delay} ## hack to be compatible with below
                ant_delays = additional_ant_delays
                additional_ant_delays = None ## since this should really be a different thing
            if self.StationName in station_delays:
                station_delay = station_delays[ self.StationName ]
            else:
                station_delay = 0.0
            self.ant_delays = ant_delays
        else:
            self.using_total_cal = False
            ## each calibration input may also be given as a file name to be read
            if isinstance(polarization_flips, str):
                polarization_flips = read_antenna_pol_flips( polarization_flips )
            if isinstance(bad_antennas, str):
                bad_antennas = read_bad_antennas( bad_antennas )
            if isinstance(additional_ant_delays, str):
                additional_ant_delays = read_antenna_delays( additional_ant_delays )
            if isinstance(station_delay, str):
                station_delay = read_station_delays( station_delay )[ self.StationName ]
            if polarization_flips is not None and pol_flips_are_bad:
                ## copy first: never mutate a list the caller handed in
                bad_antennas = list(bad_antennas)
                for even_ant in polarization_flips:
                    bad_antennas.append( even_ant )
                    bad_antennas.append( util.even_antName_to_odd( even_ant ) )
                polarization_flips = []
        self.bad_antennas = bad_antennas
        self.odd_pol_additional_timing_delay = 0.0 ## another timing delay to add to all odd-polarized antennas. Should remain zero if using_total_cal
        self.station_delay = station_delay
        #### check consistancy of data ####
        for TBB_file in self.files:
            if TBB_file.antennaSet != self.antennaSet:
                raise IOError("antenna set not the same between files for station: "+self.StationName)
            if TBB_file.StationID != self.StationID:
                raise IOError("station ID not the same between files for station: "+self.StationName)
            if TBB_file.StationName != self.StationName:
                raise IOError("station name not the same between files for station: "+self.StationName)
            if TBB_file.FilterSelection != self.FilterSelection:
                raise IOError("filter selection not the same between files for station: "+self.StationName)
            if TBB_file.Time != self.Time:
                ## bugfix: this message previously (wrongly) said "antenna set not the same"
                raise IOError("time not the same between files for station: "+self.StationName)
        ## check LBA outer antenna set
        if self.antennaSet != "LBA_OUTER":
            print("WARNING: antenna set on station", self.StationName, "is not LBA_OUTER")
        #### find best files to get antennas from ####
        ## require each antenna shows up once, and even pol is followed by odd pol
        self.dipoleNames = []
        self.antenna_to_file = [] ## each item is a tuple. First item is file object, second is antenna index in file
        unused_antenna_names = []
        unused_antenna_to_file = []
        for TBB_file in self.files:
            file_ant_names = TBB_file.get_antenna_names()
            for ant_i,ant_name in enumerate(file_ant_names):
                if (ant_name in self.dipoleNames):
                    continue
                if util.antName_is_even(ant_name): ## antenna is even
                    if ant_name in bad_antennas:
                        continue
                    odd_ant_name = util.even_antName_to_odd( ant_name )
                    if odd_ant_name in unused_antenna_names: ## we have the odd antenna
                        self.dipoleNames.append(ant_name)
                        self.dipoleNames.append(odd_ant_name)
                        self.antenna_to_file.append( (TBB_file, ant_i) )
                        odd_unused_index = unused_antenna_names.index( odd_ant_name )
                        self.antenna_to_file.append( unused_antenna_to_file[ odd_unused_index ] )
                        unused_antenna_names.pop( odd_unused_index )
                        unused_antenna_to_file.pop( odd_unused_index )
                    else: ## we haven't found the odd antenna, so store info for now
                        unused_antenna_names.append(ant_name)
                        unused_antenna_to_file.append( (TBB_file, ant_i) )
                else: ## antenna is odd
                    even_ant_name = util.odd_antName_to_even( ant_name )
                    if ant_name in bad_antennas:
                        continue
                    if even_ant_name in unused_antenna_names: ## we have the even antenna
                        self.dipoleNames.append(even_ant_name)
                        self.dipoleNames.append(ant_name)
                        even_unused_index = unused_antenna_names.index( even_ant_name )
                        self.antenna_to_file.append( unused_antenna_to_file[ even_unused_index ] )
                        unused_antenna_names.pop( even_unused_index )
                        unused_antenna_to_file.pop( even_unused_index )
                        self.antenna_to_file.append( (TBB_file, ant_i) )
                    else: ## we haven't found the even antenna, so store info for now
                        unused_antenna_names.append(ant_name)
                        unused_antenna_to_file.append( (TBB_file, ant_i) )
        if not only_complete_pairs:
            ## keep un-paired antennas too, inserting a None placeholder for the missing partner
            for ant_name, to_file in zip(unused_antenna_names, unused_antenna_to_file):
                ant_ID = int(ant_name[-3:])
                if ant_ID%2 == 0: ## even antenna
                    self.dipoleNames.append( ant_name )
                    self.antenna_to_file.append( to_file )
                    self.dipoleNames.append( ant_name[:-3] + str(ant_ID+1).zfill(3) ) ## add the odd antenna
                    self.antenna_to_file.append( None ) ## doesn't exist in a file
                else:
                    self.dipoleNames.append( ant_name[:-3] + str(ant_ID-1).zfill(3) ) ## add the even antenna
                    self.antenna_to_file.append( None ) ## doesn't exist in a file
                    self.dipoleNames.append( ant_name )
                    self.antenna_to_file.append( to_file )
        if len(self.dipoleNames) == 0:
            print('station', self.StationName, 'has no antennas')
            return
        self.index_adjusts = np.arange( len(self.antenna_to_file) ) ## used to compensate for polarization flips
        ### when given an antenna index to open data, use this index instead to open the correct data location
        #### get sample numbers and offsets and lengths and other related stuff ####
        ## NOTE(review): antenna_to_file can contain None entries when only_complete_pairs is
        ## False; this loop would then fail on unpacking — confirm intended usage.
        self.SampleNumbers = []
        self.DataLengths = []
        for TBB_file, file_ant_i in self.antenna_to_file:
            self.SampleNumbers.append( TBB_file.SampleNumbers[file_ant_i] )
            self.DataLengths.append( TBB_file.DataLengths[file_ant_i] )
        self.SampleNumbers = np.array( self.SampleNumbers, dtype=int )
        self.DataLengths = np.array(self.DataLengths, dtype=int)
        self.nominal_sample_number = np.max( self.SampleNumbers )
        self.sample_offsets = self.nominal_sample_number - self.SampleNumbers
        self.nominal_DataLengths = self.DataLengths - self.sample_offsets
        self.even_ant_pol_flips = None
        if polarization_flips is not None:
            self.set_polarization_flips( polarization_flips )
        self.additional_ant_delays = additional_ant_delays
    def set_polarization_flips(self, even_antenna_names):
        """given a set of names(IDs) of even antennas, flip the data between the even and odd antennas"""
        self.even_ant_pol_flips = even_antenna_names
        for ant_name in even_antenna_names:
            if ant_name in self.dipoleNames:
                even_antenna_index = self.dipoleNames.index(ant_name)
                ## swap the even slot with the odd slot that follows it
                self.index_adjusts[even_antenna_index] += 1
                self.index_adjusts[even_antenna_index+1] -= 1
    def set_odd_polarization_delay(self, new_delay):
        """set an additional timing delay (seconds) applied to all odd-polarized antennas"""
        self.odd_pol_additional_timing_delay = new_delay
    def set_station_delay(self, station_delay):
        """ set the station delay, should be a number"""
        self.station_delay = station_delay
    ## this should be deprecated
    def find_and_set_polarization_delay(self, verbose=False, tolerance=1e-9):
        """Estimate the systematic even-odd timing offset from the phase-calibration tables,
        apply it via set_odd_polarization_delay, and return the names of antennas whose
        offset deviates from the median by more than 'tolerance'."""
        if self.using_total_cal:
            print('warning: calibration probably already accounts for polarized delay. IN: find_and_set_polarization_delay')
        fpath = os.path.dirname(self.files[0].filename) + '/'+self.StationName
        phase_calibration = md.getStationPhaseCalibration(self.StationName, self.antennaSet,self.FilterSelection, file_location=fpath )
        all_antenna_calibrations = md.convertPhase_to_Timing(phase_calibration, 1.0/self.SampleFrequency)
        even_delays = all_antenna_calibrations[::2]
        odd_delays = all_antenna_calibrations[1::2]
        odd_offset = odd_delays-even_delays
        median_odd_offset = np.median( odd_offset )
        if verbose:
            print("median offset is:", median_odd_offset)
        below_tolerance = np.abs( odd_offset-median_odd_offset ) < tolerance
        if verbose:
            print(np.sum(below_tolerance), "antennas below tolerance.", len(below_tolerance)-np.sum(below_tolerance), "above.")
        ave_best_offset = np.average( odd_offset[below_tolerance] )
        if verbose:
            print("average of below-tolerance offset is:", ave_best_offset)
        self.set_odd_polarization_delay( -ave_best_offset )
        above_tolerance = np.zeros( len(all_antenna_calibrations), dtype=bool )
        above_tolerance[::2] = np.logical_not( below_tolerance )
        above_tolerance[1::2] = above_tolerance[::2]
        above_tolerance = above_tolerance[ md.make_antennaID_filter(self.get_antenna_names()) ]
        return [AN for AN, AT in zip(self.get_antenna_names(),above_tolerance) if AT]
    #### GETTERS ####
    def needs_metadata(self):
        """True if any of the underlying files still needs external metadata"""
        for TBB_file in self.files:
            if TBB_file.needs_metadata():
                return True
        return False
    def get_station_name(self):
        """returns the name of the station, as a string"""
        return self.StationName
    def get_station_ID(self):
        """returns the ID of the station, as an integer. This is not the same as StationName. Mapping is givin in utilities"""
        return self.StationID
    def get_antenna_names(self):
        """return name of antenna as a list of strings. This is really the RCU id, and the physical antenna depends on the antennaSet"""
        return self.dipoleNames
    def has_antenna(self, antenna_name):
        """if only_complete_pairs is False, then we could have antenna names without the data. Return True if we actually have the antenna, False otherwise. Account for polarization flips."""
        if antenna_name not in self.dipoleNames:
            return False
        ## bugfix: index_adjusts is an array and must be indexed, not called
        index = self.index_adjusts[ self.dipoleNames.index(antenna_name) ]
        return self.antenna_to_file[index] is not None
    def get_antenna_set(self):
        """return the antenna set as a string. Typically "LBA_OUTER" """
        return self.antennaSet
    def get_sample_frequency(self):
        """gets samples per second. Typically 200 MHz."""
        return self.SampleFrequency
    def get_filter_selection(self):
        """return a string that represents the frequency filter used. Typically "LBA_10_90"""
        return self.FilterSelection
    def get_timestamp(self):
        """return the POSIX timestamp of the first data point"""
        return self.Time
    def get_timestamp_as_datetime(self):
        """return the POSIX timestampe of the first data point as a python datetime localized to UTC"""
        return datetime.datetime.fromtimestamp( self.get_timestamp(), tz=datetime.timezone.utc )
    def get_full_data_lengths(self):
        """get the number of samples stored for each antenna. Note that due to the fact that the antennas do not start recording
        at the exact same instant (in general), this full data length is not all usable
        returns array of ints"""
        return self.DataLengths
    def get_all_sample_numbers(self):
        """return numpy array that contains the sample numbers of each antenna. Divide this by the sample frequency to get time
        since the timestame of the first data point. Note that since these are, in general, different, they do NOT refer to sample
        0 of "get_data" """
        return self.SampleNumbers
    def get_nominal_sample_number(self):
        """return the sample number of the 0th data sample returned by get_data.
        Divide by sample_frequency to get time from timestamp of the 0th data sample"""
        return self.nominal_sample_number
    def get_nominal_data_lengths(self):
        """return the number of data samples that are usable for each antenna, accounting for different starting sample numbers.
        returns array of ints"""
        return self.nominal_DataLengths
    def get_ITRF_antenna_positions(self, out=None):
        """returns the ITRF positions of the antennas. Returns a 2D numpy array.
        if out is a numpy array, it is used to store the antenna positions, otherwise a new array is allocated.
        Does not account for polarization flips, but shouldn't need too."""
        if out is None:
            out = np.empty( (len(self.dipoleNames), 3) )
        for ant_i, (TBB_file,station_ant_i) in enumerate(self.antenna_to_file):
            out[ant_i] = TBB_file.ITRF_dipole_positions[station_ant_i]
        return out
    def get_LOFAR_centered_positions(self, out=None):
        """returns the positions (as a 2D numpy array) of the antennas with respect to CS002.
        if out is a numpy array, it is used to store the antenna positions, otherwise a new array is allocated.
        Does not account for polarization flips, but shouldn't need too."""
        if out is None:
            out = np.empty( (len(self.dipoleNames), 3) )
        md.convertITRFToLocal( self.get_ITRF_antenna_positions(), out=out )
        return out
    def get_timing_callibration_phases(self):
        """only a test function for the moment, do not use"""
        out = [None for i in range(len(self.dipoleNames))]
        for TBB_file in self.files:
            ret = TBB_file.get_timing_callibration_phases()
            if ret is None:
                return None
            for ant_i, (TBB_fileA,station_ant_i) in enumerate(self.antenna_to_file):
                if TBB_fileA is TBB_file:
                    out[ant_i] = ret[station_ant_i]
        return np.array(out)
    def get_timing_callibration_delays(self, out=None, force_file_delays=False):
        """return the timing callibration of the anntennas, as a 1D np array. If not included in the metadata, will look
        for a data file in the same directory as this file. Otherwise returns None.
        if out is a numpy array, it is used to store the antenna delays, otherwise a new array is allocated.
        This takes polarization flips, and additional_ant_delays into account (assuming that both were found BEFORE the pol flip was found).
        Also can account for a timing difference between even and odd antennas, if it is set."""
        if out is None:
            out = np.zeros( len(self.dipoleNames), dtype=np.double )
        if self.using_total_cal and not force_file_delays:
            for i,ant_name in enumerate( self.dipoleNames ):
                out[i] = self.ant_delays[ant_name]
        else: ## this whole clunky thing is purely historical. Should be deprecated!!
            for TBB_file in self.files:
                ret = TBB_file.get_timing_callibration_delays(force_file_delays)
                if ret is None:
                    return None
                for ant_i, adjust_i in enumerate(self.index_adjusts):
                    TBB_fileA,station_ant_i = self.antenna_to_file[adjust_i]
                    if TBB_fileA is TBB_file:
                        out[ant_i] = ret[station_ant_i]
                    if self.additional_ant_delays is not None:
                        ## additional_ant_delays stores only even antenna names for historical reasons. so we need to be clever here
                        ## NOTE(review): this += runs once per entry of self.files; with multiple
                        ## files the additional delay appears to be added repeatedly — confirm.
                        antenna_polarization = 0 if (ant_i%2==0) else 1
                        even_ant_name = self.dipoleNames[ ant_i-antenna_polarization ]
                        if even_ant_name in self.additional_ant_delays:
                            ## bugfix: even_ant_pol_flips is None when no flips were ever set
                            if self.even_ant_pol_flips is not None and even_ant_name in self.even_ant_pol_flips:
                                antenna_polarization = int(not antenna_polarization)
                            out[ant_i] += self.additional_ant_delays[ even_ant_name ][ antenna_polarization ]
        out[1::2] += self.odd_pol_additional_timing_delay
        return out
    def get_total_delays(self, out=None):
        """Return the total delay for each antenna, accounting for all antenna delays, polarization delay, station clock offsets, and trigger time offsets (nominal sample number).
        This function should be prefered over 'get_timing_callibration_delays', but the offsets can have a large average. It is recomended to pick one antenna (on your referance station)
        and use it as a referance antenna so that it has zero timing delay. Note: this creates two defintions of T=0. I will call 'uncorrected time' is when the result of this function is
        used as-is, and a referance antenna is not choosen. (IE, the referance station can have a large total_delay offset), 'corrected time' will be otherwise."""
        delays = self.get_timing_callibration_delays(out)
        ## 5.0E-9 s is one sample period — assumes 200 MHz sampling (see get_sample_frequency)
        delays += self.station_delay - self.get_nominal_sample_number()*5.0E-9
        return delays
    def get_time_from_second(self, out=None):
        """ return the time (in units of seconds) since the second of each antenna (which should be get_timestamp). accounting for delays. This is literally just the oppisite of get_total_delays"""
        out = self.get_total_delays(out)
        out *= -1
        return out
    def get_geometric_delays(self, source_location, out=None, antenna_locations=None):
        """Calculate travel time from a XYZ location to each antenna. out can be an array of length equal to number of antennas.
        antenna_locations is the table of antenna locations, given by get_LOFAR_centered_positions(). If None, it is calculated. Note that antenna_locations CAN be modified in this function.
        If antenna_locations is less then all antennas, then the returned array will be correspondingly shorter.
        The output of this function plus??? get_total_delays plus emission time of the source is the time the source is seen on each antenna."""
        if antenna_locations is None:
            antenna_locations = self.get_LOFAR_centered_positions()
        if out is None:
            out = np.empty( len(antenna_locations), dtype=np.double )
        if len(out) != len(antenna_locations):
            print("ERROR: arrays are not of same length in geometric_delays()")
            return None
        ## distance = |antenna - source|; computed in-place on antenna_locations
        antenna_locations -= source_location
        antenna_locations *= antenna_locations
        np.sum(antenna_locations, axis=1, out=out)
        np.sqrt(out, out=out)
        out /= util.v_air
        return out
    def get_data(self, start_index, num_points, antenna_index=None, antenna_ID=None):
        """return the raw data for a specific antenna, as an 1D int16 numpy array, of length num_points. First point returned is
        start_index past get_nominal_sample_number(). Specify the antenna by giving the antenna_ID (which is a string, same
        as output from get_antenna_names(), or as an integer antenna_index. An antenna_index of 0 is the first antenna in
        get_antenna_names()."""
        if antenna_index is None:
            if antenna_ID is None:
                raise LookupError("need either antenna_ID or antenna_index")
            antenna_index = self.dipoleNames.index(antenna_ID)
        antenna_index = self.index_adjusts[antenna_index] ## in case of polarization flips
        initial_point = self.sample_offsets[ antenna_index ] + start_index
        final_point = initial_point+num_points
        to_file = self.antenna_to_file[antenna_index]
        if to_file is None:
            raise LookupError("do not have data for this antenna")
        TBB_file,station_antenna_index = to_file
        antenna_ID = self.dipoleNames[ antenna_index ]
        if final_point >= len( TBB_file.file[ TBB_file.stationKey ][ antenna_ID ] ):
            print("WARNING! data point", final_point, "is off end of file", len( TBB_file.file[ TBB_file.stationKey ][ antenna_ID ] ))
        return TBB_file.file[ TBB_file.stationKey ][ antenna_ID ][initial_point:final_point]
if __name__ == "__main__":
    ## manual smoke test: read one block of raw data from one antenna of one station and plot it
    import matplotlib.pyplot as plt
    timeID = "D20170929T202255.000Z"
    station = "RS406"
    antenna_id = 0
    block_size = 2**16
    block_number = 30#3900
    raw_fpaths = filePaths_by_stationName(timeID)
    infile = MultiFile_Dal1(raw_fpaths[station])
    # infile = MultiFile_Dal1(["./new_file.h5"])
    print( infile.get_LOFAR_centered_positions() )
    ## start_index is relative to the nominal (aligned) sample number
    data = infile.get_data(block_number*block_size, block_size, antenna_index=antenna_id)
    plt.plot(data)
    plt.show()
#
| Bhare8972/LOFAR-LIM | LIM_scripts/IO/raw_tbb_IO.py | Python | mit | 47,210 | [
"Brian"
] | 5395df6cc969d2bfc0df5410cd9848aa8dcc7984ab66623b21b80997e1cf1cc3 |
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
## Short aliases for the NameMapper lookup functions. These exact names are
## emitted into generated template code (see GenUtils.genNameMapperVar).
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception):
    """Generic exception raised by the Cheetah compiler classes."""
# Settings format: (key, default, docstring)
_DEFAULT_COMPILER_SETTINGS = [
('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'),
('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'),
('allowSearchListAsMethArg', True, ''),
('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'),
('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'),
('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'),
('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
('useFilters', True, 'If False, pass output through str()'),
('includeRawExprInFilterArgs', True, ''),
('useLegacyImportMode', True, 'All #import statements are relocated to the top of the generated Python module'),
('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'),
('autoAssignDummyTransactionToSelf', False, ''),
('useKWsDictArgForPassingTrans', True, ''),
('commentOffset', 1, ''),
('outputRowColComments', True, ''),
('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'),
('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
('setup__str__method', False, ''),
('mainMethodName', 'respond', ''),
('mainMethodNameForSubclasses', 'writeBody', ''),
('indentationStep', ' ' * 4, ''),
('initialMethIndentLevel', 2, ''),
('monitorSrcFile', False, ''),
('outputMethodsBeforeAttributes', True, ''),
('addTimestampsToCompilerOutput', True, ''),
## Customizing the #extends directive
('autoImportForExtendsDirective', True, ''),
('handlerForExtendsDirective', None, ''),
('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'),
('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'),
('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparsePlaceholderHooks', [], 'callable(parser)'),
('postparsePlaceholderHooks', [], 'callable(parser)'),
('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder" The filters *must* return the expr or raise an expression, they can modify the expr if needed'''),
('templateMetaclass', None, 'Strictly optional, only will work with new-style basecalsses as well'),
('i18NFunctionName', 'self.i18n', ''),
('cheetahVarStartToken', '$', ''),
('commentStartToken', '##', ''),
('multiLineCommentStartToken', '#*', ''),
('multiLineCommentEndToken', '*#', ''),
('gobbleWhitespaceAroundMultiLineComments', True, ''),
('directiveStartToken', '#', ''),
('directiveEndToken', '#', ''),
('allowWhitespaceAfterDirectiveStartToken', False, ''),
('PSPStartToken', '<%', ''),
('PSPEndToken', '%>', ''),
('EOLSlurpToken', '#', ''),
('gettextTokens', ["_", "N_", "ngettext"], ''),
('allowExpressionsInExtendsDirective', False, ''),
('allowEmptySingleLineMethods', False, ''),
('allowNestedDefScopes', True, ''),
('allowPlaceholderFilterArgs', True, ''),
]
DEFAULT_COMPILER_SETTINGS = dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS])
class GenUtils(object):
    """An abstract baseclass for the Compiler classes that provides methods that
    perform generic utility functions or generate pieces of output code from
    information passed in by the Parser baseclass. These methods don't do any
    parsing themselves.
    """
    ## seconds per unit-suffix understood by genTimeInterval; a bare number means minutes
    _timeIntervalUnits = {'s': 1, 'm': 60, 'h': 60*60, 'd': 60*60*24, 'w': 60*60*24*7}
    def genTimeInterval(self, timeString):
        """Convert a cache-interval string ('15s', '5m', '2h', '1d', '1w') into a
        float number of seconds. A string without a unit suffix is taken as minutes.
        ##@@ TR: need to add some error handling here
        """
        suffix = timeString[-1]
        if suffix in self._timeIntervalUnits:
            return float(timeString[:-1]) * self._timeIntervalUnits[suffix]
        else: # default to minutes
            return float(timeString) * 60
    def genCacheInfo(self, cacheTokenParts):
        """Decipher a placeholder cachetoken
        """
        cacheInfo = {}
        if cacheTokenParts['REFRESH_CACHE']:
            cacheInfo['type'] = REFRESH_CACHE
            cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
        elif cacheTokenParts['STATIC_CACHE']:
            cacheInfo['type'] = STATIC_CACHE
        return cacheInfo # is empty if no cache
    def genCacheInfoFromArgList(self, argList):
        """Build a cacheInfo dict from an (argName, argValue) list; the type is
        initialized to REFRESH_CACHE. Quoted values are unquoted, and a 'timer'
        argument is renamed 'interval' with its value converted to seconds."""
        cacheInfo = {'type':REFRESH_CACHE}
        for key, val in argList:
            if val[0] in '"\'':
                val = val[1:-1] # strip the surrounding quotes
            if key == 'timer':
                key = 'interval'
                val = self.genTimeInterval(val)
            cacheInfo[key] = val
        return cacheInfo
    def genCheetahVar(self, nameChunks, plain=False):
        """Generate the Python code for a Cheetah $var, using NameMapper lookups
        unless plain=True or the useNameMapper setting is off."""
        if nameChunks[0][0] in self.setting('gettextTokens'):
            self.addGetTextVar(nameChunks)
        if self.setting('useNameMapper') and not plain:
            return self.genNameMapperVar(nameChunks)
        else:
            return self.genPlainVar(nameChunks)
    def addGetTextVar(self, nameChunks):
        """Output something that gettext can recognize.
        This is a harmless side effect necessary to make gettext work when it
        is scanning compiled templates for strings marked for translation.
        @@TR: another marginally more efficient approach would be to put the
        output in a dummy method that is never called.
        """
        # @@TR: this should be in the compiler not here
        # emit the var inside an 'if False:' so it is visible to gettext but never runs
        self.addChunk("if False:")
        self.indent()
        self.addChunk(self.genPlainVar(nameChunks[:]))
        self.dedent()
    def genPlainVar(self, nameChunks):
        """Generate Python code for a Cheetah $var without using NameMapper
        (Unified Dotted Notation with the SearchList).
        NOTE: consumes (mutates) the nameChunks list it is given."""
        nameChunks.reverse()
        chunk = nameChunks.pop()
        pythonCode = chunk[0] + chunk[2]
        while nameChunks:
            chunk = nameChunks.pop()
            pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
        return pythonCode
    def genNameMapperVar(self, nameChunks):
        """Generate valid Python code for a Cheetah $var, using NameMapper
        (Unified Dotted Notation with the SearchList).
        nameChunks = list of var subcomponents represented as tuples
          [ (name,useAC,remainderOfExpr),
          ]
        where:
          name = the dotted name base
          useAC = where NameMapper should use autocalling on namemapperPart
          remainderOfExpr = any arglist, index, or slice
        If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
        is False, otherwise it defaults to True. It is overridden by the global
        setting 'useAutocalling' if this setting is False.
        EXAMPLE
        ------------------------------------------------------------------------
        if the raw Cheetah Var is
          $a.b.c[1].d().x.y.z
        nameChunks is the list
          [ ('a.b.c',True,'[1]'), # A
            ('d',False,'()'),     # B
            ('x.y.z',True,''),    # C
          ]
        When this method is fed the list above it returns
          VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
        which can be represented as
          VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
        where:
          VFN = NameMapper.valueForName
          VFFSL = NameMapper.valueFromFrameOrSearchList
          VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
          SL = self.searchList()
          useAC = self.setting('useAutocalling') # True in this example
          A = ('a.b.c',True,'[1]')
          B = ('d',False,'()')
          C = ('x.y.z',True,'')
          C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
                         'd',False)(),
                    'x.y.z',True)
             = VFN(B`, name='x.y.z', executeCallables=True)
          B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
          A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
        Note, if the compiler setting useStackFrames=False (default is true)
        then
          A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
        This option allows Cheetah to be used with Psyco, which doesn't support
        stack frame introspection.
        """
        defaultUseAC = self.setting('useAutocalling')
        useSearchList = self.setting('useSearchList')
        nameChunks.reverse()
        name, useAC, remainder = nameChunks.pop()
        if not useSearchList:
            ## no searchList: the first name segment resolves as a plain Python name
            firstDotIdx = name.find('.')
            if firstDotIdx != -1 and firstDotIdx < len(name):
                beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
                pythonCode = ('VFN(' + beforeFirstDot +
                              ',"' + afterDot +
                              '",' + repr(defaultUseAC and useAC) + ')'
                              + remainder)
            else:
                pythonCode = name+remainder
        elif self.setting('useStackFrames'):
            pythonCode = ('VFFSL(SL,'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        else:
            pythonCode = ('VFSL([locals()]+SL+[globals(), builtin],'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        ## wrap each subsequent chunk in a VFN() lookup around the code built so far
        while nameChunks:
            name, useAC, remainder = nameChunks.pop()
            pythonCode = ('VFN(' + pythonCode +
                          ',"' + name +
                          '",' + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
    def __init__(self, methodName, classCompiler,
                 initialMethodComment=None,
                 decorators=None):
        # the class compiler doubles as this method's settings manager
        self._settingsManager = classCompiler
        self._classCompiler = classCompiler
        self._moduleCompiler = classCompiler._moduleCompiler
        self._methodName = methodName
        self._initialMethodComment = initialMethodComment
        self._setupState()
        self._decorators = decorators or []
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionsStack = []
self._callRegionsStack = []
self._captureRegionsStack = []
self._filterRegionsStack = []
self._isErrorCatcherOn = False
self._hasReturnStatement = False
self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
## methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev +=1
def dedent(self):
if self._indentLev:
self._indentLev -=1
else:
raise Error('Attempt to dedent when the indentLev is 0')
## methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
__unicode__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody() )
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join( self._methodBodyChunks )
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2
docStr = (ind + '"""\n' + ind +
('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) +
'\n' + ind + '"""\n')
return docStr
## methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%', '%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
if filterArgs is None:
filterArgs = ''
if self.setting('includeRawExprInFilterArgs') and rawExpr:
filterArgs += ', rawExpr=%s'%repr(rawExpr)
if self.setting('alwaysFilterNone'):
if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
self.addChunk("_v = %s # %r"%(chunk, rawExpr))
if lineCol:
self.appendToPrevChunk(' on line %s, col %s'%lineCol)
else:
self.addChunk("_v = %s"%chunk)
if self.setting('useFilters'):
self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
else:
self.addChunk("if _v is not None: write(str(_v))")
else:
if self.setting('useFilters'):
self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs))
else:
self.addChunk("write(str(%s))"%chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if not self._pendingStrConstChunks:
return
strConst = ''.join(self._pendingStrConstChunks)
self._pendingStrConstChunks = []
if not strConst:
return
reprstr = repr(strConst)
i = 0
out = []
if reprstr.startswith('u'):
i = 1
out = ['u']
body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1])
if reprstr[i]=="'":
out.append("'''")
out.append(body)
out.append("'''")
else:
out.append('"""')
out.append(body)
out.append('"""')
self.addWriteChunk(''.join(out))
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm)
def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
cacheTokenParts, lineCol,
silentMode=False):
cacheInfo = self.genCacheInfo(cacheTokenParts)
if cacheInfo:
cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)
if self.isErrorCatcherOn():
methodName = self._classCompiler.addErrorCatcherCall(
expr, rawCode=rawPlaceholder, lineCol=lineCol)
expr = 'self.' + methodName + '(localsDict=locals())'
if silentMode:
self.addChunk('try:')
self.indent()
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
self.dedent()
self.addChunk('except NotFound: pass')
else:
self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
if self.setting('outputRowColComments'):
self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
if cacheInfo:
self.endCacheRegion()
def addSilent(self, expr):
self.addChunk( expr )
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
def addSet(self, expr, exprComponents, setStyle):
if setStyle is SET_GLOBAL:
(LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
exprComponents.OP,
exprComponents.RVALUE)
# we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2==-1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos >0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
if setStyle is SET_MODULE:
self._moduleCompiler.addModuleGlobal(expr)
else:
self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr +
', trans=trans, ' +
'includeFrom="' + includeFrom + '", raw=' +
repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
#the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk( expr )
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addOneLineIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
"""For a single-lie #if ... then .... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr, lineCol=lineCol)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#'+parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
def addYield(self, expr):
assert not self._hasReturnStatement
self._isGenerator = True
if expr.replace('yield', '').strip():
self.addChunk(expr)
else:
self.addChunk('if _dummyTrans:')
self.indent()
self.addChunk('yield trans.response().getvalue()')
self.addChunk('trans = DummyTransaction()')
self.addChunk('write = trans.response().write')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'raise TypeError("This method cannot be called with a trans arg")')
self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('_filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return ('_'+str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
# @@TR: we should add some runtime logging to this
ID = self.nextCacheID()
interval = cacheInfo.get('interval', None)
test = cacheInfo.get('test', None)
customID = cacheInfo.get('id', None)
if customID:
ID = customID
varyBy = cacheInfo.get('varyBy', repr(ID))
self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
# @@TR: add this to a special class var as well
self.addChunk('')
self.addChunk('## START CACHE REGION: ID='+ID+
'. line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_RECACHE_%(ID)s = False'%locals())
self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
+ repr(ID)
+ ', cacheInfo=%r'%cacheInfo
+ ')')
self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
+varyBy+')')
self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
self.dedent()
self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
#self.addChunk('print "DEBUG"+"-"*50')
self.addChunk('try:')
self.indent()
self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
self.dedent()
self.addChunk('except KeyError:')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True'%locals())
#self.addChunk('print "DEBUG"+"*"*50')
self.dedent()
self.addChunk('else:')
self.indent()
self.addWriteChunk('_output')
self.addChunk('del _output')
self.dedent()
self.dedent()
self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
self.indent()
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
if interval:
self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
+ str(interval) + ")")
def endCacheRegion(self):
ID = self._cacheRegionsStack.pop()
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
self.addWriteChunk('_cacheData')
self.addChunk('del _cacheData')
self.addChunk('del _cacheCollector_%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.dedent()
self.addChunk('## END CACHE REGION: '+ID)
self.addChunk('')
def nextCallRegionID(self):
return self.nextCacheID()
def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
class CallDetails(object):
pass
callDetails = CallDetails()
callDetails.ID = ID = self.nextCallRegionID()
callDetails.functionName = functionName
callDetails.args = args
callDetails.lineCol = lineCol
callDetails.usesKeywordArgs = False
self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler
self.addChunk('## START %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def setCallArg(self, argName, lineCol):
ID, callDetails = self._callRegionsStack[-1]
argName = str(argName)
if callDetails.usesKeywordArgs:
self._endCallArg()
else:
callDetails.usesKeywordArgs = True
self.addChunk('_callKws%(ID)s = {}'%locals())
self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
callDetails.currentArgname = argName
def _endCallArg(self):
ID, callDetails = self._callRegionsStack[-1]
currCallArg = callDetails.currentArgname
self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
' _callCollector%(ID)s.response().getvalue()')%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _callCollector%(ID)s.response().write'%locals())
def endCallRegion(self, regionTitle='CALL'):
ID, callDetails = self._callRegionsStack[-1]
functionName, initialKwArgs, lineCol = (
callDetails.functionName, callDetails.args, callDetails.lineCol)
def reset(ID=ID):
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
if not callDetails.usesKeywordArgs:
reset()
self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _callCollector%(ID)s'%locals())
if initialKwArgs:
initialKwArgs = ', '+initialKwArgs
self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
self.addChunk('del _callArgVal%(ID)s'%locals())
else:
if initialKwArgs:
initialKwArgs = initialKwArgs+', '
self._endCallArg()
reset()
self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
self.addChunk('del _callKws%(ID)s'%locals())
self.addChunk('## END %(regionTitle)s REGION: '%locals()
+ID
+' of '+functionName
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('')
self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
def startCaptureRegion(self, assignTo, lineCol):
class CaptureDetails: pass
captureDetails = CaptureDetails()
captureDetails.ID = ID = self.nextCaptureRegionID()
captureDetails.assignTo = assignTo
captureDetails.lineCol = lineCol
self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler
self.addChunk('## START CAPTURE REGION: '+ID
+' '+assignTo
+' at line %s, col %s'%lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans'%locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())
def endCaptureRegion(self):
ID, captureDetails = self._captureRegionsStack.pop()
assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
self.addChunk('trans = _orig_trans%(ID)s'%locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
self.addChunk('del _orig_trans%(ID)s'%locals())
self.addChunk('del _captureCollector%(ID)s'%locals())
self.addChunk('del _wasBuffering%(ID)s'%locals())
def setErrorCatcher(self, errorCatcherName):
self.turnErrorCatcherOn()
self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setTransform(self, transformer, isKlass):
self.addChunk('trans = TransformerTransaction()')
self.addChunk('trans._response = trans.response()')
self.addChunk('trans._response._filter = %s' % transformer)
self.addChunk('write = trans._response.write')
def setFilter(self, theFilter, isKlass):
class FilterDetails:
pass
filterDetails = FilterDetails()
filterDetails.ID = ID = self.nextFilterRegionID()
filterDetails.theFilter = theFilter
filterDetails.isKlass = isKlass
self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler
self.addChunk('_orig_filter%(ID)s = _filter'%locals())
if isKlass:
self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
'(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('_filter = self._CHEETAH__initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('_filter = self._CHEETAH__currentFilter'
+' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
+ 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
self.dedent()
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
#self.addChunk('_filter = self._CHEETAH__initialFilter')
#self.addChunk('_filter = _orig_filter%(ID)s'%locals())
self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
    """MethodCompiler variant used for ordinary template methods.

    Adds the automatic prologue/epilogue: a ``trans`` transaction argument
    (or a ``**KWS`` dict carrying it), the ``write``/``SL``/``_filter``
    setup, and the final ``return`` of the buffered output.
    """
    def _setupState(self):
        MethodCompiler._setupState(self)
        # Every auto method starts with an implicit `self` argument.
        self._argStringList = [ ("self", None) ]
        self._streamingEnabled = True
        self._isClassMethod = None
        self._isStaticMethod = None
    def _useKWsDictArgForPassingTrans(self):
        """True when `trans` should travel inside a **KWS dict instead of
        being a named parameter (per settings; never for 'respond')."""
        alreadyHasTransArg = [argname for argname, defval in self._argStringList
                              if argname=='trans']
        return (self.methodName()!='respond'
                and not alreadyHasTransArg
                and self.setting('useKWsDictArgForPassingTrans'))
    def isClassMethod(self):
        # Lazily derived from the method's decorator list.
        if self._isClassMethod is None:
            self._isClassMethod = '@classmethod' in self._decorators
        return self._isClassMethod
    def isStaticMethod(self):
        if self._isStaticMethod is None:
            self._isStaticMethod = '@staticmethod' in self._decorators
        return self._isStaticMethod
    def cleanupState(self):
        """Finalize the method: close any open regions, decide how `trans`
        is passed, then wrap the accumulated body with the auto setup and
        cleanup code."""
        MethodCompiler.cleanupState(self)
        self.commitStrConst()
        if self._cacheRegionsStack:
            self.endCacheRegion()
        if self._callRegionsStack:
            self.endCallRegion()
        if self._streamingEnabled:
            kwargsName = None
            positionalArgsListName = None
            for argname, defval in self._argStringList:
                if argname.strip().startswith('**'):
                    kwargsName = argname.strip().replace('**', '')
                    break
                elif argname.strip().startswith('*'):
                    positionalArgsListName = argname.strip().replace('*', '')
            if not kwargsName and self._useKWsDictArgForPassingTrans():
                kwargsName = 'KWS'
                self.addMethArg('**KWS', None)
            self._kwargsName = kwargsName
            if not self._useKWsDictArgForPassingTrans():
                if not kwargsName and not positionalArgsListName:
                    self.addMethArg('trans', 'None')
                else:
                    # *args/**kwargs present: can't add a safe `trans` param.
                    self._streamingEnabled = False
        self._indentLev = self.setting('initialMethIndentLevel')
        # Splice: setup code, then the original body, then cleanup code.
        mainBodyChunks = self._methodBodyChunks
        self._methodBodyChunks = []
        self._addAutoSetupCode()
        self._methodBodyChunks.extend(mainBodyChunks)
        self._addAutoCleanupCode()
    def _addAutoSetupCode(self):
        """Emit the standard prologue: resolve `trans`, bind `write`,
        set up the searchList `SL` and the output `_filter`."""
        if self._initialMethodComment:
            self.addChunk(self._initialMethodComment)
        if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod():
            if self._useKWsDictArgForPassingTrans() and self._kwargsName:
                self.addChunk('trans = %s.get("trans")'%self._kwargsName)
            self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
                          ' and not callable(self.transaction)):')
            self.indent()
            self.addChunk('trans = self.transaction'
                          ' # is None unless self.awake() was called')
            self.dedent()
            self.addChunk('if not trans:')
            self.indent()
            self.addChunk('trans = DummyTransaction()')
            if self.setting('autoAssignDummyTransactionToSelf'):
                self.addChunk('self.transaction = trans')
            self.addChunk('_dummyTrans = True')
            self.dedent()
            self.addChunk('else: _dummyTrans = False')
        else:
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('_dummyTrans = True')
        self.addChunk('write = trans.response().write')
        if self.setting('useNameMapper'):
            argNames = [arg[0] for arg in self._argStringList]
            allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
            if allowSearchListAsMethArg and 'SL' in argNames:
                pass
            elif allowSearchListAsMethArg and 'searchList' in argNames:
                self.addChunk('SL = searchList')
            elif not self.isClassMethod() and not self.isStaticMethod():
                self.addChunk('SL = self._CHEETAH__searchList')
            else:
                self.addChunk('SL = [KWS]')
        if self.setting('useFilters'):
            if self.isClassMethod() or self.isStaticMethod():
                self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
            else:
                self.addChunk('_filter = self._CHEETAH__currentFilter')
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## START - generated method body')
        self.addChunk('')
    def _addAutoCleanupCode(self):
        """Emit the standard epilogue; generators get no return statement."""
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## END - generated method body')
        self.addChunk('')
        if not self._isGenerator:
            self.addStop()
        self.addChunk('')
    def addStop(self, expr=None):
        # Return the buffered output only when we created the transaction.
        self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')
    def addMethArg(self, name, defVal=None):
        self._argStringList.append( (name, defVal) )
    def methodSignature(self):
        """Render the def line, honoring @classmethod/@staticmethod and
        any queued decorators."""
        argStringChunks = []
        for arg in self._argStringList:
            chunk = arg[0]
            if chunk == 'self' and self.isClassMethod():
                chunk = 'cls'
            if chunk == 'self' and self.isStaticMethod():
                # Skip the "self" method for @staticmethod decorators
                continue
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = (', ').join(argStringChunks)
        output = []
        if self._decorators:
            output.append(''.join([self._indent + decorator + '\n'
                                   for decorator in self._decorators]))
        output.append(self._indent + "def "
                      + self.methodName() + "(" +
                      argString + "):\n\n")
        return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n'+' '*8)
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
    def __init__(self, className, mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):
        """Set up compiler state and spawn the compiler for the main method."""
        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        # Optionally emit code that recompiles when the source file changes.
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
    def __getattr__(self, name):
        """Provide access to the methods and attributes of the MethodCompiler
        at the top of the activeMethods stack: one-way namespace sharing.

        WARNING: Use .setMethods to assign the attributes of the MethodCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead.
        """
        # Check the instance dict first, then the class, then delegate to the
        # currently active MethodCompiler (if any).
        if name in self.__dict__:
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
            return getattr(self._activeMethodsList[-1], name)
        else:
            raise AttributeError(name)
    def _setupState(self):
        """Initialize all per-class compilation state."""
        self._classDef = None
        self._decoratorsForNextMethod = []
        self._activeMethodsList = []        # stack while parsing/generating
        self._finishedMethodsList = []      # store by order
        self._methodsIndex = {}             # store by name
        self._baseClass = 'Template'
        self._classDocStringLines = []
        # printed after methods in the gen class def:
        self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
        self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
        self._generatedAttribs.append(
            '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
        if self.setting('addTimestampsToCompilerOutput'):
            self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
            self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
        self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
        self._generatedAttribs.append(
            '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')
        if self.setting('templateMetaclass'):
            self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
        self._initMethChunks = []
        self._blockMetaData = {}
        self._errorCatcherCount = 0
        self._placeholderToErrorCatcherMap = {}
    def cleanupState(self):
        """Finalize the class: swallow all remaining active method compilers,
        build __init__, and record the main-method marker attribute."""
        while self._activeMethodsList:
            methCompiler = self._popActiveMethodCompiler()
            self._swallowMethodCompiler(methCompiler)
        self._setupInitMethod()
        if self._mainMethodName == 'respond':
            if self.setting('setup__str__method'):
                self._generatedAttribs.append('def __str__(self): return self.respond()')
        self.addAttribute('_mainCheetahMethod_for_' + self._className +
                          '= ' + repr(self._mainMethodName) )
    def _setupInitMethod(self):
        """Generate the class's __init__ and insert it first in the method list."""
        __init__ = self._spawnMethodCompiler('__init__',
                                             klass=self.methodCompilerClassForInit)
        __init__.setMethodSignature("def __init__(self, *args, **KWs)")
        __init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className)
        __init__.addChunk(_initMethod_initCheetah % {'className' : self._className})
        for chunk in self._initMethChunks:
            __init__.addChunk(chunk)
        __init__.cleanupState()
        # pos=0 keeps __init__ at the top of the generated class body.
        self._swallowMethodCompiler(__init__, pos=0)
    def _addSourceFileMonitoring(self, fileName):
        """Emit code that recompiles the template at render time when the
        source file has been modified since compilation."""
        # @@TR: this stuff needs auditing for Cheetah 2.0
        # the first bit is added to init
        self.addChunkToInit('self._filePath = ' + repr(fileName))
        self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )
        # the rest is added to the main output method of the class ('mainMethod')
        self.addChunk('if exists(self._filePath) and ' +
                      'getmtime(self._filePath) > self._fileMtime:')
        self.indent()
        self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')')
        self.addChunk(
            'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
            ')(trans=trans))')
        self.addStop()
        self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
if methodName == self._mainMethodName:
return
## change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
## make sure that fileUpdate code still works properly:
chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
## get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
mainMethodCompiler = self._methodsIndex[self._mainMethodName]
for argName, defVal in argsList:
mainMethodCompiler.addMethArg(argName, defVal)
def _spawnMethodCompiler(self, methodName, klass=None,
initialMethodComment=None):
if klass is None:
klass = self.methodCompilerClass
decorators = self._decoratorsForNextMethod or []
self._decoratorsForNextMethod = []
methodCompiler = klass(methodName, classCompiler=self,
decorators=decorators,
initialMethodComment=initialMethodComment)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos==None:
self._finishedMethodsList.append( methodCompiler )
else:
self._finishedMethodsList.insert(pos, methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(
methodName, initialMethodComment=parserComment)
self._setActiveMethodCompiler(methodCompiler)
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
def _finishedMethods(self):
return self._finishedMethodsList
def addDecorator(self, decoratorExpr):
"""Set the decorator to be used with the next method in the source.
See _spawnMethodCompiler() and MethodCompiler for the details of how
this is used.
"""
self._decoratorsForNextMethod.append(decoratorExpr)
def addClassDocString(self, line):
self._classDocStringLines.append( line.replace('%', '%%'))
def addChunkToInit(self, chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
## first test to make sure that the user hasn't used any fancy Cheetah syntax
# (placeholders, directives, etc.) inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(self,
'Invalid #attr directive.' +
' It should only contain simple Python literals.')
## now add the attribute
self._generatedAttribs.append(attribExpr)
def addSuper(self, argsList, parserComment=None):
className = self._className #self._baseClass
methodName = self._getActiveMethodCompiler().methodName()
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if not arg[1] == None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = ','.join(argStringChunks)
self.addFilteredChunk(
'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals())
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
    # Generate (or reuse) a per-placeholder "error catcher" method that
    # evaluates ``codeChunk`` inside try/except and routes failures
    # through self._CHEETAH__errorCatcher.  Returns the method's name.
    if rawCode in self._placeholderToErrorCatcherMap:
        # Identical placeholder already wrapped elsewhere: reuse it.
        methodName = self._placeholderToErrorCatcherMap[rawCode]
        if not self.setting('outputRowColComments'):
            self._methodsIndex[methodName].addMethDocString(
                'plus at line %s, col %s' % lineCol)
        return methodName
    self._errorCatcherCount += 1
    methodName = '__errorCatcher' + str(self._errorCatcherCount)
    self._placeholderToErrorCatcherMap[rawCode] = methodName
    catcherMeth = self._spawnMethodCompiler(
        methodName,
        klass=MethodCompiler,
        initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                              ' at line %s, col %s' % lineCol + '.')
        )
    catcherMeth.setMethodSignature('def ' + methodName +
                                   '(self, localsDict={})')
    # is this use of localsDict right?
    catcherMeth.addChunk('try:')
    catcherMeth.indent()
    catcherMeth.addChunk("return eval('''" + codeChunk +
                         "''', globals(), localsDict)")
    catcherMeth.dedent()
    # NB: the generated target code uses Python 2 ``except E, e:`` syntax.
    catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
    catcherMeth.indent()
    catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                         repr(codeChunk) + " , rawCode= " +
                         repr(rawCode) + " , lineCol=" + str(lineCol) + ")")
    catcherMeth.cleanupState()
    self._swallowMethodCompiler(catcherMeth)
    return methodName
def closeDef(self):
    # End a #def: flush any pending literal text and archive the method.
    self.commitStrConst()
    methCompiler = self._popActiveMethodCompiler()
    self._swallowMethodCompiler(methCompiler)

def closeBlock(self):
    # End a #block: like closeDef, but also emits an inline call to the
    # new method so the block's output appears where it was defined.
    self.commitStrConst()
    methCompiler = self._popActiveMethodCompiler()
    methodName = methCompiler.methodName()
    if self.setting('includeBlockMarkers'):
        endMarker = self.setting('blockMarkerEnd')
        methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
    self._swallowMethodCompiler(methCompiler)

    #metaData = self._blockMetaData[methodName]
    #rawDirective = metaData['raw']
    #lineCol = metaData['lineCol']

    ## insert the code to call the block, caching if #cache directive is on
    codeChunk = 'self.' + methodName + '(trans=trans)'
    self.addChunk(codeChunk)
    #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
    #if self.setting('outputRowColComments'):
    #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
## code wrapping methods

def classDef(self):
    # Cached accessor for the full generated class source.
    if self._classDef:
        return self._classDef
    else:
        return self.wrapClassDef()

# str()/unicode() of a ClassCompiler yield the generated class source.
__str__ = classDef
__unicode__ = classDef
def wrapClassDef(self):
    # Assemble the class source: signature, docstring, then the generated
    # methods/attributes in the order requested by the
    # 'outputMethodsBeforeAttributes' setting.  Result is cached.
    ind = self.setting('indentationStep')
    classDefChunks = [self.classSignature(),
                      self.classDocstring(),
                      ]

    def addMethods():
        # closure: appends the generated-methods section to classDefChunks
        classDefChunks.extend([
            ind + '#'*50,
            ind + '## CHEETAH GENERATED METHODS',
            '\n',
            self.methodDefs(),
            ])

    def addAttributes():
        # closure: appends the generated-attributes section
        classDefChunks.extend([
            ind + '#'*50,
            ind + '## CHEETAH GENERATED ATTRIBUTES',
            '\n',
            self.attributes(),
            ])
    if self.setting('outputMethodsBeforeAttributes'):
        addMethods()
        addAttributes()
    else:
        addAttributes()
        addMethods()
    classDef = '\n'.join(classDefChunks)
    self._classDef = classDef   # cache for classDef()
    return classDef
def classSignature(self):
    """Return the ``class Name(Base):`` header line for the generated class."""
    name = self.className()
    base = self._baseClass
    return 'class ' + name + '(' + base + '):'
def classDocstring(self):
    # Build the generated class's docstring.  The collected lines were
    # %-escaped by addClassDocString(); the %-formatting here both
    # substitutes the indent and collapses '%%' back to '%'.
    if not self._classDocStringLines:
        return ''
    ind = self.setting('indentationStep')
    docStr = ('%(ind)s"""\n%(ind)s' +
              '\n%(ind)s'.join(self._classDocStringLines) +
              '\n%(ind)s"""\n'
              ) % {'ind': ind}
    return docStr
def methodDefs(self):
    """Join the source of every finished method, blank-line separated."""
    rendered = []
    for methGen in self._finishedMethods():
        rendered.append(methGen.methodDef())
    return '\n\n'.join(rendered)

def attributes(self):
    """Render the collected #attr expressions, indented, blank-line separated."""
    ind = self.setting('indentationStep')
    return '\n\n'.join(ind + str(attrib) for attrib in self._generatedAttribs)
class AutoClassCompiler(ClassCompiler):
    # Placeholder subclass: the default ``classCompilerClass`` used by
    # ModuleCompiler; exists as a hook point for customization.
    pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
             moduleName='DynamicallyCompiledCheetahTemplate',
             mainClassName=None,          # string
             mainMethodName=None,         # string
             baseclassName=None,          # string
             extraImportStatements=None,  # list of strings (NB: currently unused here)
             settings=None                # dict
             ):
    # Compile one Cheetah template -- given either a source string or a
    # ``file`` (filename string or file-like object) -- into a Python module.
    super(ModuleCompiler, self).__init__()
    if settings:
        self.updateSettings(settings)
    # disable useStackFrames if the C version of NameMapper isn't compiled
    # it's painfully slow in the Python version and bites Windows users all
    # the time:
    if not NameMapper.C_VERSION:
        if not sys.platform.startswith('java'):
            if False:   # warning deliberately disabled; body kept for reference
                warnings.warn(
                    "\nYou don't have the C version of NameMapper installed! "
                    "I'm disabling Cheetah's useStackFrames option as it is "
                    "painfully slow with the Python version of NameMapper. "
                    "You should get a copy of Cheetah with the compiled C version of NameMapper."
                    )
            self.setSetting('useStackFrames', False)
    self._compiled = False
    self._moduleName = moduleName
    if not mainClassName:
        self._mainClassName = moduleName
    else:
        self._mainClassName = mainClassName
    self._mainMethodNameArg = mainMethodName
    if mainMethodName:
        self.setSetting('mainMethodName', mainMethodName)
    self._baseclassName = baseclassName
    self._filePath = None
    self._fileMtime = None
    if source and file:
        raise TypeError("Cannot compile from a source string AND file.")
    elif isinstance(file, basestring):   # it's a filename.
        f = open(file)   # Raises IOError.
        source = f.read()
        f.close()
        self._filePath = file
        self._fileMtime = os.path.getmtime(file)
    elif hasattr(file, 'read'):
        source = file.read()   # Can't set filename or mtime--they're not accessible.
    elif file:
        raise TypeError("'file' argument must be a filename string or file-like object")
    if self._filePath:
        self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
        self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName)
    if not isinstance(source, basestring):
        source = unicode(source)
    # by converting to string here we allow objects such as other Templates
    # to be passed in

    # Handle the #indent directive by converting it to other directives.
    # (Over the long term we'll make it a real directive.)
    if source == "":
        warnings.warn("You supplied an empty string for the source!", )
    else:
        # #unicode and #encoding are mutually exclusive ways of declaring
        # how the source bytes should be decoded.
        unicodeMatch = unicodeDirectiveRE.search(source)
        encodingMatch = encodingDirectiveRE.search(source)
        if unicodeMatch:
            if encodingMatch:
                raise ParseError(
                    self, "#encoding and #unicode are mutually exclusive! "
                    "Use one or the other.")
            source = unicodeDirectiveRE.sub('', source)
            if isinstance(source, str):
                encoding = unicodeMatch.group(1) or 'ascii'
                source = unicode(source, encoding)
        elif encodingMatch:
            encodings = encodingMatch.groups()
            if len(encodings):
                encoding = encodings[0]
                source = source.decode(encoding)
        else:
            # NOTE(review): branch nesting reconstructed from a
            # whitespace-mangled source; confirm against upstream.
            source = unicode(source)
    if source.find('#indent') != -1:   #@@TR: undocumented hack
        source = indentize(source)

    self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
    self._setupCompilerState()
def __getattr__(self, name):
    """Provide one-way access to the methods and attributes of the
    ClassCompiler, and thereby the MethodCompilers as well.

    WARNING: Use .setMethods to assign the attributes of the ClassCompiler
    from the methods of this class!!! or you will be assigning to attributes
    of this object instead.
    """
    # Only reached when normal lookup fails: check our own dict, then the
    # class, then delegate to the innermost active ClassCompiler.
    if name in self.__dict__:
        return self.__dict__[name]
    elif hasattr(self.__class__, name):
        return getattr(self.__class__, name)
    elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
        return getattr(self._activeClassesList[-1], name)
    else:
        raise AttributeError(name)

def _initializeSettings(self):
    # Deep copy so instance-level tweaks never mutate the shared
    # DEFAULT_COMPILER_SETTINGS dict.
    self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
    # Reset all per-compilation state: class stacks, module-prologue
    # pieces, and the default import/constant lists for generated modules.
    self._activeClassesList = []
    self._finishedClassesList = []   # listed by ordered
    self._finishedClassIndex = {}    # listed by name
    self._moduleDef = None
    self._moduleShBang = '#!/usr/bin/env python'
    self._moduleEncoding = 'ascii'
    self._moduleEncodingStr = ''
    self._moduleHeaderLines = []
    self._moduleDocStringLines = []
    self._specialVars = {}
    # Source lines (including a literal try/except) emitted verbatim at
    # the top of every generated module.
    # NOTE(review): the indent width inside the try/except strings was
    # reconstructed from a whitespace-mangled source; confirm.
    self._importStatements = [
        "import sys",
        "import os",
        "import os.path",
        'try:',
        '    import builtins as builtin',
        'except ImportError:',
        '    import __builtin__ as builtin',
        "from os.path import getmtime, exists",
        "import time",
        "import types",
        "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
        "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
        "from Cheetah.Template import Template",
        "from Cheetah.DummyTransaction import *",
        "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
        "from Cheetah.CacheRegion import CacheRegion",
        "import Cheetah.Filters as Filters",
        "import Cheetah.ErrorCatchers as ErrorCatchers",
        ]
    # Names assumed bound at module level in generated modules; consulted
    # by the #extends auto-import logic.
    self._importedVarNames = ['sys',
                              'os',
                              'os.path',
                              'time',
                              'types',
                              'Template',
                              'DummyTransaction',
                              'NotFound',
                              'Filters',
                              'ErrorCatchers',
                              'CacheRegion',
                              ]
    # Module-level constants/aliases prepended to every generated module.
    self._moduleConstants = [
        "VFFSL=valueFromFrameOrSearchList",
        "VFSL=valueFromSearchList",
        "VFN=valueForName",
        "currentTime=time.time",
        ]
def compile(self):
    # Drive the whole compilation: spawn the main class compiler, parse
    # the template source into it, then mark the module as compiled.
    classCompiler = self._spawnClassCompiler(self._mainClassName)
    if self._baseclassName:
        classCompiler.setBaseClass(self._baseclassName)
    self._addActiveClassCompiler(classCompiler)
    self._parser.parse()
    self._swallowClassCompiler(self._popActiveClassCompiler())
    self._compiled = True
    self._parser.cleanup()
def _spawnClassCompiler(self, className, klass=None):
    # Create a class compiler (default: self.classCompilerClass) wired to
    # this module compiler and sharing its settings.
    if klass is None:
        klass = self.classCompilerClass
    classCompiler = klass(className,
                          moduleCompiler=self,
                          mainMethodName=self.setting('mainMethodName'),
                          fileName=self._filePath,
                          settingsManager=self,
                          )
    return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append( classCompiler )
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
    # Names known to be bound at module level by the imports collected so
    # far; consulted when #extends decides whether to synthesize imports.
    return self._importedVarNames

def addImportedVarNames(self, varNames, raw_statement=None):
    # Register names bound by an import statement.  Outside legacy mode,
    # an import that appears mid-method is emitted inline as a code chunk
    # instead of extending the hoisted-name list.
    settings = self.settings()
    if not varNames:
        return
    if not settings.get('useLegacyImportMode'):
        if raw_statement and getattr(self, '_methodBodyChunks'):
            self.addChunk(raw_statement)
    else:
        self._importedVarNames.extend(varNames)
## methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
    # Handle the #extends directive: pick the subclass main-method name
    # and decide whether an import must be synthesized for the base class.
    if self._mainMethodNameArg:
        self.setMainMethodName(self._mainMethodNameArg)
    else:
        self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))

    if self.setting('handlerForExtendsDirective'):
        # a user-supplied hook gets full control over the base-class name
        handler = self.setting('handlerForExtendsDirective')
        baseClassName = handler(compiler=self, baseClassName=baseClassName)
        self._getActiveClassCompiler().setBaseClass(baseClassName)
    elif (not self.setting('autoImportForExtendsDirective')
          or baseClassName == 'object' or baseClassName in self.importedVarNames()):
        self._getActiveClassCompiler().setBaseClass(baseClassName)
        # no need to import
    else:
        ##################################################
        ## If the #extends directive contains a classname or modulename that isn't
        # in self.importedVarNames() already, we assume that we need to add
        # an implied 'from ModName import ClassName' where ModName == ClassName.
        # - This is the case in WebKit servlet modules.
        # - We also assume that the final . separates the classname from the
        # module name.  This might break if people do something really fancy
        # with their dots and namespaces.
        baseclasses = baseClassName.split(',')
        for klass in baseclasses:
            chunks = klass.split('.')
            if len(chunks) == 1:
                # bare name: module assumed to share the class's name
                self._getActiveClassCompiler().setBaseClass(klass)
                if klass not in self.importedVarNames():
                    modName = klass
                    # we assume the class name to be the module name
                    # and that it's not a builtin:
                    importStatement = "from %s import %s" % (modName, klass)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames((klass,))
            else:
                # dotted name: find the longest already-imported module
                # prefix; otherwise import the final component.
                needToAddImport = True
                modName = chunks[0]
                #print chunks, ':', self.importedVarNames()
                for chunk in chunks[1:-1]:
                    if modName in self.importedVarNames():
                        needToAddImport = False
                        finalBaseClassName = klass.replace(modName + '.', '')
                        self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
                        break
                    else:
                        modName += '.' + chunk
                if needToAddImport:
                    modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
                    #if finalClassName != chunks[:-1][-1]:
                    if finalClassName != chunks[-2]:
                        # we assume the class name to be the module name
                        modName = '.'.join(chunks)
                    self._getActiveClassCompiler().setBaseClass(finalClassName)
                    importStatement = "from %s import %s" % (modName, finalClassName)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames([finalClassName, ])
def setCompilerSetting(self, key, valueExpr):
    # NOTE: eval() of directive-supplied text.  #compiler directives are
    # template-author (trusted) input by design, but this is unsafe for
    # untrusted templates.
    self.setSetting(key, eval(valueExpr))
    self._parser.configureParser()

def setCompilerSettings(self, keywords, settingsStr):
    # Bulk settings update from a #compiler-settings directive body.
    KWs = keywords
    merge = True
    if 'nomerge' in KWs:
        merge = False   # NB: computed but not consulted below

    if 'reset' in KWs:
        # @@TR: this is actually caught by the parser at the moment.
        # subject to change in the future
        self._initializeSettings()
        self._parser.configureParser()
        return
    elif 'python' in KWs:
        settingsReader = self.updateSettingsFromPySrcStr
        # this comes from SettingsManager
    else:
        # this comes from SettingsManager
        settingsReader = self.updateSettingsFromConfigStr
    settingsReader(settingsStr)
    self._parser.configureParser()
def setShBang(self, shBang):
    """Record the #! line for the top of the generated module."""
    self._moduleShBang = shBang

def setModuleEncoding(self, encoding):
    """Record the generated module's source encoding."""
    self._moduleEncoding = encoding

def getModuleEncoding(self):
    """Return the generated module's source encoding."""
    return self._moduleEncoding

def addModuleHeader(self, line):
    """Append one comment line to the generated module's header block."""
    self._moduleHeaderLines += [line]

def addModuleDocString(self, line):
    """Append one line to the generated module docstring."""
    self._moduleDocStringLines += [line]

def addModuleGlobal(self, line):
    """Append a line of module-level code, emitted after the import
    statements and the default Cheetah module constants."""
    self._moduleConstants += [line]
def addSpecialVar(self, basename, contents, includeUnderscores=True):
    """Adds module __specialConstant__ to the module globals.

    basename -- variable name without the dunder decoration
    contents -- value string; surrounding whitespace is stripped
    includeUnderscores -- wrap the name as __basename__ when True
    """
    # conditional expression instead of the fragile ``cond and a or b``
    # idiom, which silently misbehaves whenever ``a`` is falsy
    name = '__' + basename + '__' if includeUnderscores else basename
    self._specialVars[name] = contents.strip()
def addImportStatement(self, impStatement):
    # Register an import.  Hoist it to the top of the generated module
    # unless we are mid-method-body in non-legacy import mode.
    settings = self.settings()
    if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
        # In the case where we are importing inline in the middle of a source block
        # we don't want to inadvertantly import the module at the top of the file either
        self._importStatements.append(impStatement)

    #@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
    importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
    importVarNames = [var.split()[-1] for var in importVarNames]   # handles aliases
    importVarNames = [var for var in importVarNames if not var == '*']
    self.addImportedVarNames(importVarNames, raw_statement=impStatement)   # used by #extend for auto-imports
def addAttribute(self, attribName, expr):
    """Forward an #attr declaration to the class currently being built."""
    target = self._getActiveClassCompiler()
    target.addAttribute('%s =%s' % (attribName, expr))
def addComment(self, comm):
    # Route a template ## comment to the right docstring/header bucket
    # based on its prefix; plain comments become method comments.
    if re.match(r'#+$', comm):   # skip bar comments
        return

    specialVarMatch = specialVarRE.match(comm)
    if specialVarMatch:
        # @@TR: this is a bit hackish and is being replaced with
        # #set module varName = ...
        return self.addSpecialVar(specialVarMatch.group(1),
                                  comm[specialVarMatch.end():])
    elif comm.startswith('doc:'):
        addLine = self.addMethDocString
        comm = comm[len('doc:'):].strip()
    elif comm.startswith('doc-method:'):
        addLine = self.addMethDocString
        comm = comm[len('doc-method:'):].strip()
    elif comm.startswith('doc-module:'):
        addLine = self.addModuleDocString
        comm = comm[len('doc-module:'):].strip()
    elif comm.startswith('doc-class:'):
        addLine = self.addClassDocString
        comm = comm[len('doc-class:'):].strip()
    elif comm.startswith('header:'):
        addLine = self.addModuleHeader
        comm = comm[len('header:'):].strip()
    else:
        addLine = self.addMethComment

    for line in comm.splitlines():
        addLine(line)
## methods for module code wrapping

def getModuleCode(self):
    # Public accessor: compile on demand and return the (cached) full
    # generated module source.
    if not self._compiled:
        self.compile()
    if self._moduleDef:
        return self._moduleDef
    else:
        return self.wrapModuleDef()

# str() of a ModuleCompiler yields the generated module source.
__str__ = getModuleCode
def wrapModuleDef(self):
    # Assemble the final module source from header, imports, constants,
    # special vars, the generated classes and the footer; result is cached.
    self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
    self.addModuleGlobal('__CHEETAH_version__ = %r' % Version)
    self.addModuleGlobal('__CHEETAH_versionTuple__ = %r' % (VersionTuple,))
    if self.setting('addTimestampsToCompilerOutput'):
        self.addModuleGlobal('__CHEETAH_genTime__ = %r' % time.time())
        self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r' % self.timestamp())
    if self._filePath:
        timestamp = self.timestamp(self._fileMtime)
        self.addModuleGlobal('__CHEETAH_src__ = %r' % self._filePath)
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r' % timestamp)
    else:
        self.addModuleGlobal('__CHEETAH_src__ = None')
        self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')

    # NOTE(review): blank lines and the indentation of the generated
    # ``raise``/``if`` bodies inside this template were reconstructed from
    # a whitespace-mangled source; confirm against upstream Cheetah if
    # byte-identical compiler output matters.
    moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %%s. Templates compiled before version %%s must be recompiled.'%%(
          __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
    templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {'header': self.moduleHeader(),
       'docstring': self.moduleDocstring(),
       'specialVars': self.specialVars(),
       'imports': self.importStatements(),
       'constants': self.moduleConstants(),
       'classes': self.classDefs(),
       'footer': self.moduleFooter(),
       'mainClassName': self._mainClassName,
       }
    self._moduleDef = moduleDef   # cache for getModuleCode()
    return moduleDef
def timestamp(self, theTime=None):
    """Return ``theTime`` (seconds since the epoch) formatted via
    time.asctime(); defaults to the current time.
    """
    # was ``if not theTime``, which wrongly treated the epoch itself
    # (theTime == 0) as "use the current time"
    if theTime is None:
        theTime = time.time()
    return time.asctime(time.localtime(theTime))
def moduleHeader(self):
    """Assemble the shebang, encoding declaration and '#'-prefixed header
    comment lines for the top of the generated module."""
    pieces = [self._moduleShBang, '\n', self._moduleEncodingStr, '\n']
    if self._moduleHeaderLines:
        pad = ' ' * self.setting('commentOffset')
        for line in self._moduleHeaderLines:
            pieces.append('#' + pad + line + '\n')
    return ''.join(pieces)
def moduleDocstring(self):
    """Return the triple-quoted module docstring, or '' when empty."""
    lines = self._moduleDocStringLines
    if not lines:
        return ''
    return '"""%s\n"""\n' % '\n'.join(lines)

def specialVars(self):
    """Render the __special__ module constants as assignment lines,
    sorted by name for deterministic output."""
    rendered = []
    for name in sorted(self._specialVars):
        rendered.append('%s = %r' % (name, self._specialVars[name]))
    return '\n'.join(rendered)
def importStatements(self):
    """One hoisted import statement per line, in registration order."""
    return '\n'.join(self._importStatements)

def moduleConstants(self):
    """Module-level constant/global lines, newline separated."""
    return '\n'.join(self._moduleConstants)

def classDefs(self):
    """The generated source of every finished class, blank-line separated."""
    rendered = []
    for klass in self._finishedClasses():
        rendered.append(klass.classDef())
    return '\n\n'.join(rendered)
def moduleFooter(self):
    # Trailing boilerplate: credits plus a command-line entry point for
    # the generated module.
    # NOTE(review): blank lines and the indentation of the generated
    # ``if __name__`` body were reconstructed from a whitespace-mangled
    # source; confirm against upstream Cheetah.
    return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=%(className)s()).run()
""" % {'className': self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
# Public entry point: ``Compiler`` is the name the rest of Cheetah imports.
Compiler = ModuleCompiler
| jacklee0810/QMarkdowner | Cheetah/Compiler.py | Python | mit | 80,447 | [
"VisIt"
] | d3f6ceb9621a62d733b1477dfbdcec4be1dcbd5efd48ac1217a754cf09fe1959 |
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import re
from commoncode import fileutils
"""
Utilities to handle RPM NEVRA (name, epoch, version, release, architecture)
"""
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# modified and originally from:
# https://raw.githubusercontent.com/sassoftware/conary/c26507001b62b0839539908cc5bf28893c45c0b4/conary/rpmhelper.py
def from_name(filename):
    """
    Return an (E, N, V, R, A) tuple given a file name, by splitting
    [e:]name-version-release.arch into the four possible subcomponents.
    Default epoch, version, release and arch to None if not specified.
    Accepts RPM names with and without extensions.

    Returns None when the name does not match the N-V-R.A pattern.
    """
    # raw string avoids the deprecated invalid-escape ``\.`` in a plain literal
    _re = re.compile(r"^(.*)-([^-]*)-([^-]*)\.([^.]*)$")
    file_ext = fileutils.file_extension(filename) or None
    # was ['.rpm', 'srpm']: the missing dot meant source RPMs never had
    # their extension stripped, so they failed to parse and the
    # ``a = 'src'`` branch below was unreachable
    if file_ext in ('.rpm', '.srpm'):
        filename = filename[:-len(file_ext)]
    m = _re.match(filename)
    if not m:
        return None
    n, v, r, a = m.groups()
    if file_ext == '.srpm':
        # source RPMs carry the pseudo-architecture 'src'
        a = 'src'
    if ':' not in v:
        # no epoch prefix present
        return None, n, v, r, a
    e, v = v.split(':', 1)
    e = int(e)
    return (e, n, v, r, a)
| yasharmaster/scancode-toolkit | src/packagedcode/nevra.py | Python | apache-2.0 | 3,033 | [
"VisIt"
] | 16998c178acd9739f88df272123736c9ac8c30e9394e666ae0c220042f85411d |
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'abinit'
tab.settings['Output file name'] = 'BaTiO3.out'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Effective medium method'] = 'Averaged permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Legend'] = 'Averaged permittivity'
# Add new scenarios
methods = ['Bruggeman']
shapes = ['Ellipsoid','Plate']
hkls = [[0,0,1], [1,0,0]]
vfs = [0.1]
sizes = [0.0, 1.0, 3.0]
for method in methods:
for shape,hkl in zip(shapes,hkls):
for vf in vfs:
for size in sizes:
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Particle shape'] = shape
tab.settings['Particle size(mu)'] = size
tab.settings['Effective medium method'] = method
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
#tab.settings['Legend'] = method + ' ' + shape + ' vf='+str(vf)+' size='+str(size)
tab.settings['Legend'] = method + ' ' + shape + str(hkl) + ' size='+str(size)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0.0
tab.settings['Maximum frequency'] = 300.0
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Size Effects BaTiO3'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
| JohnKendrick/PDielec | Examples/SizeEffects/BaTiO3/script.py | Python | mit | 2,397 | [
"ABINIT"
] | 932ee8cfc15ea2d325ba7102100174304913939986dab31bfd3cc7a0aec9ef21 |
""" Author: Ning Guo <ceguo@connect.ust.hk>
run `mv retainingSmooth.yade.gz 0.yade.gz`
to generate initial RVE packing
"""
from esys.escript import *
from esys.finley import Rectangle
from esys.weipa import saveVTK
from esys.escript.pdetools import Projector
from msFEM2D import MultiScale
from saveGauss import saveGauss2D
import time
# --- problem setup -----------------------------------------------------------
vel = -0.00017; surcharge=-2.e4; # surcharge equals to the initial vertical stress of the RVE packing; vel<0 passive failure; vel>0 active failure
B = 0.4; H = 0.2; wallH = 0.17; baseH = H-wallH; # setup dimensions
nx = 40; ny = 20 # discretization with 40x20 quads
mydomain = Rectangle(l0=B,l1=H,n0=nx,n1=ny,order=2,integrationOrder=2)
dim = mydomain.getDim()
k = kronecker(mydomain)
numg = 4*nx*ny; nump = 16;          # Gauss points / RVE processes
packNo=range(0,numg,16)             # subset of RVE packings to snapshot
disp = Vector(0.,Solution(mydomain))    # accumulated displacement field
prob = MultiScale(domain=mydomain,ng=numg,np=nump,random=False,rtol=1e-2,usePert=False,pert=-2.e-5,verbose=True)
t=0
time_start = time.time()
# --- boundary classification (characteristic functions) ----------------------
x = mydomain.getX()
bx = FunctionOnBoundary(mydomain).getX()
left = whereZero(x[0])
right = whereZero(x[0]-B)
bottom = whereZero(x[1])
top = whereZero(bx[1]-H)
base = whereZero(x[0]-B)*whereNegative(x[1]-baseH)
wall = whereZero(x[0]-B)*whereNonNegative(x[1]-baseH)
wallBF = whereZero(bx[0]-B)*whereNonNegative(bx[1]-baseH)
# Dirichlet BC, all fixed in space except wall (only fixed in x direction, smooth)
Dbc = left*[1,1]+base*[1,1]+bottom*[1,1]+wall*[1,0]
Vbc = left*[0,0]+base*[0,0]+bottom*[0,0]+wall*[vel,0]
# Neumann BC, apply surcharge at the top surface
Nbc = top*[0,surcharge]
# --- initial wall reaction from the starting stress state --------------------
stress = prob.getCurrentStress()
proj = Projector(mydomain)
sig = proj(stress)
sig_bounda = interpolate(sig,FunctionOnBoundary(mydomain))
traction = matrix_mult(sig_bounda,mydomain.getNormal())
tract = traction*wallBF # traction on wall
forceWall = integrate(tract,where=FunctionOnBoundary(mydomain)) # force on wall
lengthWall = integrate(wallBF,where=FunctionOnBoundary(mydomain))
# NB: ``file`` is the Python 2 built-in constructor; use open() under Py3
fout=file('./result/pressure.dat','w')
fout.write('0 '+str(forceWall[0])+' '+str(lengthWall)+'\n')
# --- quasi-static loading loop: 100 displacement increments of the wall ------
while t < 100:
    prob.initialize(f=Nbc,specified_u_mask=Dbc,specified_u_val=Vbc)
    t += 1
    du=prob.solve(iter_max=100)
    disp += du
    stress=prob.getCurrentStress()
    dom = prob.getDomain()
    proj = Projector(dom)
    sig = proj(stress)
    sig_bounda = interpolate(sig,FunctionOnBoundary(dom))
    traction = matrix_mult(sig_bounda,dom.getNormal())
    tract = traction*wallBF
    forceWall = integrate(tract,where=FunctionOnBoundary(dom))
    lengthWall = integrate(wallBF,where=FunctionOnBoundary(dom))
    fout.write(str(t*vel)+' '+str(forceWall[0])+' '+str(lengthWall)+'\n')
    # per-step state dumps: local RVE quantities plus VTK fields
    vR=prob.getLocalVoidRatio()
    rotation=prob.getLocalAvgRotation()
    fabric=prob.getLocalFabric()
    strain = prob.getCurrentStrain()
    saveGauss2D(name='./result/gauss/time_'+str(t)+'.dat',strain=strain,stress=stress,fabric=fabric)
    volume_strain = trace(strain)
    dev_strain = symmetric(strain) - volume_strain*k/dim
    shear = sqrt(2*inner(dev_strain,dev_strain))    # equivalent deviatoric strain measure
    saveVTK("./result/vtk/retainingSmooth_%d.vtu"%t,disp=disp,stress=stress,shear=shear,e=vR,rot=rotation)
    prob.getCurrentPacking(pos=packNo,time=t,prefix='./result/packing/')
time_elapse = time.time() - time_start
fout.write("#Elapsed time in hours: "+str(time_elapse/3600.)+'\n')
fout.close()
prob.exitSimulation()
| anna-effeindzourou/trunk | examples/FEMxDEM/retainingSmooth.py | Python | gpl-2.0 | 3,320 | [
"VTK"
] | e0cf2fe02f09df9b3cc99f411f5af2c391c7788623d7abab3733c8bcee88604a |
#!/usr/bin/env python3
from runtime import EspNone, EspList, EspString, EspDict
import re
from multimethod import multimethod
def join(sep, v):
    """Join the str() of every non-None element of *v* with *sep*."""
    parts = [str(item) for item in v if item is not None]
    return sep.join(parts)
# When COLOR is set, sexp() wraps values in ANSI 256-colour escapes;
# otherwise plain "%s" formatting is used.
COLOR = True
if COLOR:
    num = '\033[38;5;202m%s\033[0m'              # shared style for numbers
    color = {
        str: '\033[38;5;247;4m%r\033[0m',        # host strings: grey, underlined, repr'd
        EspString: '\033[38;5;247m%r\033[0m',    # Espresso strings: grey, repr'd
        bool: '\033[38;5;202m%s\033[0m',
        int: num, float: num,
        type(EspNone): '\033[38;5;172m%s\033[0m',
        "var": '\033[38;5;228m%s\033[0m',        # variable names
        "op": '\033[38;5;33m%s\033[0m'           # operator heads of s-expressions
    }
else:
    # NOTE(review): this branch lacks the EspString and "op" keys that
    # sexp() looks up, and uses %s where the colour branch uses %r --
    # running with COLOR=False likely raises KeyError; confirm.
    color = {
        str: "%s", bool: "%s", int: "%s", float: "%s",
        type(EspNone): "%s", "var": "%s"
    }
# ``^`` with re.M matches at the start of every line; used by indent().
SOL = re.compile(r"^", flags=re.M)

def indent(x):
    # Prefix each line of ``x`` with one indentation unit.
    # NOTE(review): the indent literal was transcribed from a
    # whitespace-mangled source; confirm the intended width (tab vs spaces).
    return re.sub(SOL, '    ', x)
def subsexp(ex, before, after):
    # Render the elements of ``ex``: everything before an Ellipsis
    # sentinel goes (space-joined) into ``before``; everything after it
    # goes into ``after`` and is emitted on its own indented lines.
    # NB: mutates the ``before``/``after`` lists supplied by the caller.
    nl = False
    for x in ex:
        if x == ...:
            nl = True
        elif nl:
            after.append(sexp(x))
        else:
            before.append(sexp(x))
    b = join(' ', before)
    if len(after):
        a = indent(join('\n', after))
        ba = join('\n', [b, a]) if a else b
    else:
        ba = b
    return ba
def sexp(v):
    # Pretty-print a value or Expr tree as an S-expression string, using
    # the module-level ``color`` table for formatting.
    ex = v.sexp() if isinstance(v, Expr) else v
    tex = type(ex)
    if ex is None:
        # None entries fall through (implicitly returning None); callers'
        # join() drops them.
        pass
    elif tex is str:
        return color[str] % ex
    elif tex is EspString:
        # checked before the generic table lookup: EspString is a str
        # subclass with its own style
        return color[EspString] % ex
    elif tex in color:
        return color[tex] % ex
    elif tex is tuple:
        # Special colorations
        if ex:
            if ex[0] == "var":
                return color['var'] % ex[1]
            before = [color['op'] % ex[0]]
        else:
            before = []
        after = []
        return f"({subsexp(ex[1:], before, after)})"
    elif tex is list or tex is EspList:
        return f"[{subsexp(tuple(ex), [], [])}]"
    else:
        raise TypeError(f"Unknown value in sexp {type(v).__name__} {v}")
def is_expr(*x):
    """True when every positional argument is an Expr instance."""
    for e in x:
        if not isinstance(e, Expr):
            return False
    return True
class Expr:
    # Base class for all AST nodes.  ``lvalue``/``rvalue`` describe whether
    # a node may appear on the left/right of an assignment; ``statement``
    # marks nodes that don't yield a value when last in a function body.
    lvalue = True
    rvalue = True
    statement = False

    def __init__(self):
        self.origin = None   # source token this node came from, if any

    def __repr__(self):
        # subclasses must supply their own repr
        raise NotImplementedError("__repr__")

    def visit(self, v):
        # Dispatch to v.visit_<classname>.
        # Give it a name for better stack traces
        visit_method = getattr(v, f"visit_{type(self).__name__.lower()}")
        return visit_method(self)

    def set_origin(self, token):
        # Record the originating token; returns self for chaining.
        self.origin = token
        return self

    def make_expr(self):
        '''
        Signals to this and all subexpressions that it's being used as an
        expression
        '''
        self.statement = False

    def sexp(self):
        '''
        Return the expression as an S-expression. Ellipses are used to
        indicate where the rest of the arguments should be separated by
        newlines
        '''
        raise NotImplementedError("sexp")


class Statement(Expr):
    '''
    Expressions which don't automatically return if they're the last in a
    function body.
    '''
    statement = True
class Value(Expr):
    '''Literal value node: wraps a constant, normalized to Espresso types.'''

    def __init__(self, value):
        super().__init__()
        assert(not is_expr(value))
        # Convert Pythonic values to espresso values
        tv = type(value)
        if value is None:
            value = EspNone
        elif tv is str:
            value = EspString(value)
        elif tv is list:
            value = EspList(value)
        elif tv is dict:
            value = EspDict(value)
        self.value = value
        self.lvalue = False   # literals can never be assigned to

    def __str__(self):
        return sexp(self.value)

    def __repr__(self):
        return f"Value({self.value!r})"

    def sexp(self):
        #raise ValueError("Sexp")
        return self.value
class Var(Expr):
    '''Reference to a (possibly immutable) variable by name.'''

    def __init__(self, name, mutable=True):
        super().__init__()
        # isinstance instead of ``type(name) != str`` so str subclasses
        # (e.g. EspString names) are accepted too; the original exact-type
        # check rejected them despite the "must be str" contract.
        if name and not isinstance(name, str):
            raise TypeError(f"Var name must be str, got {type(name)}")
        self.name = name
        self.mutable = mutable

    def __str__(self):
        return sexp(self)

    def __repr__(self):
        if self.mutable:
            return f"Var({self.name!r})"
        return f"Var({self.name!r}, mutable={self.mutable!r})"

    def sexp(self):
        # third element marks immutability; None (dropped by sexp
        # rendering) when the variable is mutable
        return ("var", self.name, self.mutable or None)
class Spread(Expr):
    '''AST node for the spread operator ``...``; syntax gets its own node.'''

    # A spread can never be read as a standalone value.
    rvalue = False

    def __init__(self, var):
        super().__init__()
        assert(is_expr(var))
        var.make_expr()
        # Spreading an l-value is itself assignable.
        self.var = var
        self.lvalue = var.lvalue

    def __str__(self):
        return "..." + sexp(self.var)

    def __repr__(self):
        return "Spread(%r)" % (self.var,)

    def sexp(self):
        return ("...", self.var)
class Assign(Statement):
    '''Assignment node; assignment is syntax, not an operator.'''

    def __init__(self, name, value, op=""):
        super().__init__()
        assert(is_expr(name))
        assert(is_expr(value))
        assert(type(op) is str)
        # The right-hand side is always read as an expression.
        value.make_expr()
        self.name = name
        self.value = value
        self.op = op  # compound-assignment operator, e.g. "+" for +=

    def __str__(self):
        return sexp((f"assign{self.op or ''}=", self.name, self.value))

    def __repr__(self):
        if self.op:
            return "Assign(%r, %r, %r)" % (self.name, self.value, self.op)
        return "Assign(%r, %r)" % (self.name, self.value)

    def sexp(self):
        return (self.op + '=', self.name, self.value)
class Tuple(Expr):
    '''Tuple expression: a comma-separated sequence of subexpressions.'''

    def __init__(self, elems):
        super().__init__()
        assert(is_expr(*elems))
        self.elems = elems
        for e in elems:
            e.make_expr()
        # The tuple is an l-value / r-value only if every element is.
        self.lvalue = all(e.lvalue for e in elems)
        self.rvalue = all(e.rvalue for e in elems)

    def append(self, x):
        self.elems.append(x)

    def __str__(self):
        return sexp((",", *self.elems))

    def __repr__(self):
        return "Tuple(%r)" % (self.elems,)

    def sexp(self):
        return ("tuple", *self.elems)
class Call(Expr):
    '''Function application: a callee plus its argument list.'''

    def __init__(self, func, args):
        super().__init__()
        assert(is_expr(func))
        assert(is_expr(*args))
        # Callee and arguments are all used as expressions.
        func.make_expr()
        for arg in args:
            arg.make_expr()
        self.func = func
        self.args = args

    def __str__(self):
        return sexp(("call", self.func, *self.args))

    def __repr__(self):
        return "Call(%r, %r)" % (self.func, self.args)

    def sexp(self):
        return ("call", self.func, *self.args)
class Index(Expr):
    '''Indexing operation: an object plus one or more index expressions.'''

    def __init__(self, obj, indices):
        super().__init__()
        assert(is_expr(obj))
        assert(is_expr(*indices))
        obj.make_expr()
        for idx in indices:
            idx.make_expr()
        self.obj = obj
        self.indices = indices

    def __str__(self):
        return sexp((".", self.obj, [*self.indices]))

    def __repr__(self):
        return "Index(%r, %r)" % (self.obj, self.indices)

    def sexp(self):
        return (".", self.obj, [*self.indices])
class After(Expr):
    '''
    Pairs a value with an update expression.  Presumably the value is
    produced before the update runs (post-increment style) — confirm
    against the evaluator.
    '''

    def __init__(self, value, update):
        super().__init__()
        assert(is_expr(value))
        assert(is_expr(update))
        value.make_expr()
        update.make_expr()
        self.value = value
        self.update = update

    def __str__(self):
        return sexp(("after", self.value, self.update))

    def __repr__(self):
        return "After(%r, %r)" % (self.value, self.update)

    def sexp(self):
        return ("after", self.value, self.update)


class Bind(Expr):
    '''Binding operator ``->``.'''

    def __init__(self, obj, member):
        super().__init__()
        assert(is_expr(obj))
        assert(is_expr(member))
        obj.make_expr()
        member.make_expr()
        self.obj = obj
        self.member = member

    def __str__(self):
        return sexp(self)

    def __repr__(self):
        return "Bind(%r, %r)" % (self.obj, self.member)

    def sexp(self):
        return ("->", self.obj, self.member)


class Descope(Expr):
    '''Descoping operator ``::``.'''

    def __init__(self, obj, member):
        super().__init__()
        assert(is_expr(obj))
        assert(is_expr(member))
        obj.make_expr()
        member.make_expr()
        self.obj = obj
        self.member = member

    def __str__(self):
        return sexp(self)

    def __repr__(self):
        return "Descope(%r, %r)" % (self.obj, self.member)

    def sexp(self):
        return ("::", self.obj, self.member)
class Loop(Statement):
    '''Universal loop node: every loop form lowers to this infinite loop.'''

    def __init__(self, body, el=None):
        super().__init__()
        assert(is_expr(body))
        assert(is_expr(el) or el is None)
        self.body = body
        self.el = el  # optional "else" clause

    def __str__(self):
        return sexp(("loop", ..., self.body, self.el and ("else", self.el)))

    def __repr__(self):
        return "Loop(%r, %r)" % (self.body, self.el)

    def sexp(self):
        return ("loop", ...,
            self.body, self.el and ("else", self.el))
class If(Expr):
    '''
    if statements always act the same as an expression or statement, so
    they're actually a kind of expression.
    '''

    def __init__(self, cond, th, el):
        super().__init__()
        assert(is_expr(cond))
        assert(is_expr(th) or th is None)
        assert(is_expr(el) or el is None)
        # Only the condition is forced into expression position; the
        # then/else arms keep their statement-ness.
        cond.make_expr()
        self.cond = cond
        self.th = th
        self.el = el

    def __str__(self):
        return sexp(("if", self.cond, ...,
            self.th and ("then", self.th),
            self.el and ("else", self.el)))

    def __repr__(self):
        return "If(%r, %r, %r)" % (self.cond, self.th, self.el)

    def sexp(self):
        return ("if", self.cond, ...,
            self.th and ("then", self.th),
            self.el and ("else", self.el))
class Branch(Statement):
    '''Base class for branching in blocks.'''

    def __init__(self, kind, level=0):
        super().__init__()
        assert(type(kind) is str)
        assert(type(level) is int)
        self.kind = kind
        # Nesting depth — presumably how many enclosing blocks to branch
        # out of; confirm against the evaluator.
        self.level = level

    def __str__(self):
        return sexp((self.kind, self.level))

    def __repr__(self):
        return "Branch(%r, %r)" % (self.kind, self.level)

    def sexp(self):
        return (self.kind, self.level)
class Op(Expr):
    '''Simple operation, evaluates to a value.'''

    def __init__(self, op, *args):
        super().__init__()
        assert(type(op) is str)
        assert(is_expr(*args))
        self.op = op
        self.args = args
        # Operation results are always r-values.
        self.lvalue = False
        # All operands are used as expressions, never statements.
        for operand in args:
            operand.make_expr()

    def __str__(self):
        return sexp((self.op, *self.args))

    def __repr__(self):
        return "Op(%r, %r, %r)" % (self.op, self.args, self.lvalue)

    def sexp(self):
        return (self.op, *self.args)
class Import(Expr):
    '''Import statement, for now just support builtin libraries.'''

    def __init__(self, name):
        super().__init__()
        assert(is_expr(name))
        self.name = name

    def __str__(self):
        return sexp(("import", self.name))

    def __repr__(self):
        return "Import(%r)" % (self.name,)

    def sexp(self):
        return ("import", self.name)
class Proto(Expr):
    '''
    Prototype declaration: optional parent plus three member lists
    (public, private, static).
    '''
    def __init__(self, name, parent, pub, priv, stat):
        super().__init__()
        assert(is_expr(name))
        assert(is_expr(parent) or parent is None)
        assert(is_expr(*pub))
        assert(is_expr(*priv))
        assert(is_expr(*stat))
        self.name = name
        self.parent = parent  # parent prototype, or None
        self.pub = pub        # public members
        self.priv = priv      # private members
        self.stat = stat      # static members
    def __str__(self):
        return sexp(("proto",
            self.name and f":{self.name}",
            self.parent and ("is", self.parent),
            ...,
            # Empty member lists are suppressed from the rendering.
            self.pub and ("public", self.pub),
            self.priv and ("private", self.priv),
            self.stat and ("static", self.stat)
        ))
    def __repr__(self):
        return f"Proto({self.name!r}, {self.parent!r}, {self.pub!r}, {self.priv!r}, {self.stat!r})"
    def sexp(self):
        return ("proto", self.name, self.parent and ("is", self.parent),
            ...,
            self.pub and ("public", self.pub),
            self.priv and ("private", self.priv),
            self.stat and ("static", self.stat)
        )
class Return(Statement):
    '''Return statement: evaluates its operand for the enclosing function.'''

    def __init__(self, value):
        # BUG FIX: the original called ``super.__init__()`` (missing the
        # call parentheses on ``super``), which raises AttributeError the
        # moment a Return node is constructed.
        super().__init__()
        assert(is_expr(value))
        self.value = value
        # The returned operand is always used as an expression.
        value.make_expr()

    def __str__(self):
        return sexp(("return", self.value))

    def __repr__(self):
        return f"Return({self.value!r})"

    def sexp(self):
        return ("return", self.value)
class Format(Expr):
    '''Formatted string expression.'''

    def __init__(self, parts):
        super().__init__()
        assert(is_expr(*parts))
        self.parts = parts
        for part in parts:
            part.make_expr()

    def _rendered_parts(self):
        # NOTE(review): the ``str`` branch looks unreachable given the
        # is_expr assertion above — confirm whether raw strings can in
        # fact appear in ``parts``.
        return (repr(x) if type(x) is str else x for x in self.parts)

    def __str__(self):
        return sexp(("format", ..., *self._rendered_parts()))

    def __repr__(self):
        return "Format(%r)" % (self.parts,)

    def sexp(self):
        return ("format", ..., *self._rendered_parts())
class Case(Expr):
    '''One arm of a switch: comparison op, value, body and fallthrough link.'''

    def __init__(self, op, value, body, next):
        super().__init__()
        assert(type(op) is str)
        assert(is_expr(value))
        assert(is_expr(body))
        value.make_expr()
        self.op = op
        self.value = value
        self.body = body
        self.next = next

    def __str__(self):
        return sexp(("case", self.op, self.value, self.body,
            self.next and "..."))

    def __repr__(self):
        return "Case(%r, %r, %r, %r)" % (self.op, self.value, self.body, self.next)

    def sexp(self):
        return ("case" + self.op, self.value, self.body, self.next and "...")
class Switch(Expr):
    '''
    Switch expression.
    This is implemented by separating the predicates from the values/bodies.
    Predicates keep track of the comparison operation, value to compare
    against, a body index, and a next index.
    '''
    def __init__(self, ex, cs, de, th, el):
        super().__init__()
        assert(is_expr(ex))
        assert(is_expr(*cs))
        assert(is_expr(de) or de is None)
        assert(is_expr(th) or th is None)
        assert(is_expr(el) or el is None)
        self.ex = ex  # EXpression being switched on
        self.cs = cs  # CaseS
        self.de = de  # DEfault arm, or None
        self.th = th  # THen arm, or None
        self.el = el  # ELse arm, or None
        # Only the scrutinee is forced into expression position; the arms
        # keep their statement-ness.
        ex.make_expr()
    def __str__(self):
        return sexp(("switch", self.ex,
            ...,
            *self.cs,
            # Absent arms are suppressed from the rendering.
            self.de and ("default", self.de),
            self.th and ("then", self.th),
            self.el and ("else", self.el)
        ))
    def __repr__(self):
        return f"Switch({self.ex!r}, {self.cs!r}, {self.de!r}, {self.th!r}, {self.el!r})"
    def sexp(self):
        return ("switch", self.ex, ...,
            *self.cs,
            self.de and ("default", self.de),
            self.th and ("then", self.th),
            self.el and ("else", self.el)
        )
class ObjectLiteral(Expr):
    '''Object literal: a sequence of (key, value) expression pairs.'''

    def __init__(self, obj):
        super().__init__()
        # NOTE(review): no validation here in the original; ``obj`` is
        # assumed to be an iterable of (key, value) Expr pairs.
        self.values = obj
        for key, val in obj:
            key.make_expr()
            val.make_expr()

    def __str__(self):
        return sexp(("object", ...,
            *(("pair", k, v) for k, v in self.values)))

    def __repr__(self):
        return "ObjectLiteral(%r)" % (self.values,)

    def sexp(self):
        return ("object", ...,
            *(("pair", k, v) for k, v in self.values)
        )


class ListLiteral(Expr):
    '''List literal: an ordered sequence of element expressions.'''

    def __init__(self, vals):
        super().__init__()
        assert(is_expr(*vals))
        self.values = vals
        for element in vals:
            element.make_expr()

    def __str__(self):
        return sexp(("list", *self.values))

    def __repr__(self):
        return "ListLiteral(%r)" % (self.values,)

    def sexp(self):
        return ("list", *self.values)
class ForLoop(Statement):
    '''
    Dedicated for-loop node; representing for loops with Loop ends up
    being too complicated.
    '''
    def __init__(self, itvar, toiter, body, th, el):
        super().__init__()
        assert(is_expr(itvar))
        assert(is_expr(toiter))
        assert(is_expr(body))
        assert(is_expr(th) or th is None)
        assert(is_expr(el) or el is None)
        self.itvar = itvar    # iteration variable
        self.toiter = toiter  # expression being iterated over
        self.body = body      # loop body
        self.th = th          # "then" clause, or None
        self.el = el          # "else" clause, or None
        # Only the iterable is an expression; body/then/else keep their
        # statement-ness.
        toiter.make_expr()
    def __str__(self):
        return sexp(("for",
            self.itvar,
            ("in", self.toiter),
            ...,
            ("body", self.body),
            # Absent clauses are suppressed from the rendering.
            self.th and ("then", self.th),
            self.el and ("else", self.el)
        ))
    def __repr__(self):
        return f"ForLoop({self.itvar!r}, {self.toiter!r}, {self.body!r}, {self.th!r}, {self.el!r})"
    def sexp(self):
        return ("for", self.itvar, ("in", self.toiter), ...,
            ("body", self.body),
            self.th and ("then", self.th),
            self.el and ("else", self.el)
        )
class Block(Statement):
    '''Sequence of expressions evaluating to the last.'''

    def __init__(self, elems, vars=None):
        super().__init__()
        # BUG FIX: copy the caller's list instead of aliasing it.  The
        # flattening loop below extends ``vars`` in place, so the original
        # silently mutated whatever list the caller passed in.
        vars = list(vars) if vars else []
        assert(is_expr(*vars))
        # Flatten nested blocks: their elements and variable declarations
        # are hoisted into this block so the tree stays shallow.
        se = []
        for e in elems:
            if type(e) is Block:
                se += e.elems
                vars += e.vars
            elif e is not None:
                se.append(e)
        self.elems = se
        self.vars = vars
        self.lvalue = False  # a block is never an assignment target

    def __str__(self):
        return sexp(("block",
            self.vars,
            ...,
            *self.elems
        ))

    def __repr__(self):
        return f"Block({self.elems!r}, {self.vars!r})"

    def sexp(self):
        return ("block", self.vars, ...,
            *self.elems
        )
class Prog(Block):
    '''Top-level program: a Block representing a whole compilation unit.'''
    def __init__(self, elems, vars=None):
        super().__init__(elems, vars)
    def __repr__(self):
        return f"Prog({self.elems!r}, {self.vars!r})"
class Func(Expr):
    '''Function literal: name, parameter list, and body.'''

    def __init__(self, name, args, body):
        super().__init__()
        assert(is_expr(name))
        assert(is_expr(*args))
        assert(is_expr(body))
        self.name = name
        self.args = args
        self.body = body

    def __str__(self):
        return sexp(("function", self.name, self.args, ..., self.body))

    def __repr__(self):
        # BUG FIX: the original f-string was missing the closing
        # parenthesis, producing e.g. "Func(a, b, c" instead of
        # "Func(a, b, c)".
        return f"Func({self.name!r}, {self.args!r}, {self.body!r})"

    def sexp(self):
        return ("function", self.name, self.args, ..., self.body)
| ConsciousCode/espresso | src/ast.py | Python | bsd-2-clause | 16,300 | [
"ESPResSo",
"VisIt"
] | f4e2af1dbaa906de2d93775995e388b46c826e21359a7d0f80ca3b399e9d1ae3 |
from __future__ import print_function
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
sys.path.insert(0, os.path.abspath('lib'))
from ansible.release import __version__, __author__
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
"""Find symlinks that should be maintained
Maintained symlinks exist in the bin dir or are modules which have
aliases. Our heuristic is that they are a link in a certain path which
point to a file in the same directory.
"""
symlinks = defaultdict(list)
for base_path, dirs, files in os.walk(topdir):
for filename in files:
filepath = os.path.join(base_path, filename)
if os.path.islink(filepath) and filename.endswith(extension):
target = os.readlink(filepath)
if os.path.dirname(target) == '':
link = filepath[len(topdir):]
if link.startswith('/'):
link = link[1:]
symlinks[os.path.basename(target)].append(link)
return symlinks
def _cache_symlinks(symlink_data):
with open(SYMLINK_CACHE, 'w') as f:
json.dump(symlink_data, f)
def _maintain_symlinks(symlink_type, base_path):
    """Switch a real file into a symlink.

    ``symlink_type`` selects the 'script' or 'library' map; ``base_path``
    is the build/install directory whose copied files are replaced by
    symlinks.
    """
    try:
        # Try the cache first because going from git checkout to sdist is the
        # only time we know that we're going to cache correctly
        with open(SYMLINK_CACHE, 'r') as f:
            symlink_data = json.load(f)
    except (IOError, OSError) as e:
        # IOError on py2, OSError on py3.  Both have errno
        if e.errno == 2:
            # SYMLINKS_CACHE doesn't exist.  Fallback to trying to create the
            # cache now.  Will work if we're running directly from a git
            # checkout or from an sdist created earlier.
            symlink_data = {'script': _find_symlinks('bin'),
                            'library': _find_symlinks('lib', '.py'),
                            }

            # Sanity check that something we know should be a symlink was
            # found.  We'll take that to mean that the current directory
            # structure properly reflects symlinks in the git repo
            if 'ansible-playbook' in symlink_data['script']['ansible']:
                _cache_symlinks(symlink_data)
            else:
                raise
        else:
            raise

    symlinks = symlink_data[symlink_type]

    for source in symlinks:
        for dest in symlinks[source]:
            dest_path = os.path.join(base_path, dest)
            if not os.path.islink(dest_path):
                try:
                    os.unlink(dest_path)
                except OSError as e:
                    if e.errno == 2:
                        # File does not exist which is all we wanted
                        pass
                    else:
                        # BUG FIX: the original silently swallowed every
                        # OSError here, not just ENOENT.  Re-raise anything
                        # else (e.g. permission errors) so real failures
                        # are visible instead of surfacing later as a
                        # confusing os.symlink() error.
                        raise
                os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
    """build_py that re-creates library symlinks after the normal build."""
    def run(self):
        BuildPy.run(self)
        _maintain_symlinks('library', self.build_lib)


class BuildScriptsCommand(BuildScripts):
    """build_scripts that re-creates script symlinks after the normal build."""
    def run(self):
        BuildScripts.run(self)
        _maintain_symlinks('script', self.build_dir)


class InstallLibCommand(InstallLib):
    """install_lib that re-creates library symlinks in the install dir."""
    def run(self):
        InstallLib.run(self)
        _maintain_symlinks('library', self.install_dir)


class InstallScriptsCommand(InstallScripts):
    """install_scripts that re-creates script symlinks in the install dir."""
    def run(self):
        InstallScripts.run(self)
        _maintain_symlinks('script', self.install_dir)


class SDistCommand(SDist):
    """sdist that records the symlink map before packaging."""
    def run(self):
        # have to generate the cache of symlinks for release as sdist is the
        # only command that has access to symlinks from the git repo
        symlinks = {'script': _find_symlinks('bin'),
                    'library': _find_symlinks('lib', '.py'),
                    }
        _cache_symlinks(symlinks)
        SDist.run(self)
def read_file(file_name):
    """Return the entire contents of *file_name* as a single string."""
    with open(file_name, 'r') as handle:
        return handle.read()
def read_requirements(file_name):
    """Read requirements file as a list.

    Raises RuntimeError when the file is empty, which indicates an
    incomplete source tree.
    """
    reqs = read_file(file_name).splitlines()
    if not reqs:
        raise RuntimeError(
            # BUG FIX: the original implicit string concatenation produced
            # "...the requirements.txt fileThat indicates..." — the two
            # sentences ran together with no separator.
            "Unable to read requirements from the %s file. "
            "That indicates this copy of the source code is incomplete."
            % file_name
        )
    return reqs
PYCRYPTO_DIST = 'pycrypto'


def get_crypto_req():
    """Detect custom crypto from ANSIBLE_CRYPTO_BACKEND env var.

    pycrypto or cryptography. We choose a default but allow the user to
    override it. This translates into pip install of the sdist deciding what
    package to install and also the runtime dependencies that pkg_resources
    knows about.
    """
    backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
    if backend == PYCRYPTO_DIST:
        # Attempt to set version requirements
        return '%s >= 2.6' % PYCRYPTO_DIST
    # An empty/unset variable means "no override".
    return backend or None
def substitute_crypto_to_req(req):
    """Replace crypto requirements if customized.

    When ANSIBLE_CRYPTO_BACKEND selects a backend, drop every pycrypto/
    cryptography requirement and append the chosen backend instead.
    """
    backend = get_crypto_req()
    if backend is None:
        return req
    crypto_libs = (PYCRYPTO_DIST, 'cryptography')
    kept = [
        r for r in req
        if not any(r.lower().startswith(c) for c in crypto_libs)
    ]
    return kept + [backend]
def read_extras():
    """Specify any extra requirements for installation."""
    extras = dict()
    extra_requirements_dir = 'packaging/requirements'
    for extra_requirements_filename in os.listdir(extra_requirements_dir):
        # Only files named requirements-<extra>.txt define an extra.
        filename_match = re.search(r'^requirements-(\w*).txt$', extra_requirements_filename)
        if not filename_match:
            continue
        extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename)
        try:
            extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines()
        except RuntimeError:
            # NOTE(review): read_file() doesn't raise RuntimeError itself
            # (read_requirements does) — confirm this handler is reachable.
            pass
    return extras
def get_dynamic_setup_params():
    """Add dynamically calculated setup params to static ones."""
    return {
        # Retrieve the long description from the README
        'long_description': read_file('README.rst'),
        # Requirements, with the crypto backend substituted if overridden.
        'install_requires': substitute_crypto_to_req(
            read_requirements('requirements.txt'),
        ),
        'extras_require': read_extras(),
    }
# Setup parameters that don't need to be computed at install time.
static_setup_params = dict(
    # Use the distutils SDist so that symlinks are not expanded
    # Use a custom Build for the same reason
    cmdclass={
        'build_py': BuildPyCommand,
        'build_scripts': BuildScriptsCommand,
        'install_lib': InstallLibCommand,
        'install_scripts': InstallScriptsCommand,
        'sdist': SDistCommand,
    },
    name='ansible',
    version=__version__,
    description='Radically simple IT automation',
    author=__author__,
    author_email='info@ansible.com',
    url='https://ansible.com/',
    project_urls={
        'Bug Tracker': 'https://github.com/ansible/ansible/issues',
        'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
        'Documentation': 'https://docs.ansible.com/ansible/',
        'Source Code': 'https://github.com/ansible/ansible',
    },
    license='GPLv3+',
    # Ansible will also make use of a system copy of python-six and
    # python-selectors2 if installed but use a Bundled copy if it's not.
    python_requires='>=2.6,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
    package_dir={'': 'lib'},
    packages=find_packages('lib'),
    # Non-Python files that must be shipped inside the packages.
    package_data={
        '': [
            'module_utils/powershell/*.psm1',
            'module_utils/powershell/*/*.psm1',
            'modules/windows/*.ps1',
            'modules/windows/*/*.ps1',
            'galaxy/data/*/*.*',
            'galaxy/data/*/*/.*',
            'galaxy/data/*/*/*.*',
            'galaxy/data/*/tests/inventory',
            'config/base.yml',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    scripts=[
        'bin/ansible',
        'bin/ansible-playbook',
        'bin/ansible-pull',
        'bin/ansible-doc',
        'bin/ansible-galaxy',
        'bin/ansible-console',
        'bin/ansible-connection',
        'bin/ansible-vault',
        'bin/ansible-config',
        'bin/ansible-inventory',
    ],
    data_files=[],
    # Installing as zip files would break due to references to __file__
    zip_safe=False
)
def main():
    """Invoke installation process using setuptools."""
    # Static params are extended/overridden by the dynamically computed ones.
    setup_params = dict(static_setup_params, **get_dynamic_setup_params())
    ignore_warning_regex = (
        r"Unknown distribution option: '(project_urls|python_requires)'"
    )
    # Older setuptools/distutils don't know these options; silence exactly
    # that warning and nothing else.
    warnings.filterwarnings(
        'ignore',
        message=ignore_warning_regex,
        category=UserWarning,
        module='distutils.dist',
    )
    setup(**setup_params)
    # NOTE: resetwarnings() clears ALL warning filters process-wide, not
    # just the one added above.
    warnings.resetwarnings()


if __name__ == '__main__':
    main()
| KohlsTechnology/ansible | setup.py | Python | gpl-3.0 | 10,363 | [
"Galaxy"
] | 26025e60aa123d4eaa2c0fe43fd884f889e54e1654d46bdf1a4fce132a30e66f |
#
# Konstrukteur - Static Site Generator
# Copyright 2013-2014 Sebastian Fastner
# Copyright 2014 Sebastian Werner
#
"""
**Konstrukteur - Static Site Generator**
Konstrukteur is a website generator that uses a template and content files
to create static website output.
"""
# Package version, also reported by info().
__version__ = "0.1.15"
__author__ = "Sebastian Fastner <mail@sebastianfastner.de>"


def info():
    """
    Prints information about Konstrukteur to the console.
    """
    import jasy.core.Console as Console

    print("Konstrukteur %s is a static site generator" % __version__)
    print("Visit %s for details." % Console.colorize("https://github.com/fastner/konstrukteur", "underline"))
    print()


class UserError(Exception):
    """
    Standard Konstrukteur error class, raised whenever something happens
    that the system understands (i.e. a somewhat expected failure).
    """
    pass
| fastner/konstrukteur | konstrukteur/__init__.py | Python | mit | 850 | [
"VisIt"
] | 95e050a51ea35de7da7b620de00a32c03d13211a59e4db14b5be79148578db04 |
<<<<<<< HEAD
<<<<<<< HEAD
"""
A number of functions that enhance IDLE on Mac OSX.
"""
import sys
import tkinter
from os import path
import warnings
def runningAsOSXApp():
    """Deprecated: use isAquaTk() instead."""
    warnings.warn("runningAsOSXApp() is deprecated, use isAquaTk()",
                  DeprecationWarning, stacklevel=2)
    return isAquaTk()


def isCarbonAquaTk(root):
    """Deprecated: use isCarbonTk() instead (the argument is ignored)."""
    warnings.warn("isCarbonAquaTk(root) is deprecated, use isCarbonTk()",
                  DeprecationWarning, stacklevel=2)
    return isCarbonTk()
# Cached Tk variant name ("cocoa", "carbon", "xquartz" or "other");
# populated once by _initializeTkVariantTests() and read by the is*Tk()
# helpers below.
_tk_type = None


def _initializeTkVariantTests(root):
    """
    Initializes OS X Tk variant values for
    isAquaTk(), isCarbonTk(), isCocoaTk(), and isXQuartz().
    """
    global _tk_type
    if sys.platform == 'darwin':
        # 'windowingsystem' reports "x11" or "aqua"; for aqua builds the
        # window-server string distinguishes Cocoa ("AppKit") from Carbon.
        ws = root.tk.call('tk', 'windowingsystem')
        if 'x11' in ws:
            _tk_type = "xquartz"
        elif 'aqua' not in ws:
            _tk_type = "other"
        elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
            _tk_type = "cocoa"
        else:
            _tk_type = "carbon"
    else:
        # Not running on macOS at all.
        _tk_type = "other"
def isAquaTk():
    """
    Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
    """
    assert _tk_type is not None
    return _tk_type in ("cocoa", "carbon")


def isCarbonTk():
    """
    Returns True if IDLE is using a Carbon Aqua Tk (instead of the
    newer Cocoa Aqua Tk).
    """
    assert _tk_type is not None
    return _tk_type == "carbon"


def isCocoaTk():
    """
    Returns True if IDLE is using a Cocoa Aqua Tk.
    """
    assert _tk_type is not None
    return _tk_type == "cocoa"


def isXQuartz():
    """
    Returns True if IDLE is using an OS X X11 Tk.
    """
    assert _tk_type is not None
    return _tk_type == "xquartz"
def tkVersionWarning(root):
    """
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE.
    1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
    2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
       can still crash unexpectedly.
    """
    if isCocoaTk():
        patchlevel = root.tk.call('info', 'patchlevel')
        # Only 8.5.7 and 8.5.9 are known-bad; anything else needs no warning.
        if patchlevel not in ('8.5.7', '8.5.9'):
            return False
        # NOTE(review): these are raw strings, so "\n" is a literal
        # backslash-n in the message rather than a newline — confirm that is
        # what the code displaying this warning expects.
        return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
                r" be unstable.\n"
                r"Visit http://www.python.org/download/mac/tcltk/"
                r" for current information.".format(patchlevel))
    else:
        return False
def addOpenEventSupport(root, flist):
    """
    Make the application respond to open-document AppleEvents so that IDLE
    can be used as the default application for Python files.
    """
    def open_files(*args):
        # Tk hands the callback one argument per file to open.
        for fn in args:
            flist.open(fn)

    # Register the AquaTk hook invoked whenever the app receives a
    # file-open event.
    root.createcommand("::tk::mac::OpenDocument", open_files)
def hideTkConsole(root):
    """Hide the built-in Tk console window, when this Tk build has one."""
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # Not every Tk framework build ships a console object; ignore.
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that is more appropriate for
    IDLE with an Aqua Tk.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own.  The default menubar
    # contains a number of menus, none of which are appropriate for IDLE.  The
    # most annoying of those is an 'About Tck/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    #
    # CLEANUP: the original imported Text twice from tkinter and also
    # imported prepstr, get_accelerator and MultiCallCreator, none of which
    # were used anywhere in this function; those imports were dropped.
    from tkinter import Menu
    from idlelib import Bindings
    from idlelib import WindowList

    closeItem = Bindings.menudefs[0][1][-2]

    # Remove the last 3 items of the file menu: a separator, close window and
    # quit.  Close window will be reinserted just above the save item, where
    # it should be according to the HIG.  Quit is in the application menu.
    del Bindings.menudefs[0][1][-3:]
    Bindings.menudefs[0][1].insert(6, closeItem)

    # Remove the 'About' entry from the help menu, it is in the application
    # menu
    del Bindings.menudefs[-1][1][0:2]
    # Remove the 'Configure Idle' entry from the options menu, it is in the
    # application menu as 'Preferences'
    del Bindings.menudefs[-2][1][0]

    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}

    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)

    def postwindowsmenu(menu=menu):
        # Rebuild the Window menu from the current window list.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)

    def about_dialog(event=None):
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')

    def config_dialog(event=None):
        from idlelib import configDialog
        # Ensure that the root object has an instance_dict attribute,
        # mirrors code in EditorWindow (although that sets the attribute
        # on an EditorWindow instance that is then passed as the first
        # argument to ConfigDialog)
        # BUG FIX: this assignment was duplicated in the original.
        root.instance_dict = flist.inversedict
        configDialog.ConfigDialog(root, 'Settings')

    def help_dialog(event=None):
        from idlelib import textView
        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
        textView.view_file(root, 'Help', fn)

    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)

    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)
        # The binding above doesn't reliably work on all versions of Tk
        # on MacOSX.  Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('exit', flist.close_all_callback)

    if isCarbonTk():
        # for Carbon AquaTk, replace the default Tk apple menu
        menudict['application'] = menu = Menu(menubar, name='apple')
        menubar.add_cascade(label='IDLE', menu=menu)
        Bindings.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
            ]))
        tkversion = root.tk.eval('info patchlevel')
        if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
            # for earlier AquaTk versions, supply a Preferences menu item
            Bindings.menudefs[0][1].append(
                ('_Preferences....', '<<open-config-dialog>>'),
            )
    if isCocoaTk():
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
    """
    Perform initial OS X customizations if needed.
    Called from PyShell.main() after initial calls to Tk()

    There are currently three major versions of Tk in use on OS X:
        1. Aqua Cocoa Tk (native default since OS X 10.6)
        2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
        3. X11 (supported by some third-party distributors, deprecated)
    There are various differences among the three that affect IDLE
    behavior, primarily with menus, mouse key events, and accelerators.
    Some one-time customizations are performed here.
    Others are dynamically tested throughout idlelib by calls to the
    isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
    are initialized here as well.
    """
    _initializeTkVariantTests(root)
    if isAquaTk():
        # Only native Aqua builds need the console/menu/open-event tweaks.
        hideTkConsole(root)
        overrideRootMenu(root, flist)
        addOpenEventSupport(root, flist)
=======
"""
A number of functions that enhance IDLE on Mac OSX.
"""
import sys
import tkinter
from os import path
import warnings
# NOTE(review): everything from here down duplicates the definitions above —
# this file contains an unresolved merge conflict ("<<<<<<< HEAD" /
# "=======" markers).  Remove this duplicated branch when resolving.
def runningAsOSXApp():
    """Deprecated: use isAquaTk() instead."""
    warnings.warn("runningAsOSXApp() is deprecated, use isAquaTk()",
                  DeprecationWarning, stacklevel=2)
    return isAquaTk()


def isCarbonAquaTk(root):
    """Deprecated: use isCarbonTk() instead (the argument is ignored)."""
    warnings.warn("isCarbonAquaTk(root) is deprecated, use isCarbonTk()",
                  DeprecationWarning, stacklevel=2)
    return isCarbonTk()


# Cached Tk variant name; populated by _initializeTkVariantTests().
_tk_type = None


def _initializeTkVariantTests(root):
    """
    Initializes OS X Tk variant values for
    isAquaTk(), isCarbonTk(), isCocoaTk(), and isXQuartz().
    """
    global _tk_type
    if sys.platform == 'darwin':
        ws = root.tk.call('tk', 'windowingsystem')
        if 'x11' in ws:
            _tk_type = "xquartz"
        elif 'aqua' not in ws:
            _tk_type = "other"
        elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
            _tk_type = "cocoa"
        else:
            _tk_type = "carbon"
    else:
        _tk_type = "other"


def isAquaTk():
    """
    Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
    """
    assert _tk_type is not None
    return _tk_type == "cocoa" or _tk_type == "carbon"


def isCarbonTk():
    """
    Returns True if IDLE is using a Carbon Aqua Tk (instead of the
    newer Cocoa Aqua Tk).
    """
    assert _tk_type is not None
    return _tk_type == "carbon"


def isCocoaTk():
    """
    Returns True if IDLE is using a Cocoa Aqua Tk.
    """
    assert _tk_type is not None
    return _tk_type == "cocoa"


def isXQuartz():
    """
    Returns True if IDLE is using an OS X X11 Tk.
    """
    assert _tk_type is not None
    return _tk_type == "xquartz"


def tkVersionWarning(root):
    """
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE.
    1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
    2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
       can still crash unexpectedly.
    """
    if isCocoaTk():
        patchlevel = root.tk.call('info', 'patchlevel')
        if patchlevel not in ('8.5.7', '8.5.9'):
            return False
        # NOTE(review): raw strings make "\n" a literal backslash-n here —
        # confirm intent.
        return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
                r" be unstable.\n"
                r"Visit http://www.python.org/download/mac/tcltk/"
                r" for current information.".format(patchlevel))
    else:
        return False


def addOpenEventSupport(root, flist):
    """
    This ensures that the application will respond to open AppleEvents, which
    makes is feasible to use IDLE as the default application for python files.
    """
    def doOpenFile(*args):
        for fn in args:
            flist.open(fn)

    # The command below is a hook in aquatk that is called whenever the app
    # receives a file open event. The callback can have multiple arguments,
    # one for every file that should be opened.
    root.createcommand("::tk::mac::OpenDocument", doOpenFile)


def hideTkConsole(root):
    """Hide the built-in Tk console window, when this Tk build has one."""
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # Some versions of the Tk framework don't have a console object
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that is more appropriate for
    IDLE with an Aqua Tk.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    from tkinter import Menu  # BUGFIX: was "Menu, Text, Text" -- Text was imported twice and never used
    from idlelib import Bindings
    from idlelib import WindowList
    # Cleanup: removed unused imports (EditorWindow.prepstr, get_accelerator,
    # MultiCall.MultiCallCreator) -- none were referenced in this function.
    closeItem = Bindings.menudefs[0][1][-2]
    # Remove the last 3 items of the file menu: a separator, close window and
    # quit. Close window will be reinserted just above the save item, where
    # it should be according to the HIG. Quit is in the application menu.
    del Bindings.menudefs[0][1][-3:]
    Bindings.menudefs[0][1].insert(6, closeItem)
    # Remove the 'About' entry from the help menu, it is in the application
    # menu
    del Bindings.menudefs[-1][1][0:2]
    # Remove the 'Configure Idle' entry from the options menu, it is in the
    # application menu as 'Preferences'
    del Bindings.menudefs[-2][1][0]
    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}
    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)
    def postwindowsmenu(menu=menu):
        # Rebuild the Window menu from the current list of open windows.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)
    def about_dialog(event=None):
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')
    def config_dialog(event=None):
        from idlelib import configDialog
        # Ensure that the root object has an instance_dict attribute,
        # mirrors code in EditorWindow (although that sets the attribute
        # on an EditorWindow instance that is then passed as the first
        # argument to ConfigDialog)
        root.instance_dict = flist.inversedict  # BUGFIX: was assigned twice in a row
        configDialog.ConfigDialog(root, 'Settings')
    def help_dialog(event=None):
        from idlelib import textView
        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
        textView.view_file(root, 'Help', fn)
    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)
        # The binding above doesn't reliably work on all versions of Tk
        # on MacOSX. Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('exit', flist.close_all_callback)
    if isCarbonTk():
        # for Carbon AquaTk, replace the default Tk apple menu
        menudict['application'] = menu = Menu(menubar, name='apple')
        menubar.add_cascade(label='IDLE', menu=menu)
        Bindings.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
            ]))
        tkversion = root.tk.eval('info patchlevel')
        if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
            # for earlier AquaTk versions, supply a Preferences menu item
            Bindings.menudefs[0][1].append(
                ('_Preferences....', '<<open-config-dialog>>'),
            )
    if isCocoaTk():
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
    """
    Perform initial OS X customizations if needed.
    Called from PyShell.main() after initial calls to Tk()

    There are currently three major versions of Tk in use on OS X:
        1. Aqua Cocoa Tk (native default since OS X 10.6)
        2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
        3. X11 (supported by some third-party distributors, deprecated)
    There are various differences among the three that affect IDLE
    behavior, primarily with menus, mouse key events, and accelerators.
    Some one-time customizations are performed here.
    Others are dynamically tested throughout idlelib by calls to the
    isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
    are initialized here as well.
    """
    # Cache the Tk variant (cocoa/carbon/xquartz/other) for the is*Tk() tests.
    _initializeTkVariantTests(root)
    # The remaining customizations apply only to native (Aqua) Tk builds.
    if isAquaTk():
        hideTkConsole(root)
        overrideRootMenu(root, flist)
        addOpenEventSupport(root, flist)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""
A number of functions that enhance IDLE on Mac OSX.
"""
import sys
import tkinter
from os import path
import warnings
def runningAsOSXApp():
    # Deprecated compatibility shim; callers should use isAquaTk() instead.
    warnings.warn("runningAsOSXApp() is deprecated, use isAquaTk()",
                  DeprecationWarning, stacklevel=2)
    return isAquaTk()
def isCarbonAquaTk(root):
    # Deprecated compatibility shim; the root argument is unused because the
    # Tk variant is cached by _initializeTkVariantTests().
    warnings.warn("isCarbonAquaTk(root) is deprecated, use isCarbonTk()",
                  DeprecationWarning, stacklevel=2)
    return isCarbonTk()
# Cached Tk variant; one of "cocoa", "carbon", "xquartz", "other", or None
# before _initializeTkVariantTests() has run.
_tk_type = None

def _initializeTkVariantTests(root):
    """
    Initializes OS X Tk variant values for
    isAquaTk(), isCarbonTk(), isCocoaTk(), and isXQuartz().
    """
    global _tk_type
    if sys.platform != 'darwin':
        # Not OS X at all -- no Aqua/XQuartz distinction to make.
        _tk_type = "other"
        return
    ws = root.tk.call('tk', 'windowingsystem')
    if 'x11' in ws:
        _tk_type = "xquartz"
    elif 'aqua' not in ws:
        _tk_type = "other"
    elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
        _tk_type = "cocoa"
    else:
        _tk_type = "carbon"
def isAquaTk():
    """
    Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
    """
    # _initializeTkVariantTests() must have run first.
    assert _tk_type is not None
    return _tk_type in ("cocoa", "carbon")
def isCarbonTk():
    """
    Returns True if IDLE is using a Carbon Aqua Tk (instead of the
    newer Cocoa Aqua Tk).
    """
    # _initializeTkVariantTests() must have run first.
    assert _tk_type is not None
    return _tk_type == "carbon"
def isCocoaTk():
    """
    Returns True if IDLE is using a Cocoa Aqua Tk.
    """
    # _initializeTkVariantTests() must have run first.
    assert _tk_type is not None
    return _tk_type == "cocoa"
def isXQuartz():
    """
    Returns True if IDLE is using an OS X X11 Tk.
    """
    # _initializeTkVariantTests() must have run first.
    assert _tk_type is not None
    return _tk_type == "xquartz"
def tkVersionWarning(root):
    """
    Returns a string warning message if the Tk version in use appears to
    be one known to cause problems with IDLE.
    1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
    2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
       can still crash unexpectedly.

    Returns False (not a string) when the Tk in use is not affected.
    """
    if isCocoaTk():
        patchlevel = root.tk.call('info', 'patchlevel')
        if patchlevel not in ('8.5.7', '8.5.9'):
            # Only the two patchlevels above are known-problematic.
            return False
        # NOTE(review): these are raw strings, so "\n" is a literal
        # backslash-n in the returned message -- confirm the caller
        # expects that rather than a real newline.
        return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
                r" be unstable.\n"
                r"Visit http://www.python.org/download/mac/tcltk/"
                r" for current information.".format(patchlevel))
    else:
        return False
def addOpenEventSupport(root, flist):
    """
    Register a handler so the application responds to open AppleEvents,
    which makes it feasible to use IDLE as the default application for
    python files.
    """
    def openDocumentHandler(*paths):
        # aquatk invokes this hook on every file-open event; it receives
        # one argument per file that should be opened.
        for p in paths:
            flist.open(p)
    root.createcommand("::tk::mac::OpenDocument", openDocumentHandler)
def hideTkConsole(root):
    """Hide the Tk console window, if this Tk build provides one."""
    try:
        root.tk.call('console', 'hide')
    except tkinter.TclError:
        # Some versions of the Tk framework don't have a console object
        pass
def overrideRootMenu(root, flist):
    """
    Replace the Tk root menu by something that is more appropriate for
    IDLE with an Aqua Tk.
    """
    # The menu that is attached to the Tk root (".") is also used by AquaTk for
    # all windows that don't specify a menu of their own. The default menubar
    # contains a number of menus, none of which are appropriate for IDLE. The
    # most annoying of those is an 'About Tcl/Tk...' menu in the application
    # menu.
    #
    # This function replaces the default menubar by a mostly empty one, it
    # should only contain the correct application menu and the window menu.
    #
    # Due to a (mis-)feature of TkAqua the user will also see an empty Help
    # menu.
    from tkinter import Menu  # BUGFIX: was "Menu, Text, Text" -- Text was imported twice and never used
    from idlelib import Bindings
    from idlelib import WindowList
    # Cleanup: removed unused imports (EditorWindow.prepstr, get_accelerator,
    # MultiCall.MultiCallCreator) -- none were referenced in this function.
    closeItem = Bindings.menudefs[0][1][-2]
    # Remove the last 3 items of the file menu: a separator, close window and
    # quit. Close window will be reinserted just above the save item, where
    # it should be according to the HIG. Quit is in the application menu.
    del Bindings.menudefs[0][1][-3:]
    Bindings.menudefs[0][1].insert(6, closeItem)
    # Remove the 'About' entry from the help menu, it is in the application
    # menu
    del Bindings.menudefs[-1][1][0:2]
    # Remove the 'Configure Idle' entry from the options menu, it is in the
    # application menu as 'Preferences'
    del Bindings.menudefs[-2][1][0]
    menubar = Menu(root)
    root.configure(menu=menubar)
    menudict = {}
    menudict['windows'] = menu = Menu(menubar, name='windows')
    menubar.add_cascade(label='Window', menu=menu, underline=0)
    def postwindowsmenu(menu=menu):
        # Rebuild the Window menu from the current list of open windows.
        end = menu.index('end')
        if end is None:
            end = -1
        if end > 0:
            menu.delete(0, end)
        WindowList.add_windows_to_menu(menu)
    WindowList.register_callback(postwindowsmenu)
    def about_dialog(event=None):
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About IDLE')
    def config_dialog(event=None):
        from idlelib import configDialog
        # Ensure that the root object has an instance_dict attribute,
        # mirrors code in EditorWindow (although that sets the attribute
        # on an EditorWindow instance that is then passed as the first
        # argument to ConfigDialog)
        root.instance_dict = flist.inversedict  # BUGFIX: was assigned twice in a row
        configDialog.ConfigDialog(root, 'Settings')
    def help_dialog(event=None):
        from idlelib import textView
        fn = path.join(path.abspath(path.dirname(__file__)), 'help.txt')
        textView.view_file(root, 'Help', fn)
    root.bind('<<about-idle>>', about_dialog)
    root.bind('<<open-config-dialog>>', config_dialog)
    root.createcommand('::tk::mac::ShowPreferences', config_dialog)
    if flist:
        root.bind('<<close-all-windows>>', flist.close_all_callback)
        # The binding above doesn't reliably work on all versions of Tk
        # on MacOSX. Adding command definition below does seem to do the
        # right thing for now.
        root.createcommand('exit', flist.close_all_callback)
    if isCarbonTk():
        # for Carbon AquaTk, replace the default Tk apple menu
        menudict['application'] = menu = Menu(menubar, name='apple')
        menubar.add_cascade(label='IDLE', menu=menu)
        Bindings.menudefs.insert(0,
            ('application', [
                ('About IDLE', '<<about-idle>>'),
                None,
            ]))
        tkversion = root.tk.eval('info patchlevel')
        if tuple(map(int, tkversion.split('.'))) < (8, 4, 14):
            # for earlier AquaTk versions, supply a Preferences menu item
            Bindings.menudefs[0][1].append(
                ('_Preferences....', '<<open-config-dialog>>'),
            )
    if isCocoaTk():
        # replace default About dialog with About IDLE one
        root.createcommand('tkAboutDialog', about_dialog)
        # replace default "Help" item in Help menu
        root.createcommand('::tk::mac::ShowHelp', help_dialog)
        # remove redundant "IDLE Help" from menu
        del Bindings.menudefs[-1][1][0]
def setupApp(root, flist):
    """
    Perform initial OS X customizations if needed.
    Called from PyShell.main() after initial calls to Tk()

    There are currently three major versions of Tk in use on OS X:
        1. Aqua Cocoa Tk (native default since OS X 10.6)
        2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
        3. X11 (supported by some third-party distributors, deprecated)
    There are various differences among the three that affect IDLE
    behavior, primarily with menus, mouse key events, and accelerators.
    Some one-time customizations are performed here.
    Others are dynamically tested throughout idlelib by calls to the
    isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
    are initialized here as well.
    """
    # Cache the Tk variant (cocoa/carbon/xquartz/other) for the is*Tk() tests.
    _initializeTkVariantTests(root)
    # The remaining customizations apply only to native (Aqua) Tk builds.
    if isAquaTk():
        hideTkConsole(root)
        overrideRootMenu(root, flist)
        addOpenEventSupport(root, flist)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| ArcherSys/ArcherSys | Lib/idlelib/macosxSupport.py | Python | mit | 25,772 | [
"VisIt"
] | 9768f0fa6d61fddd147d4c5a64fd121ed428199bc8de63fd04d2aa4f6f2d8d63 |
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 Max Hantke, Filipe R.N.C. Maia, Tomas Ekeberg
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
# System packages
import sys, os, numpy
from scipy import constants
# Logging
import logging
logger = logging.getLogger(__name__)
# Condor
import condor
import condor._load_data
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
_data_dir = os.path.dirname(os.path.realpath(__file__)) + "/../data"

_atomic_scattering_factors = condor._load_data.load_atomic_scattering_factors(_data_dir)

# BUGFIX/cleanup: the three accessors below were lambda assignments, each
# followed by a bare string literal. Those strings were dead statements, not
# docstrings. Plain defs attach the documentation properly; names and call
# signatures are unchanged.
def get_atomic_scattering_factors(element):
    """
    Return a 2-dim. array of photon energy [eV] vs. real and imaginary part of
    the atomic scattering factor (forward scattering) for a given element.

    Args:
      :element (str): Element name (abbreviation of the latin name, for example 'He' for helium).
    """
    return _atomic_scattering_factors[element]

_atomic_masses = condor._load_data.load_atomic_masses(_data_dir)

def get_atomic_mass(element):
    """
    Return the atomic mass (standard atomic weight in unit Dalton) for a given element.

    Args:
      :element (str): Element name (abbreviation of the latin name, for example 'He' for helium).
    """
    return _atomic_masses[element]

_atomic_numbers = condor._load_data.load_atomic_numbers(_data_dir)

def get_atomic_number(element):
    """
    Return the atomic number for a given element.

    Args:
      :element (str): Element name (abbreviation of the latin name, for example 'He' for helium).
    """
    return _atomic_numbers[element]

# Atom names (latin abbreviations) for all elements, sorted by atomic number
# (increasing order).
atomic_names = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cp', 'Uut', 'Uuq', 'Uup', 'Uuh', 'Uus', 'Uuo']

# Atomic numbers of all elements in increasing order (parallel to
# atomic_names; a range object, iterable and indexable like a list).
atomic_numbers = range(1, len(atomic_names)+1)
class MaterialType:
    r"""
    Standard material types:

    ================= ====================== =================================================================== ======================
    ``material_type`` :math:`\rho_m` [kg/m3] Atomic composition                                                  Reference
    ================= ====================== =================================================================== ======================
    ``custom``        ``massdensity``        ``atomic_composition``                                              -
    ``'water'``       995 (25 deg. C)        :math:`H_2O`                                                        [ONeil1868]_ p. 1868
    ``'protein'``     1350                   :math:`H_{86}C_{52}N_{13}O_{15}S`                                   [Bergh2008]_
    ``'dna'``         1700                   :math:`H_{11}C_{10}N_4O_6P`                                         [Bergh2008]_
    ``'lipid'``       1000                   :math:`H_{69}C_{36}O_6P`                                            [Bergh2008]_
    ``'cell'``        1000                   :math:`H_{23}C_3NO_{10}S`                                           [Bergh2008]_
    ``'poliovirus'``  1340                   :math:`C_{332652}H_{492388}N_{98245}O_{131196}P_{7501}S_{2340}`     [Molla1991]_
    ``'styrene'``     902 (25 deg. C)        :math:`C_8H_8`                                                      [Haynes2013]_ p. 3-488
    ``'sucrose'``     1581 (17 deg. C)       :math:`C_{12}H_{22}O_{11}`                                          [Lide1998]_ p. 3-172
    ================= ====================== =================================================================== ======================
    """
    # Relative atom counts for each tabulated ``material_type``.
    # (Documented with a comment rather than a bare string: a string literal
    # after an assignment is a dead statement, not a docstring.)
    atomic_compositions = {
        'water':      { "H" :     2., "C" :     0., "N" :     0., "O" :     1., "P" :    0., "S" :    0. }, # Water H2O
        'protein':    { "H" :    86., "C" :    52., "N" :    13., "O" :    15., "P" :    0., "S" :    1. }, # Bergh et al. 2008: H86 C52 N13 O15 S
        'dna':        { "H" :    11., "C" :    10., "N" :     4., "O" :     6., "P" :    1., "S" :    0. }, # Bergh et al. 2008: H11 C10 N4 O6 P
        'lipid':      { "H" :    69., "C" :    36., "N" :     0., "O" :     6., "P" :    1., "S" :    0. }, # Bergh et al. 2008: H69 C36 O6 P
        'cell':       { "H" :    23., "C" :     3., "N" :     1., "O" :    10., "P" :    0., "S" :    1. }, # Bergh et al. 2008: H23 C3 N O10 S
        'poliovirus': { "H" :492388., "C" :332652., "N" : 98245., "O" :131196., "P" : 7501., "S" : 2340. }, # Molla et al. 1991: C332652 H492388 N98245 0131196 P7501 S2340
        'styrene':    { "H" :     8., "C" :     8., "N" :     0., "O" :     0., "P" :    0., "S" :    0. }, # Styrene C8H8
        'sucrose':    { "H" :    22., "C" :    12., "N" :     0., "O" :    11., "P" :    0., "S" :    0. }, # Sucrose C12H22O11
    }
    # Mass densities in unit kg/m3 for each tabulated ``material_type``.
    mass_densities = {
        'water':      995.,  # at 25 C: O'Neil, M.J. (ed.). The Merck Index - An Encyclopedia of Chemicals, Drugs, and Biologicals. Cambridge, UK: Royal Society of Chemistry, 2013., p. 1868
        'protein':    1350., # Bergh et al. 2008
        'dna':        1700., # Bergh et al. 2008
        'lipid':      1000., # Bergh et al. 2008
        'cell':       1000., # Bergh et al. 2008
        'poliovirus': 1340., # Dans et al. 1966
        'styrene':    902.,  # at 25 C: Haynes, W.M. (ed.). CRC Handbook of Chemistry and Physics. 94th Edition. CRC Press LLC, Boca Raton: FL 2013-2014, p. 3-488
        'sucrose':    1581., # at 17 C: Lide, D.R. (ed.). CRC Handbook of Chemistry and Physics. 79th ed. Boca Raton, FL: CRC Press Inc., 1998-1999., p. 3-172
    }
class AbstractMaterial:
    """
    Base class for material models.

    Subclasses provide ``get_f(photon_wavelength)`` (effective complex forward
    scattering factor) and ``get_scatterer_density()`` (scatterer number
    density in unit inverse cubic meter); every optical quantity below is
    derived from these two.
    """
    def __init__(self):
        pass

    def get_n(self, photon_wavelength):
        r"""
        Return complex refractive index at a given wavelength (Henke, 1994)

        .. math::

          n = 1 - \frac{ r_0 }{ 2\pi } \lambda^2 \sum_i \rho_i f_i(0)

        :math:`r_0`: classical electron radius

        :math:`\rho_i`: atomic number density of atom species :math:`i`

        :math:`f_i(0)`: atomic scattering factor (forward scattering) of atom species :math:`i`

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        f = self.get_f(photon_wavelength)
        scatterer_density = self.get_scatterer_density()
        r_0 = constants.value("classical electron radius")
        n = 1 - r_0/2/numpy.pi * photon_wavelength**2 * f * scatterer_density
        return n

    def get_transmission(self, thickness, photon_wavelength):
        r"""
        Return transmission coefficient :math:`T` for given material thickness :math:`t` and wavelength :math:`\lambda` [Henke1993]_

        .. math::

          T = e^{-\rho\,\mu_a(\lambda)\,t}

        :math:`\rho`: Average atom density

        :math:`\mu_a(\lambda)`: Photoabsorption cross section at photon energy :math:`\lambda`

        Args:
          :thickness (float): Material thickness in unit meter

          :photon_wavelength (float): Photon wavelength in unit meter

        .. [Henke1993] B.L. Henke, E.M. Gullikson, and J.C. Davis. X-ray interactions: photoabsorption, scattering, transmission, and reflection at E=50-30000 eV, Z=1-92, Atomic Data and Nuclear Data Tables Vol. 54 (no.2), 181-342 (July 1993). See also `http://henke.lbl.gov/ <http://henke.lbl.gov/>`_.
        """
        mu = self.get_photoabsorption_cross_section(photon_wavelength=photon_wavelength)
        rho = self.get_scatterer_density()
        return numpy.exp(-rho*mu*thickness)

    def get_attenuation_length(self, photon_wavelength):
        r"""
        Return the absorption length in unit meter for the given wavelength :math:`\lambda`

        .. math::

          \mu = \frac{1}{\rho\,\mu_a(\lambda)}

        :math:`\rho`: Average atom density

        :math:`\mu_a(\lambda)`: Photoabsorption cross section at photon energy :math:`\lambda`

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        mu = self.get_photoabsorption_cross_section(photon_wavelength=photon_wavelength)
        rho = self.get_scatterer_density()
        return (1./(mu*rho))

    def get_dn(self, photon_wavelength):
        r"""
        Return :math:`\delta n` at a given wavelength

        .. math::

          \delta n = 1 - n

        :math:`n`: Refractive index

        See also :meth:`condor.utils.material.AbstractMaterial.get_n`

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        return (1-self.get_n(photon_wavelength))

    # convenience functions
    # n = 1 - delta - i beta
    def get_delta(self, photon_wavelength):
        r"""
        Return :math:`\delta` (real part of :math:`\delta n`) at a given wavelength

        .. math::

          n = 1 - \delta n = 1 - \delta - i \beta

        :math:`n`: Refractive index

        See also :meth:`condor.utils.material.AbstractMaterial.get_n`

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        return self.get_dn(photon_wavelength=photon_wavelength).real

    def get_beta(self, photon_wavelength):
        r"""
        Return :math:`\beta` (imaginary part of :math:`\delta n`) at a given wavelength

        .. math::

          n = 1 - \delta n = 1 - \delta - i \beta

        :math:`n`: Refractive index

        See also :meth:`condor.utils.material.AbstractMaterial.get_n`

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        return self.get_dn(photon_wavelength=photon_wavelength).imag

    def get_photoabsorption_cross_section(self, photon_wavelength):
        r"""
        Return the photoabsorption cross section :math:`\mu_a` at a given wavelength :math:`\lambda`

        .. math::

          \mu_a = 2 r_0 \lambda f_2

        :math:`r_0`: classical electron radius

        :math:`f_2`: imaginary part of the atomic scattering factor

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        # Cleanup: removed unused locals h, c, qe (Planck constant, speed of
        # light, elementary charge were fetched but never used here).
        r_0 = constants.value("classical electron radius")
        mu = 2*r_0*photon_wavelength*self.get_f(photon_wavelength).imag
        return mu
class ElectronDensityMaterial(AbstractMaterial):
    r"""
    Material model described solely by its electron density.

    Scattering is treated as Thomson scattering from free electrons at the
    given density.

    Args:
      :electron_density: (float): Electron density in unit inverse cubic meter
    """
    def __init__(self, electron_density):
        AbstractMaterial.__init__(self)
        self.electron_density = electron_density

    def get_conf(self):
        """Return the configuration dictionary for this material."""
        return {"electron_density": self.electron_density}

    def get_f(self, photon_energy):
        """Forward scattering factor of a free electron (energy-independent)."""
        return complex(1., 0.)

    def get_scatterer_density(self):
        """The scatterers are the electrons themselves."""
        return self.electron_density
class AtomDensityMaterial(AbstractMaterial):
    r"""
    Class for material model

    The material is described by a mass density and an atomic composition;
    optical constants are derived from the tabulated scattering factors.

    Args:
      :material_type (str): The material type can be either ``custom`` or one of the standard types, i.e. tabulated combinations of massdensity and atomic composition, listed here :class:`condor.utils.material.MaterialType`.

    Kwargs:
      :massdensity (float): Mass density in unit kilogram per cubic meter (default ``None``)

      :atomic_composition (dict): Dictionary of key-value pairs for atom species (e.g. ``'H'`` for hydrogen) and concentration (default ``None``)
    """
    def __init__(self, material_type, massdensity = None, atomic_composition = None):
        AbstractMaterial.__init__(self)
        self.clear_atomic_composition()
        # "custom" material: both massdensity and atomic_composition required.
        if atomic_composition is not None and massdensity is not None and (material_type is None or material_type == "custom"):
            for element,concentration in atomic_composition.items():
                self.set_atomic_concentration(element, concentration)
            self.massdensity = massdensity
        # Tabulated material: only material_type may be given.
        elif material_type is not None and atomic_composition is None and massdensity is None:
            for element, concentration in MaterialType.atomic_compositions[material_type].items():
                self.set_atomic_concentration(element, concentration)
            self.massdensity = MaterialType.mass_densities[material_type]
        else:
            log_and_raise_error(logger, "Invalid arguments in Material initialization.")

    def get_conf(self):
        """Return a self-contained ("custom") configuration dictionary."""
        conf = {}
        conf["material_type"] = "custom"
        conf["atomic_composition"] = self.get_atomic_composition()
        conf["massdensity"] = self.massdensity
        return conf

    def clear_atomic_composition(self):
        """
        Empty atomic composition dictionary
        """
        self._atomic_composition = {}

    def set_atomic_concentration(self, element, relative_concentration):
        r"""
        Set the concentration of a given atomic species

        Args:
          :element (str): Atomic species (e.g. ``'H'`` for hydrogen)

          :relative_concentration (float): Relative quantity of atoms of the given atomic species with respect to the others (e.g. for water: hydrogen concentration ``2.``, oxygen concentration ``1.``)
        """
        if element not in atomic_names:
            # NOTE(review): presumably log_and_raise_error raises, so invalid
            # names never reach the assignment below -- verify.
            log_and_raise_error(logger, "Cannot add element \"%s\". Invalid name." % element)
        self._atomic_composition[element] = relative_concentration

    def get_atomic_composition(self, normed=False):
        r"""
        Return dictionary of atomic concentrations

        Args:
          :normed (bool): If ``True`` the concentrations are rescaled by a common factor such that their sum equals 1 (default ``False``)
        """
        # Work on a copy so the stored composition is never mutated.
        atomic_composition = self._atomic_composition.copy()
        if normed:
            s = numpy.array(list(atomic_composition.values()), dtype=numpy.float64).sum()
            for element in atomic_composition.keys():
                atomic_composition[element] /= s
        return atomic_composition

    def get_f(self, photon_wavelength):
        r"""
        Get effective average complex scattering factor for forward scattering at a given photon wavelength from Henke tables

        Args:
          :photon_wavelength (float): Photon wavelength in unit meter
        """
        atomic_composition = self.get_atomic_composition(normed=True)
        # NOTE(review): r_0 is fetched but unused in this method.
        r_0 = constants.value("classical electron radius")
        h = constants.h
        c = constants.c
        qe = constants.e
        # Convert wavelength [m] to photon energy [eV] for the table lookup.
        photon_energy_eV = h*c/photon_wavelength/qe
        f_sum = complex(0.,0.)
        for element in atomic_composition.keys():
            # sum up average atom factor
            f = get_f_element(element,photon_energy_eV)
            f_sum += atomic_composition[element] * f
        return f_sum

    def get_scatterer_density(self):
        r"""
        Return total atom density :math:`\rho` in unit inverse cubic meter

        .. math::

          \rho = \frac{\rho_m}{\sum_i c_i m_i}

        :math:`\rho_m`: Mass density of material

        :math:`c_i`: Normalised fraction of atom species :math:`i`

        :math:`m_i`: Standard atomic mass of atom species :math:`i`
        """
        u = constants.value("atomic mass constant")
        atomic_composition = self.get_atomic_composition(normed=True)
        M = 0
        for element in atomic_composition.keys():
            # sum up average mass
            M += atomic_composition[element]*get_atomic_mass(element)*u
        number_density = self.massdensity/M
        return number_density

    def get_electron_density(self):
        r"""
        Return electron density :math:`\rho_e` in unit inverse cubic meter

        .. math::

          \rho_e = \rho_m \, \frac{\sum_i c_i Z_i}{\sum_i c_i m_i}

        :math:`\rho_m`: Mass density of material

        :math:`c_i`: Normalised fraction of atom species :math:`i`

        :math:`m_i`: Standard atomic mass of atom species :math:`i`

        :math:`Z_i`: Atomic number of atom species :math:`i`
        """
        u = constants.value("atomic mass constant")
        atomic_composition = self.get_atomic_composition(normed=True)
        M = 0
        Q = 0
        for element in atomic_composition.keys():
            # sum up electrons
            M += atomic_composition[element]*get_atomic_mass(element)*u
            Q += atomic_composition[element]*get_atomic_number(element)
        electron_density = Q*self.massdensity/M
        return electron_density
def get_f_element(element, photon_energy_eV):
    r"""
    Get the scattering factor for an element through linear interpolation of the tabulated values (Henke tables)

    Args:
      :element (str): Atomic species (e.g. ``'H'`` for hydrogen)

      :photon_energy_eV: Photon energy in unit eV
    """
    table = get_atomic_scattering_factors(element)
    # Column 0: photon energy [eV]; columns 1 and 2: f1 (real) and f2 (imag).
    energies = table[:, 0]
    real_part = numpy.interp(photon_energy_eV, energies, table[:, 1])
    imag_part = numpy.interp(photon_energy_eV, energies, table[:, 2])
    return complex(real_part, imag_part)
class MaterialMap:
    """
    A 3D map combining several materials, each weighted by its own density
    map of a common shape.

    Optical quantities are computed as density-weighted sums over the
    registered materials.
    """
    def __init__(self, shape):
        """
        Args:
          :shape (tuple): 3D shape (three ints) of all density maps
        """
        if len(shape) != 3:
            # BUGFIX: the message was passed with a stray extra argument
            # instead of %-formatting (and "invald" was a typo).
            log_and_raise_error(logger, "%s is an invalid shape for initialisation of MaterialMap." % str(shape))
        self._shape = tuple(shape)
        # BUGFIX: these lists were never initialised, so add_material failed
        # with an AttributeError.
        self.materials = []
        self.density_maps = []

    def add_material(self, material, density_map):
        """
        Register a material together with its density map.

        Args:
          :material: AbstractMaterial (or subclass) instance

          :density_map (numpy.ndarray): Density map of shape ``shape``
        """
        # BUGFIX: the original checked isinstance against the undefined name
        # "Material"; the base class in this module is AbstractMaterial.
        if not isinstance(material, AbstractMaterial):
            log_and_raise_error(logger, "Cannot add material %s. It is not an instance of AbstractMaterial." % str(material))
        if density_map.shape != self._shape:
            log_and_raise_error(logger, "Cannot add material. Density map has incompatible shape: %s. Should be %s." % (str(density_map.shape), str(self._shape)))
        self.materials.append(material)
        self.density_maps.append(density_map)

    def get_n(self, photon_wavelength):
        """Complex refractive index map, n = 1 - dn."""
        dn = self.get_dn(photon_wavelength)
        n = 1 - dn
        return n

    def get_dn(self, photon_wavelength):
        """Density-weighted sum of the materials' refractive index decrements."""
        dn = numpy.zeros(shape=self._shape, dtype=numpy.complex128)
        for mat, dmap in zip(self.materials, self.density_maps):
            dn += mat.get_dn(photon_wavelength) * dmap
        return dn

    def get_beta(self, photon_wavelength):
        """Imaginary part of dn (absorption)."""
        dn = self.get_dn(photon_wavelength)
        return dn.imag

    def get_delta(self, photon_wavelength):
        """Real part of dn (phase shift)."""
        dn = self.get_dn(photon_wavelength)
        return dn.real

    def get_photoabsorption_cross_section(self, photon_wavelength):
        """Density-weighted sum of the materials' photoabsorption cross sections."""
        pacs = numpy.zeros(shape=self._shape, dtype=numpy.float64)
        for mat, dmap in zip(self.materials, self.density_maps):
            pacs += mat.get_photoabsorption_cross_section(photon_wavelength) * dmap
        return pacs

    def get_f(self, photon_wavelength):
        """Density-weighted sum of the materials' scattering factors."""
        f = numpy.zeros(shape=self._shape, dtype=numpy.complex128)
        for mat, dmap in zip(self.materials, self.density_maps):
            f += mat.get_f(photon_wavelength) * dmap
        # BUGFIX: the original returned the undefined name "trans".
        return f

    def get_electron_density(self, photon_wavelength=None):
        """
        Density-weighted sum of the materials' electron densities.

        Args:
          :photon_wavelength: unused; kept (now optional) for backward
            compatibility -- electron density does not depend on wavelength.
        """
        ed = numpy.zeros(shape=self._shape, dtype=numpy.float64)
        for mat, dmap in zip(self.materials, self.density_maps):
            # BUGFIX: material get_electron_density() takes no wavelength
            # argument; passing one raised a TypeError.
            ed += mat.get_electron_density() * dmap
        return ed
#class DensityMap:
#
# def __init__(self, shape):
# self.density = numpy.zeros(shape=(shape[0], shape[1], shape[2], len(atomic_numbers.keys())),dtype=numpy.float64)
#
# def get_n(self, wavelength):
# """
# Obtains complex refractive index.
# Henke (1994): n = 1 - r_0/(2pi) lambda^2 sum_q rho_q f_q(0)
# r_0: classical electron radius
# rho_q: atomic number density of atom species q
# f_q(0): atomic scattering factor (forward scattering) of atom species q
# """
#
# r_0 = constants.value("classical electron radius")
# h = constants.h
# c = constants.c
# qe = constants.e
# photon_energy_eV = h*c/photon_wavelength/qe
#
# s = numpy.zeros(shape=(shape[0], shape[1], shape[2]), dtype=numpy.complex128)
# for (el, de) in zip(atomic_numbers.keys(), self.density):
# s += de * get_f_element(el, photon_energy_eV)
#
# n = 1 - r_0 / (2*numpy.pi) * wavelength**2 * s
#
# return n
#
# def get_dn(self, wavelength):
# return (1-self.get_n(wavelength))
| FXIhub/condor | condor/utils/material.py | Python | bsd-2-clause | 23,942 | [
"Dalton"
] | 550d07c04637f8ea86ddb7650420bb491416acce506c6ec08a7ad0ecbb1d2d76 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The Clear BSD License
# Copyright (c) 2019 Carnegie Mellon University
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted (subject to the limitations in the disclaimer below) provided that
# the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Carnegie Mellon University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import tempfile
import numpy as np
import pytest
from bigdl.chronos.simulator import DPGANSimulator
from bigdl.chronos.simulator.doppelganger.output import Output, OutputType, Normalization
from bigdl.orca.test_zoo_utils import ZooTestCase
def get_train_data():
    """Download the small doppelGANger training archive and return it.

    The file is fetched from the FTP server named by the ``FTP_URI``
    environment variable, buffered entirely in memory, and opened with
    ``np.load`` (an ``NpzFile``; callers index it by array name and are
    expected to ``close()`` it).
    """
    import os
    import io
    import shutil
    import urllib.request as req
    url = f'{os.getenv("FTP_URI")}/analytics-zoo-data/apps/doppelGANger_data/data_train_small.npz'
    buf = io.BytesIO()
    # Stream the remote file into the in-memory buffer, then rewind so
    # np.load reads it from the beginning.
    with req.urlopen(url) as resp:
        shutil.copyfileobj(resp, buf)
    buf.seek(0)
    return np.load(buf)
class TestDoppelganer(ZooTestCase):
    """Tests for the DPGANSimulator (doppelGANger) time-series simulator.

    NOTE(review): the class name is missing a 'g' ("Doppelganer"); left
    as-is since pytest collects it by the Test* prefix regardless.
    """
    def setup_method(self, method):
        # No per-test setup required.
        pass
    def teardown_method(self, method):
        # No per-test teardown required.
        pass
    def test_output_value(self):
        """Output enum values must mirror their names, and CONTINUOUS
        outputs must reject a missing normalization."""
        attribute_outputs = [Output(type_=OutputType.DISCRETE, dim=2),
                             Output(type_=OutputType.CONTINUOUS, dim=1,
                                    normalization=Normalization.MINUSONE_ONE)]
        assert set([val.type_.value for val in attribute_outputs]) == \
            set([val.type_.name for val in attribute_outputs])
        # illegal input.
        with pytest.raises(Exception):
            [Output(type_=OutputType.CONTINUOUS, dim=2, normalization=None)]
    def test_init_doppelganer(self):
        """End-to-end smoke test: fit on the small remote dataset, generate
        one sample, check shapes, and round-trip save/load."""
        df = get_train_data()
        feature_outputs = [Output(type_=OutputType.CONTINUOUS,
                                  dim=1,
                                  normalization=Normalization.MINUSONE_ONE)]
        attribute_outputs = [Output(type_=OutputType.DISCRETE, dim=9),
                             Output(type_=OutputType.DISCRETE, dim=3),
                             Output(type_=OutputType.DISCRETE, dim=2)]
        doppelganger = DPGANSimulator(L_max=550,
                                      sample_len=10,
                                      feature_dim=1,
                                      num_real_attribute=3,
                                      num_threads=1)
        doppelganger.fit(data_feature=df['data_feature'],
                         data_attribute=df['data_attribute'],
                         data_gen_flag=df['data_gen_flag'],
                         feature_outputs=feature_outputs,
                         attribute_outputs=attribute_outputs,
                         epoch=2,
                         batch_size=32)
        # generate() defaults to a single sample: shapes are (1, L_max, ...).
        feature, attribute, gen_flags, lengths = doppelganger.generate()
        assert feature.shape == (1, doppelganger.L_max, 1)
        assert attribute.shape == (1, df['data_attribute'].shape[-1])
        assert gen_flags.shape == (1, doppelganger.L_max) and (gen_flags[0, :] == 1).all()
        assert lengths[0] == doppelganger.L_max
        with tempfile.TemporaryDirectory() as tf:
            doppelganger.save(tf)
            doppelganger.load(tf)
        df.close()
        # illegal input
        # NOTE(review): presumably fit() raises because L_max=551 is not a
        # multiple of sample_len=10 — confirm against DPGANSimulator docs.
        df = get_train_data()
        feature_outputs = [Output(type_=OutputType.CONTINUOUS,
                                  dim=1,
                                  normalization=Normalization.MINUSONE_ONE)]
        attribute_outputs = [Output(type_=OutputType.DISCRETE, dim=9),
                             Output(type_=OutputType.DISCRETE, dim=3),
                             Output(type_=OutputType.DISCRETE, dim=2)]
        doppelganger = DPGANSimulator(L_max=551,
                                      sample_len=10,
                                      feature_dim=1,
                                      num_real_attribute=3,
                                      num_threads=1)
        with pytest.raises(RuntimeError):
            doppelganger.fit(data_feature=df['data_feature'],
                             data_attribute=df['data_attribute'],
                             data_gen_flag=df['data_gen_flag'],
                             feature_outputs=feature_outputs,
                             attribute_outputs=attribute_outputs)
        df.close()
| intel-analytics/BigDL | python/chronos/test/bigdl/chronos/simulator/test_doppelganger_simulator.py | Python | apache-2.0 | 6,273 | [
"ORCA"
] | bb35381ede393335d7005fb5920be1d6a998d5e8172c7e6584c0674e681e5683 |
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from os import path
import io
import string
from pysces import model, PyscesModel
from ._paths import get_model_name
__all__ = ['psc_to_str',
'mod_to_str',
'strip_fixed',
'augment_fix_sting',
'fix_metabolite',
'fix_metabolite_ss']
def psc_to_str(name):
    """
    Read a psc model file from the PySCeS model directory and return its
    contents as a string.

    Parameters
    ----------
    name : str
        A string containing a filename. A ``.psc`` suffix is appended if
        it is not already present.

    Returns
    -------
    str
        The full text of the psc file found in ``PyscesModel.MODEL_DIR``.
    """
    if name[-4:] != '.psc':
        name += '.psc'
    # Use ``open`` rather than the Python-2-only ``file`` builtin (which
    # raises NameError on Python 3); the context manager guarantees the
    # handle is closed even if reading fails.
    with open(path.join(PyscesModel.MODEL_DIR, name), 'r') as F:
        fstr = F.read()
    return fstr
def mod_to_str(mod):
    """
    Converts an instantiated PySCeS model to a string.

    Parameters
    ----------
    mod : PysMod
        A Pysces model.

    Returns
    -------
    str
        A string representation of the contents of a PySCeS model file.
    """
    # ``showModel`` writes the serialized model into any file-like object,
    # so capture it with an in-memory text buffer.
    buffer = io.StringIO()
    try:
        mod.showModel(filename=buffer)
        return buffer.getvalue()
    finally:
        buffer.close()
def strip_fixed(fstr):
    """
    Take a psc file string and return two strings: (1) The file header
    containing the "FIX: " line and (2) the remainder of file.

    Parameters
    ----------
    fstr : str
        String representation of psc file.

    Returns
    -------
    tuple of str
        1st element contains file header (or None when no "FIX:" line is
        present), second the remainder of the file.

    See also
    --------
    psc_to_str
    mod_to_str
    """
    header = None
    remainder = []
    for line in fstr.splitlines(True):
        if line.startswith("FIX:"):
            header = line.strip()
            # Replace the header with a blank line so the remainder keeps
            # its original line count.
            remainder.append('\n')
        else:
            remainder.append(line)
    return header, ''.join(remainder)
def augment_fix_sting(OrigFix, fix):
    """
    Adds a species to a psc file header.

    Note: the function name ("sting") is a historical typo kept for
    backward compatibility — it is exported via ``__all__``.

    Parameters
    ----------
    OrigFix : str
        A psc file header
    fix : str
        Additional species to add to psc file header.

    Returns
    -------
    str
        A new psc file header that contains the contents of the original
        together with the new fixed species.
    """
    # Append the new species, separated from the header by a single space.
    return '%s %s' % (OrigFix, fix)
def fix_metabolite(mod, fix, model_name=None):
    """
    Fix a metabolite in a model and return a new model with the fixed
    metabolite.

    Parameters
    ----------
    mod : PysMod
        The original model.
    fix : str
        The metabolite to fix. Must be one of ``mod.species``.
    model_name : str, optional (Default : none)
        The file name to use when saving the model (in psc/orca).
        If None it defaults to original_model_name_fix.

    Returns
    -------
    PysMod
        A new model instance with an additional fixed species.
    """
    assert fix in mod.species, "\nInvalid fixed species."
    if model_name is None:
        model_name = get_model_name(mod) + '_' + fix
    # Serialize the model, pull out its "FIX:" header, extend the header
    # with the new species, and rebuild a model from the edited text.
    serialized = mod_to_str(mod)
    header, body = strip_fixed(serialized)
    new_header = augment_fix_sting(header, fix)
    return model(model_name, loader="string",
                 fString=new_header + '\n' + body)
def fix_metabolite_ss(mod, fix, model_name=None):
    """
    Fix a metabolite at its steady state in a model and return a new
    model with the fixed metabolite.

    Parameters
    ----------
    mod : PysMod
        The original model.
    fix : str
        The metabolite to fix.
    model_name : str, optional (Default : none)
        The file name to use when saving the model (in psc/orca).
        If None it defaults to original_model_name_fix.

    Returns
    -------
    PysMod
        A new model instance with an additional fixed species, whose value
        is set to the original model's steady-state value.

    See Also
    --------
    fix_metabolite
    """
    # Solve for the steady state first so the fixed value is current.
    mod.doState()
    ss_value = getattr(mod, fix)
    new_mod = fix_metabolite(mod, fix, model_name)
    setattr(new_mod, fix, ss_value)
    return new_mod
| PySCeS/PyscesToolbox | psctb/modeltools/_pscmanipulate.py | Python | bsd-3-clause | 4,111 | [
"ORCA",
"PySCeS"
] | 2b3c033b2e5c3a4856cd35a4a2a31d1f02e8e493e8b6746438c72fd6897ebaf4 |
from mayavi import mlab
import sys
sys.path.append("..")
import data_tools as dt
import plotting as plot
import linear_algebra as la
import numpy as np
def plot_coords_interactive(coords, res, color=(1,0,0), radius=None, out_path=None):
    """Render a 3D tube through ``coords`` with Mayavi.

    coords is an (n, 3) array of x/y/z positions; res is the genomic
    resolution used to derive a to-scale tube radius when ``radius`` is
    not given. If ``out_path`` is set the figure is also saved to disk
    before the interactive window opens.
    """
    if radius is None:
        radius = calculateRadius(coords, res)
    mlab.figure(bgcolor=(1, 1, 1))  # white background
    mlab.plot3d(coords[:, 0], coords[:, 1], coords[:, 2],
                tube_radius=radius, color=color)
    if out_path is not None:
        mlab.savefig(out_path)
    mlab.show()
def calculateRadius(coords, res):
    """Calculate to-scale radius based on Kuhn length and diameter of chromatin"""
    # Physical constants from Rippe (2001).
    kuhn_length = 289           # Kuhn length (nm)
    bp_per_kuhn = 30000.        # base pairs per Kuhn length
    chromatin_diameter = 30     # diameter of heterochromatin (nm)
    # Average model-space distance between consecutive loci.
    gaps = [la.calcDistance(coords[i - 1], coords[i])
            for i in range(1, len(coords))]
    avg_dist = sum(gaps) / len(gaps)
    # Physical distance (nm) between neighboring loci at this resolution.
    physical_dist = kuhn_length * (res / bp_per_kuhn) ** (1. / 2)
    # Scale the real chromatin radius into model-space units.
    conversion = avg_dist / physical_dist
    return chromatin_diameter / 2 * conversion
# Load the three reconstructed structures for GM12878 chr22 at 10-kb
# resolution, one per reconstruction method (mMDS, cMDS, miniMDS).
mmds_structure = dt.structure_from_file("hic_data/GM12878_combined_22_10kb_mmds_coords.tsv")
cmds_structure = dt.structure_from_file("hic_data/GM12878_combined_22_10kb_cmds_coords.tsv")
minimds_structure = dt.structure_from_file("hic_data/GM12878_combined_22_10kb_minimds_coords.tsv")
mmds_res = mmds_structure.chrom.res
cmds_res = cmds_structure.chrom.res
minimds_res = minimds_structure.chrom.res
# All three structures must share one resolution for a like-for-like figure.
assert mmds_res == cmds_res == minimds_res
res = mmds_res
# Render one figure panel per method; MOGEN output ships as a bare
# coordinate table, so it goes through the local plotting helper instead.
plot.plot_structure_interactive(mmds_structure, out_path="Fig9A.png")
plot.plot_structure_interactive(cmds_structure, out_path="Fig9B.png")
plot.plot_structure_interactive(minimds_structure, out_path="Fig9C.png")
plot_coords_interactive(np.loadtxt("MOGEN/examples/hiC/output/GM12878_combined_22_10kb_rep1_coords.tsv"), res, out_path="Fig9D.png")
| seqcode/miniMDS | scripts/fig9.py | Python | mit | 1,962 | [
"Mayavi"
] | 9901972d3e45dca87f5333da22a7e543df6dda2e0622da764a8aa7e55d667da5 |
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
import copy
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.testing import assert_allclose, clear_called_flag_recorder
from nbla_test_utils import list_context
@pytest.mark.parametrize("seed", [313])
def test_graph_logreg(seed):
    """Build a one-layer softmax-regression graph in auto-forward mode and
    verify analytical gradients against numerical gradients."""
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4], need_grad=True)
    w = nn.Variable([12, 5], need_grad=True)
    b = nn.Variable([5], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    w.d = rng.randn(*w.shape)
    b.d = rng.randn(*b.shape)
    t.d = rng.randint(0, 5, size=t.shape)
    nn.set_default_context(nn.Context())
    # Forwardprop by definition
    with nn.auto_forward():
        z = F.affine(x, w, b, 1)
        l = F.softmax_cross_entropy(z, t, 1)
        L = F.mean(l)
    # Backprop
    # Diff should be initialized since they are always accumulated
    x.g = 0
    w.g = 0
    b.g = 0
    L.backward(clear_buffer=True)
    # Overwrite x's grad with noise to confirm the numerical/analytical
    # comparison below does not depend on leftover gradient state.
    x.g = rng.randn(*x.shape)
    inputs = [x, w, b]
    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert_allclose(ngrad, agrad, atol=1e-2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("model", ["mlp", "recurrent", "convolution"])
def test_graph_model(model, seed):
    """Gradient check (analytical vs numerical) for three small model
    topologies: a 2-layer MLP, a weight-shared recurrent MLP, and a
    conv+affine network."""
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)
    nn.set_default_context(nn.Context())
    # Forwardprop by definition
    nn.clear_parameters()
    if model == "mlp":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    elif model == "recurrent":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 8)
        z2 = F.relu(z, inplace=True)
        h = z2
        # Reusing the 'fc2' scope ties the two steps to shared weights.
        for _ in range(2):
            with nn.parameter_scope('fc2'):
                h = PF.affine(h, 8)
            h = F.relu(h, inplace=True)
        with nn.parameter_scope('fc3'):
            z3 = PF.affine(h, 5)
    elif model == "convolution":
        with nn.parameter_scope('conv1'):
            z = PF.convolution(x, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    else:
        raise ValueError()
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)
    # Forwardprop
    L.forward(clear_no_need_grad=True)
    # Backprop
    # Diff should be initialized since they are always accumulated
    x.grad.zero()
    L.backward(clear_buffer=True)
    # Perturb x's grad and zero the parameter grads before the combined
    # numerical/analytical comparison.
    x.g = rng.randn(*x.shape)
    parameters = nn.get_parameters()
    for param in parameters.values():
        param.grad.zero()
    inputs = [x] + list(parameters.values())
    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert_allclose(ngrad, agrad, atol=1.05e-2)
@pytest.mark.parametrize("seed", [311])
def test_graph_unlink_backward(seed):
    """Setting need_grad=False on an intermediate variable must block
    gradient flow into its branch (x0) while the other branch (x1) still
    receives gradients."""
    rng = np.random.RandomState(seed)
    x0 = nn.Variable([2, 4], need_grad=True)
    x1 = nn.Variable([2, 4], need_grad=True)
    x0.d = rng.randn(*x0.shape)
    x1.d = rng.randn(*x1.shape)
    x0.grad.zero()
    x1.grad.zero()
    with nn.auto_forward():
        with nn.parameter_scope("fc0"):
            h0 = PF.affine(x0, 2)
        with nn.parameter_scope("fc1"):
            h1 = PF.affine(x1, 2)
        # Cut the graph: backward must not propagate through h0.
        h0.need_grad = False
        h = h0 + h1
        with nn.parameter_scope("fc"):
            y = PF.affine(h, 1)
    y.backward(clear_buffer=True)
    assert np.all(x0.g == 0)
    assert not np.all(x1.g == 0)
@pytest.mark.parametrize("seed", [311])
def test_graph_clear_buffer(seed):
    """Parameter gradients must be identical regardless of the
    clear_no_need_grad / clear_buffer flag combination used during
    forward/backward."""
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)
    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)
    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    # Snapshot parameters so each flag combination starts from the same
    # weights.
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            L.forward(clear_no_need_grad=cnng)
            L.backward(clear_buffer=cb)
            if not first:
                # First iteration: record the reference gradient.
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                # Later iterations: must reproduce the reference exactly.
                g2 = list(nn.get_parameters().values())[0].g.copy()
                import platform
                if platform.machine() == 'ppc64le':
                    pytest.skip("This test fails on ppc64le")
                assert np.all(g == g2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("clear_buffer", [True, False])
def test_graph_rewire(seed, clear_buffer):
    """rewire_on must splice graph A's output into graph B's input so the
    combined graph matches a directly-built equivalent, both forward and
    backward."""
    nn.clear_parameters()
    # A. defining graph definition utility
    def mlp2(x, scope):
        with nn.parameter_scope(scope):
            h = F.tanh(PF.affine(x, 10, name='a1'))
            # NOTE(review): this second layer reuses name='a1' in the same
            # scope, so the two affine layers share parameters — confirm
            # this is intentional rather than a typo for 'a2'.
            h = F.tanh(PF.affine(h, 10, name='a1'))
        return h
    # A. Create a graph A.
    xa = nn.Variable((2, 10), need_grad=True)
    ya = mlp2(xa, 'a')
    # B. Create a graph B.
    xb = nn.Variable((2, 10), need_grad=True)
    yb = mlp2(xb, 'b')
    # C. Create directly connected graph.
    xc = nn.Variable((2, 10))
    yc = mlp2(mlp2(xc, 'a'), 'b')
    # D. Rewire the graphs A and B.
    xb.rewire_on(ya)
    # E. Check whether the results are the same.
    rng = np.random.RandomState(seed)
    data = rng.randn(*xa.shape)
    xa.d = data
    xc.d = data
    params = nn.get_parameters()
    def zero_grad():
        for p in params.values():
            p.grad.zero()
    def backup_params():
        return [p.g.copy() for p in params.values()]
    # Checking forward
    yb.forward(clear_no_need_grad=clear_buffer)
    yc.forward(clear_no_need_grad=clear_buffer)
    assert_allclose(yb.d, yc.d)
    # Checking backward
    zero_grad()
    yb.backward(clear_buffer=clear_buffer)
    gb = backup_params()
    zero_grad()
    yc.backward(clear_buffer=clear_buffer)
    gc = backup_params()
    assert_allclose(xa.d, xc.d)
    for b, c in zip(gb, gc):
        assert_allclose(b, c)
def test_deleted_outputs():
    """Dropping the Python reference to one output of a multi-output
    function (here the batch-norm mean) must not break forward/backward
    on the remaining outputs."""
    random_state = np.random.RandomState(313)
    x = nn.Variable((2, 3, 4, 5))
    out, mean, var = PF.batch_normalization(x, output_stat=True)
    # Delete one of the three outputs before any execution.
    del mean
    x.d = random_state.randn(*x.shape).astype(np.float32)
    out.forward()
    out.backward()
def test_function_hook():
    '''
    Testing function hooks in forward and backward.

    The graph is y = (x + 2) * 0.5 with all data/grad buffers zeroed up
    front, so each hook can assert the exact buffer contents before and
    after every function executes.
    '''
    x = nn.Variable.from_numpy_array(
        np.zeros((2, 3), dtype=np.float32)).apply(need_grad=True)
    x.grad.zero()
    h = x + 2
    h.data.zero()
    h.grad.zero()
    y = h * 0.5
    y.data.zero()
    def forward_pre_hook(f):
        # Output buffers were zeroed above and must still be zero before
        # the function runs.
        assert_allclose(f.outputs[0].d, 0)
    def forward_post_hook(f):
        if f.info.type_name == 'AddScalar':
            assert_allclose(f.outputs[0].d, 2)
        if f.info.type_name == 'MulScalar':
            assert_allclose(f.outputs[0].d, 1)
    def backward_pre_hook(f):
        assert_allclose(f.inputs[0].g, 0)
    def backward_post_hook(f):
        # Both h and x grad will be 0.5
        assert_allclose(f.inputs[0].g, 0.5)
    y.forward(function_pre_hook=forward_pre_hook,
              function_post_hook=forward_post_hook)
    y.backward(function_pre_hook=backward_pre_hook,
               function_post_hook=backward_post_hook)
    x.grad.zero()
    z = x * 0.1
    # Just calling test
    nn.forward_all((y, z), function_pre_hook=lambda f: None,
                   function_post_hook=lambda f: None)
@pytest.mark.parametrize("seed", [313])
def test_shared_variable_on_same_function(seed):
    """A variable used several times in one expression must accumulate
    its gradient once per use: d(x^3)/dx == 3 * x^2."""
    rng = np.random.RandomState(313)
    data = rng.randn(2, 3)
    x = nn.Variable.from_numpy_array(data).apply(need_grad=True)
    x.grad.zero()
    cubed = x * x * x
    cubed.forward()
    cubed.backward()
    assert_allclose(x.g, 3 * data ** 2)
@pytest.mark.parametrize("seed", [313])
def test_function_context(seed):
    """A function created inside a context_scope must record that context,
    and reusing a parent function's context must propagate it."""
    rng = np.random.RandomState(313)
    xd = rng.randn(2, 3)
    x = nn.Variable.from_numpy_array(xd)
    ctx1 = nn.Context(backend=['cpu:float'],
                      array_class='CpuCachedArray', device_id='1')
    with nn.context_scope(ctx1):
        y = F.relu(x)
    ctx0 = nn.Context(backend=['cpu:float'],
                      array_class='CpuCachedArray', device_id='0')
    # Contexts are compared via string form below.
    # TODO: use id or hash if we determine the spec
    assert str(ctx0) != str(ctx1)
    assert str(ctx1) == str(y.parent.context)
    with nn.context_scope(y.parent.context):
        z = F.relu(x)
    assert str(y.parent.context) == str(z.parent.context)
def test_no_need_grad_backward():
    '''
    This tests a previously existing bug where an
    intermediate variable with need_grad=False yet required
    to compute a gradient in a function has been unexpectedly cleared.
    '''
    nn.prefer_cached_array(False)
    x = nn.Variable(tuple(), need_grad=False)
    y = nn.Variable(tuple(), need_grad=True)
    z = nn.Variable(tuple(), need_grad=False)
    xx = x * 1
    yy = y * 1
    zz = z * 1
    a = xx * 3
    b = xx * yy
    c = xx * zz
    d = a * b * c
    x.data.fill(1)
    y.data.fill(2)
    z.data.fill(0.5)
    # Optional debugging hook; left disabled.
    hook = None  # lambda f: print(f, list(map(lambda x: x.d, f.inputs)))
    d.forward(clear_no_need_grad=True, function_pre_hook=hook)
    y.grad.zero()
    d.backward(clear_buffer=True, function_pre_hook=hook)
    # dd/dy = xx*a*c = 1 * 3 * 0.5 = 1.5
    assert np.isclose(y.g, 1.5)
@pytest.mark.parametrize("clear_buffer", [False, True])
def test_no_need_grad_forward(clear_buffer):
    '''
    This tests a previously existing bug where an intermediate variable
    has been unexpectedly cleared before the end of life if
    it is used in an in-place function and
    another function at the same time.
    '''
    import nnabla as nn
    import nnabla.functions as F
    nn.prefer_cached_array(False)
    x = nn.Variable(tuple(), need_grad=False)
    xx = x * 1
    # xx feeds both an in-place view (reshape) and a regular function.
    a = xx.reshape(x.shape)
    b = xx * 1
    d = a * b
    x.data.fill(1)
    d.forward(clear_no_need_grad=True, clear_buffer=clear_buffer)
    assert np.isclose(d.d, 1.0)
def test_no_need_grad_forward_double():
    '''
    This tests a previously existing bug where a variable used
    twice by a single function caused an unexpected clear due to
    incorrect count of function references.
    '''
    import nnabla as nn
    import nnabla.functions as F
    nn.prefer_cached_array(False)
    x = nn.Variable(tuple())
    xx = x * 1
    # xx is consumed twice by one function (y) and once more by z.
    y = xx * xx
    z = xx * 1
    a = y * z
    x.data.fill(1)
    a.forward(clear_no_need_grad=True)
    assert np.isclose(a.d, 1.0)
class TestClearInput():
    """Tests that forward(clear_no_need_grad=True) clears exactly the
    input buffers that are no longer needed, as recorded by the
    clear_called_flag_recorder. Each ``answer`` list holds one entry per
    executed function, with one bool per input: True means the input's
    data was cleared after that function ran."""
    def check_input_data_clear_called_flags(self, answer):
        # Compare the recorded clear flags, function by function and
        # input by input, against the expected pattern.
        result = clear_called_flag_recorder.get_input_clear_called_flags()
        assert len(result) == len(answer)
        for i, flags in enumerate(answer):
            assert len(result[i]) == len(flags)
            for j, flag in enumerate(flags):
                assert flag == result[i][j][0]
    def setup_method(self):
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
    def teardown_method(self):
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
    # Test for clearing input in a network of two layers.
    def test_clear_input_if_no_need_grad0(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        answer = []
        answer.append([False])
        answer.append([True])
        y1.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test for clearing input in a network of three layers.
    def test_clear_input_if_no_need_grad1(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(y1)
        answer = []
        answer.append([False])
        answer.append([True])
        answer.append([True])
        y2.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test the case where an input is not cleared when it is required for backward at the previous layer function.
    def test_clear_input_if_no_need_grad2(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)  # (1)
        y1 = F.tanh(xx1)  # (2)
        y2 = F.add_scalar(y1)  # (3)
        answer = []
        answer.append([False])
        answer.append([True])
        answer.append([False])
        # y1 must not be clear after (3) because y1 is required for backward of (2).
        y2.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test for a variable shared with two layer functions.
    # Check if it is cleared after the both functions finish to use it.
    def test_clear_input_if_no_need_grad_branch0(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        x2 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)  # (1)
        y2 = F.add_scalar(xx1)  # (2)
        y3 = F.add2(y1, y2)  # (3)
        answer = []
        answer.append([False])
        answer.append([False])  # (1) does not clear xx1
        answer.append([True])  # (2) clears xx1
        answer.append([True, True])
        y3.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test for a variable shared with mul2 and add2.
    # add2 does not require it as input for backward, but mul2 does.
    def test_clear_input_if_no_need_grad_branch1(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        x2 = nn.Variable([1, 5], need_grad=True)
        x3 = nn.Variable([1, 5], need_grad=True)
        xx1 = F.identity(x1)
        xx2 = F.identity(x2)
        y1 = F.mul2(xx1, xx2)  # (1)
        xx3 = F.identity(x3)
        y2 = F.add2(xx2, xx3)  # (2)
        y3 = F.add2(y1, y2)  # (3)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False, False])  # (1)
        answer.append([False])
        answer.append([False, True])  # (2) use xx2 in backward
        answer.append([True, True])  # (3)
        y3.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test for only clearing bias in convolution.
    def test_clear_input_if_no_need_grad_convolution(self):
        x1 = nn.Variable([1, 1, 2], need_grad=True)
        x2 = nn.Variable([1, 1, 2], need_grad=True)
        x3 = nn.Variable([1], need_grad=True)
        inp = F.identity(x1)
        weight = F.identity(x2)
        bias = F.identity(x3)
        y = F.convolution(inp, weight, bias)  # (1)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False])
        # input and weight are needed for backward; bias is not.
        answer.append([False, False, True])  # (1) clears bias
        y.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
    # Test for only clearing beta in batch_normalization.
    @pytest.mark.parametrize("batch_stat", [False, True])
    def test_clear_input_if_no_need_grad_batch_normalization(self, batch_stat):
        x1 = nn.Variable([1, 1, 2], need_grad=True)
        x2 = nn.Variable([1, 1, 1], need_grad=True)
        x3 = nn.Variable([1, 1, 1], need_grad=True)
        x4 = nn.Variable([1, 1, 1], need_grad=True)
        x5 = nn.Variable([1, 1, 1], need_grad=True)
        x = F.identity(x1)
        beta = F.identity(x2)
        gamma = F.identity(x3)
        if batch_stat:
            # In batch-stat mode, running mean/var are passed directly.
            y = F.batch_normalization(
                x, beta, gamma, x4, x5, batch_stat=batch_stat)
        else:
            mean = F.identity(x4)
            var = F.identity(x5)
            y = F.batch_normalization(
                x, beta, gamma, mean, var, batch_stat=batch_stat)
        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False])
        if not batch_stat:
            answer.append([False])
            answer.append([False])
        # Only beta is clearable; every other input is kept for backward.
        answer.append([False, True, False, False, False])
        y.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
class TestClearOutputGrad():
    """Tests that backward(clear_buffer=True) clears exactly the output
    gradient buffers that are safe to clear. Each ``answer_grad`` list
    holds one entry per executed function, with one bool per output:
    True means that output's grad was cleared."""
    def check_grad_cleared_flags(self, answer):
        # Compare recorded grad-clear flags against the expected pattern,
        # function by function and output by output.
        result = clear_called_flag_recorder.get_output_clear_called_flags()
        assert len(result) == len(answer)
        for i, flags in enumerate(answer):
            assert len(result[i]) == len(flags)
            for j, flag in enumerate(flags):
                assert flag == result[i][j][1]
    def setup_method(self):
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
    def teardown_method(self):
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
    # Test for the type of grad given to backward.
    def test_clear_output_grad_argument(self, grad):
        x1 = nn.Variable([1], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        answer_grad = []
        if grad is None or isinstance(grad, nn.NdArray):
            # The caller-supplied grad buffer must not be cleared.
            answer_grad.append([False])  # y1
        else:
            answer_grad.append([True])  # y1
        answer_grad.append([True])  # xx1
        y1.forward(clear_no_need_grad=True)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y1.backward(clear_buffer=True, grad=grad)
        self.check_grad_cleared_flags(answer_grad)
        assert y1.grad.clear_called == False
    # Test for an inplaced variable.
    def test_clear_output_grad_inplace(self):
        x1 = nn.Variable([1], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1, inplace=True)
        y2 = F.add_scalar(y1)
        answer_grad = []
        answer_grad.append([True])
        answer_grad.append([True])
        answer_grad.append([True])
        y2.forward(clear_no_need_grad=True)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y2.backward(clear_buffer=True)
        self.check_grad_cleared_flags(answer_grad)
    # Test for a variable shared with two layer functions.
    def test_clear_output_grad_shared_variable(self):
        x1 = nn.Variable([1], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(xx1)
        y3 = F.add2(y1, y2)
        answer_grad = []
        answer_grad.append([True])
        answer_grad.append([True])
        answer_grad.append([True])
        answer_grad.append([True])
        y3.forward(clear_no_need_grad=True)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y3.backward(clear_buffer=True)
        self.check_grad_cleared_flags(answer_grad)
    # Test for a persistent variable.
    def test_clear_output_grad_persistent(self):
        x1 = nn.Variable([1], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(y1)
        # Persistent variables must keep their grad buffers.
        xx1.persistent = True
        y2.persistent = True
        answer_grad = []
        answer_grad.append([False])  # y2
        answer_grad.append([True])  # y1
        answer_grad.append([False])  # xx1
        y2.forward(clear_no_need_grad=True)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y2.backward(clear_buffer=True)
        self.check_grad_cleared_flags(answer_grad)
    # Test for the input variables of sink.
    # In the case where Function::prohibit_clear_input_buffers returns true,
    # these inputs must not be cleared from any function.
    def test_clear_output_grad_prohibit_clear_input(self):
        x1 = nn.Variable([1], need_grad=True)
        xx1 = F.identity(x1)
        y1 = F.add_scalar(xx1)
        y2 = F.add_scalar(xx1)
        y3 = F.sink(y1, y2)
        answer_grad = []
        answer_grad.append([True])  # y3
        answer_grad.append([False])  # y2
        answer_grad.append([False])  # y1
        answer_grad.append([True])  # xx1
        y3.forward(clear_no_need_grad=True)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y3.backward(clear_buffer=True)
        self.check_grad_cleared_flags(answer_grad)
class TestRecomputation():
def teardown_method(self):
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
def check_input_data_clear_called_flags(self, answer):
result = clear_called_flag_recorder.get_input_clear_called_flags()
assert len(result) == len(answer)
for i, flags in enumerate(answer):
assert len(result[i]) == len(flags)
for j, flag in enumerate(flags):
assert flag == result[i][j][0]
def check_recomputation(self, seed, graph, inputs):
def forward_backward_and_get_grads(y):
# Initialize grads
for input in inputs:
if input.need_grad:
input.grad.zero()
y.forward(clear_no_need_grad=True)
y.backward(clear_buffer=True)
# Get grads
grads = []
for input in inputs:
if input.need_grad:
grads.append(copy.deepcopy(input.g))
return grads
# Set random input data.
rng = np.random.RandomState(seed)
for input in inputs:
input.d = rng.randn(*input.shape)
# Calculate reference grads.
y_ref = graph(*inputs)
# Disable recompute flags for generating reference grads.
def disable_recompute_flag(f):
for input in f.inputs:
input.apply(recompute=False)
y_ref.visit(disable_recompute_flag)
grads_expected = forward_backward_and_get_grads(y_ref)
y = graph(*inputs)
grads_actual = forward_backward_and_get_grads(y)
for a, e in zip(grads_actual, grads_expected):
assert_allclose(a, e, rtol=0, atol=0)
# Check setting up recompute flag.
def test_recompute_flag(self):
x0 = nn.Variable((1, 1), need_grad=True)
x1 = F.sin(x0).apply(recompute=True)
x2 = F.sin(x1).apply(recompute=False)
x3 = F.sin(x2)
assert x0.recompute == False
assert x1.recompute == True
assert x2.recompute == False
assert x3.recompute == False
# Check whether input data is cleared when recompute flag is True.
def test_clear_input_data(self):
x0 = nn.Variable((1, 1), need_grad=True)
# `F.sin` input data is always needed for grad calculation
x1 = F.sin(x0).apply(recompute=True)
x2 = F.sin(x1).apply(recompute=False)
x3 = F.sin(x2)
answer = []
answer.append([False]) # x0
answer.append([True]) # x1
answer.append([False]) # x2
clear_called_flag_recorder.activate_clear_called_flag_recorder()
x3.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check claering output which needs `setup_recompute` for recomputation.
def test_clearing_without_recompute_flag(self):
x0 = nn.Variable((1, 128, 128), need_grad=True)
x1 = F.sin(x0).apply(recompute=True)
x2 = F.dropout(x1)
x3 = F.sin(x2).apply(recompute=True)
x4 = F.sin(x3).apply(recompute=True)
y = F.identity(x4)
# Skip this code temporarily since it cause
# randomly crash when perform CI testing on windows 10 with nnabla-cuda-ext
pytest.skip(
'Skipped for randomly crash when perform CI testing on windows 10 with nnabla-cuda-ext')
y.forward(clear_no_need_grad=True)
x2.data.clear()
with pytest.raises(RuntimeError, match="Failed `called_setup_recompute_`"):
# x2.data cannot be recomputed correctly since `setup_recompute` is not called during forward propagation.
# Backward should raise when some intermediate variables are cleared by user.
y.backward()
# Check recomputed data value.
@pytest.mark.parametrize("seed", [313])
def test_recomputed_data_value(self, seed):
rng = np.random.RandomState(seed)
a0 = nn.Variable((2, 3), need_grad=True)
b0 = nn.Variable((2, 3), need_grad=True)
a0.d = rng.randn(*a0.shape)
b0.d = rng.randn(*b0.shape)
a1 = F.sin(a0).apply(recompute=True)
a2 = F.sin(a1)
a3 = F.sin(a2)
b1 = F.sin(b0)
b2 = F.sin(b1).apply(recompute=True)
b3 = F.sin(b2)
c0 = F.mul2(a3, b3).apply(recompute=True)
c1 = F.sin(c0)
# Forward
# Get output data which will be recomputed.
ref_data = [] # data of a0, b2 and c0 will be stored.
def get_output_data(nnabla_func):
outputs = nnabla_func.outputs
for output in outputs:
if output.recompute:
ref_data.append(copy.deepcopy(output.d))
c1.forward(function_post_hook=get_output_data)
# Backward
# Get recomputed data
act_data = []
def get_recomputed_data(nnabla_func):
inputs = nnabla_func.inputs
for input in inputs:
if input.recompute:
act_data.append(copy.deepcopy(input.d))
c1.backward(function_pre_hook=get_recomputed_data)
# Make the order the same as `ref_data`.
act_data.reverse()
# Check recomputed data
for act, ref in zip(act_data, ref_data):
assert_allclose(act, ref, rtol=0, atol=0)
@pytest.mark.parametrize("seed", [313])
def test_grad_value_simple(self, seed):
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
def graph(x):
y = F.sin(x).apply(recompute=True)
y = F.cos(y)
return y
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("need_grad_x1", [False, True])
@pytest.mark.parametrize("need_grad_x2", [False, True])
def test_grad_value_with_branch(self, seed, need_grad_x1, need_grad_x2):
x1 = nn.Variable((2, 3), need_grad=need_grad_x1)
x2 = nn.Variable((2, 3), need_grad=need_grad_x2)
inputs = (x1, x2)
def graph(x1, x2):
x1 = F.identity(x1).apply(recompute=True)
x2 = F.identity(x2).apply(recompute=True)
y = F.mul2(x1, x2)
y = F.identity(y)
return y
self.check_recomputation(seed, graph, inputs)
# Check `setup_recompute`
@pytest.mark.parametrize("seed", [313])
def test_grad_value_with_random_function(self, seed):
x1 = nn.Variable((2, 3), need_grad=True)
inputs = (x1,)
def graph(x1):
x1 = F.identity(x1).apply(recompute=True)
x2 = F.randn(shape=x1.shape, seed=123).apply(recompute=True)
x3 = F.rand(shape=x1.shape, seed=456).apply(recompute=True)
y = F.mul2(x1, x2).apply(recompute=True)
y = F.mul2(y, x3).apply(recompute=True)
y = F.identity(y)
return y
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
def test_grad_value_with_output_dependent_function(self, seed):
"""
Gradient values are tested for the function which depends on output data.
Here, we test a following case that variable `h` will be recomputed and
its data is needed for the `F.swish` backward.
x -> F.swish -> h -> F.interpolate -> y
"""
def graph(x0):
# F.swish -> F.interpolate
x1 = F.swish(x0)
x1.apply(recompute=True)
x2 = F.interpolate(x1, scale=(2,))
return x2
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
self.check_recomputation(seed, graph, inputs)
    @pytest.mark.parametrize("seed", [313])
    def test_with_persistent_flag(self, seed):
        """`persistent=True` must override `recompute=True` for data
        clearing: persistent data survives forward, and gradients stay
        correct."""
        x = nn.Variable((2, 3), need_grad=True)
        inputs = (x,)
        def graph(x0):
            x1 = F.sin(x0).apply(recompute=True)
            # Set `recompute` and `persistent` flag at the same time
            x2 = F.sin(x1).apply(recompute=True, persistent=True)
            x3 = F.sin(x2).apply(recompute=True)
            y = F.sin(x3)
            return y
        y = graph(x)
        # Trace data clearing during forward propagation.
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y.forward(clear_no_need_grad=True)
        expected = [
            [False],  # x0: graph input
            [True],  # x1: Cleared because `recompute=True`
            [False],  # x2: Not cleared because `persistent=True`
            [True],  # x3: Cleared because `recompute=True`
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        # Check grad value
        self.check_recomputation(seed, graph, inputs)
    @pytest.mark.parametrize("seed", [313])
    def test_with_inplacing(self, seed):
        """When `recompute=True` is set on an inplaced variable, the shared
        (inplaced) data must not be cleared during forward, and gradients
        must still be correct."""
        x = nn.Variable((2, 3), need_grad=True)
        inputs = (x,)
        def graph(x0):
            x1 = F.sin(x0).apply(recompute=True)
            # Set `recompute` flag to the inplaced variable.
            x2 = F.reshape(x1, (3, 2), inplace=True).apply(recompute=True)
            x3 = F.sin(x2).apply(recompute=True)
            y = F.sin(x3)
            return y
        y = graph(x)
        # Trace data clearing during forward propagation.
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        y.forward(clear_no_need_grad=True)
        expected = [
            [False],  # x0: graph input
            [False],  # x1: Not cleared because inplaced data
            [False],  # x2: Not cleared because inplaced data
            [True],  # x3: Cleared because `recompute=True`
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        # Check grad value
        self.check_recomputation(seed, graph, inputs)
    # Check clear of recomputed data on the subgraph which is not back-propagated.
    def test_clear_data_on_not_bwd_path(self):
        """Recomputed data lying on a subgraph that is not back-propagated
        (the b* branch, need_grad=False) must be cleared again during the
        recomputation phase of backward."""
        a0 = nn.Variable((2, 3), need_grad=True)
        a1 = F.identity(a0).apply(recompute=True)
        a2 = F.sin(a1).apply(recompute=True)
        # These three variables are not back-propagated.
        b0 = nn.Variable((2, 3), need_grad=False)
        b1 = F.identity(b0).apply(recompute=True)
        b2 = F.sin(b1).apply(recompute=True)
        c1 = F.add2(a2, b2).apply(recompute=True)
        c2 = F.sin(c1)
        # Forward
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        c2.forward(clear_no_need_grad=True)
        # Data which will be recomputed must be cleared during forward propagation.
        expected = [
            [False],  # a0
            [True],  # a1
            [False],  # b0
            [True],  # b1
            [True, True],  # a2, b2
            [True],  # c1
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        # Backward
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        c2.backward(clear_buffer=True)
        # b1 is not on backward path and must be cleared during recomputation.
        expected = [
            # Recomputation
            [False],  # a0
            [False],  # a1
            [False],  # b0
            [True],  # b1 (not on backward path) must be cleared
            [True, True],  # a2, b2
            [False],  # c1
            # Backward propagation
            [True, True],  # a2, b2
            [False],  # a1
            [False],  # a0
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
    # Check clear of data not need for grad calculation during recomputation.
    def test_clear_no_need_grad_during_recomputation(self):
        """Recomputed data that is not required for gradient calculation
        (here `x2`, consumed only by F.identity) must be cleared right
        after its recomputation."""
        x0 = nn.Variable((2, 3), need_grad=True)
        x1 = F.identity(x0).apply(recompute=True)
        # x2.data must be cleared just after recomputation because they are not need for backward propagation.
        x2 = F.sin(x1).apply(recompute=True)
        x3 = F.identity(x2).apply(recompute=True)
        x4 = F.sin(x3)
        # Forward
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        x4.forward(clear_no_need_grad=True)
        # All intermediate data must be cleared.
        expected = [
            [False],  # x0
            [True],  # x1
            [True],  # x2
            [True],  # x3
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
        # Backward
        clear_called_flag_recorder.activate_clear_called_flag_recorder()
        x4.backward(clear_buffer=True)
        expected = [
            # Recomputation
            [False],  # x0
            [False],  # x1
            [True],  # x2: not need for grad calculation
            # Backward propagation
            [False],  # x3
            [True],  # x2
            [False],  # x1
            [False],  # x0
        ]
        self.check_input_data_clear_called_flags(expected)
        clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
    # Check recompute recursion stops at checkpoint.
    def test_checkpoint(self):
        """Recomputation recursion must stop at checkpoints — variables
        whose `recompute` flag is False (x3, x6) — so earlier segments are
        only recomputed when backward actually reaches them."""
        x0 = nn.Variable((2, 3), need_grad=True)
        x1 = F.sin(x0).apply(recompute=True)
        x2 = F.sin(x1).apply(recompute=True)
        x3 = F.sin(x2)  # Checkpoint 1 (recompute == False)
        x4 = F.sin(x3).apply(recompute=True)
        x5 = F.sin(x4).apply(recompute=True)
        x6 = F.sin(x5)  # Checkpoint 2 (recompute == False)
        x7 = F.sin(x6).apply(recompute=True)
        x8 = F.sin(x7).apply(recompute=True)
        # All intermediate data except checkpoints will be cleared during forward propagation.
        x8.forward(clear_no_need_grad=True)
        # Trace clear_called flags of `x2` and `x5` during backward propagation.
        # clear_called flag changes True to False when the data is recomputed.
        act_flags = []
        def get_clear_called_flags(nnabla_func):
            # Post-hook: sample both flags after each function's backward.
            act_flags.append([x2.data.clear_called, x5.data.clear_called])
        x8.backward(function_post_hook=get_clear_called_flags)
        ref_flags = [
            # [x2, x5] clear_called flags
            [True, True],  # After F.sin(x7) backward
            [True, True],  # After F.sin(x6) backward
            [True, False],  # After F.sin(x5) backward
            [True, False],  # After F.sin(x4) backward
            [True, False],  # After F.sin(x3) backward
            [False, False],  # After F.sin(x2) backward
            [False, False],  # After F.sin(x1) backward
            [False, False],  # After F.sin(x0) backward
        ]
        assert(ref_flags == act_flags)
    # Test unnecessary recomputation with single recomputation recursion.
    def test_unnecessary_traverse_0(self):
        """A recompute-flagged variable on a branch that needs no gradient
        (`a1`, rooted at need_grad=False) must not be recomputed during
        backward."""
        # No need grad path
        a0 = nn.Variable((2, 3), need_grad=False)
        a1 = F.sin(a0).apply(recompute=True)
        # Need grad path
        b0 = nn.Variable((2, 3), need_grad=True)
        b1 = F.sin(b0).apply(recompute=True)
        # branch
        c = F.add2(a1, b1)
        # Check whether unnecessary recomputation for `a1.data` is performed.
        c.forward(clear_no_need_grad=True)
        assert(a1.data.clear_called == True)
        assert(b1.data.clear_called == True)
        # Exec backward without clearing buffer to check whether recomputation is performed by seeing `clear_called` flag.
        c.backward(clear_buffer=False)
        # a1.data is still cleared. (Recalculation is not performed)
        assert(a1.data.clear_called == True)
        # b1.data is set. (Recalculation is performed)
        assert(b1.data.clear_called == False)
    # Test recomputation recursion depth.
    def test_unnecessary_traverse_1(self):
        """Recomputation recursion must stop as soon as an ancestor whose
        data is still available (`a2`, recompute=False) is reached, leaving
        deeper recompute-flagged data (`a1`) untouched."""
        a0 = nn.Variable((2, 3), need_grad=False)
        # `a1` will not be recomputed since `a2` will not be cleared.
        a1 = F.sin(a0).apply(recompute=True)
        a2 = F.cos(a1)
        a3 = F.sin(a2).apply(recompute=True)  # 'a3` will be recomputed.
        b0 = nn.Variable((2, 3), need_grad=True).apply(recompute=True)
        b1 = F.identity(b0).apply(recompute=True)
        c = F.mul2(a3, b1).apply(recompute=True)
        # Check recomputation recursion stops when `a3.data` is calculated.
        c.forward(clear_buffer=False)
        # `a1.data` is cleared because `recompute` flag is `true`.
        assert(a1.data.clear_called == True)
        # `a2.data` is not cleared because `recompute` flag is `false`.
        assert(a2.data.clear_called == False)
        c.backward(clear_buffer=False)
        # If the recursive call reached to `a1`, `a1.data` should be set by recomputation.
        # However, the recursive call stops at `a2` whose data is not cleared.
        assert(a1.data.clear_called == True)
    # Test unnecessary recomputation for whole graph.
    def test_unnecessary_traverse_2(self):
        """Backward over a graph whose gradients need no input data must not
        recompute anything: every intermediate input seen by the pre-hook
        has to remain cleared."""
        def fail_with_not_cleared_data(nnabla_func):
            # Pre-hook: a non-cleared intermediate input would mean data
            # was recomputed unnecessarily.
            inputs = nnabla_func.inputs
            for input in inputs:
                if input.parent is None:
                    # Graph input: excluded, its data is never cleared.
                    continue
                if not input.data.clear_called:
                    # Not cleared (recomputed) data is found.
                    pytest.fail()
        # Prepare graph does not need any recomputation.
        x1 = nn.Variable((2, 3), need_grad=True)
        x1 = F.identity(x1).apply(recompute=True)
        x2 = nn.Variable((2, 3), need_grad=True)
        x2 = F.identity(x2).apply(recompute=True)
        y = F.add2(x1, x2).apply(recompute=True)
        y = F.identity(y).apply(recompute=True)
        # Check unnecessary recomputation.
        y.forward(clear_no_need_grad=True)
        y.backward(function_pre_hook=fail_with_not_cleared_data)
    @pytest.mark.parametrize("recompute_flag", [False, True])
    def test_with_statement_variable_creation(self, recompute_flag):
        """
        Test for setting recompute flags with Python `with` statement.
        Every Variable creation path (constructor, reshape, function
        output, from_numpy_array, get_unlinked_variable) must pick up the
        flag, while merely referencing an existing Variable must not.
        """
        # Create a new Variable
        x1 = nn.Variable((2, 3))
        assert x1.recompute == False
        with nn.recompute(recompute_flag):
            # Create Variable by `__cinit__()`
            y1 = nn.Variable((2, 3))
            assert y1.recompute == recompute_flag
            # Create Variable by `create_from_cvariable()`
            y2 = x1.reshape((3, 2), unlink=True)
            assert y2.recompute == recompute_flag
            # Create Variable by `create_from_cg_variable()`
            y3 = F.relu(x1)
            assert y3.recompute == recompute_flag
            # Create Variable by `from_numpy_array()`
            data = np.array((2, 3))
            y4 = nn.Variable.from_numpy_array(data)
            assert y4.recompute == recompute_flag
            # Create Variable by `get_unlinked_variable()`
            y5 = x1.get_unlinked_variable()
            assert y5.recompute == recompute_flag
            # Recompute flag for referenced Variable must not be overwritten.
            # More detail tests are performed by `test_nested_with_statement`
            y6 = x1
            assert y6.recompute == False
            # Direct function connection
            y7 = F.relu(F.relu(x1))
        # Create a new Variable after with statement
        x2 = nn.Variable((2, 3))
        assert x2.recompute == False
        # Check recompute flag of forcibly got Python Variable.
        assert y7.parent.inputs[0].recompute == recompute_flag
        # Check default recompute flag for nn.recompute()
        with nn.recompute():
            x = nn.Variable((2, 3))
            assert x.recompute == True
    # Recompute flag for first nest
    @pytest.mark.parametrize("f1", [False, True])
    # Recompute flag for second nest
    @pytest.mark.parametrize("f2", [False, True])
    # Recompute flag for third nest
    @pytest.mark.parametrize("f3", [False, True])
    def test_nested_with_statement(self, f1, f2, f3):
        """
        Test for nested Python `with` statement of recomputation.
        Each nesting level must apply its own flag to newly created
        Variables, leave previously created Variables untouched, and
        restore the enclosing flag when the block exits.
        """
        x0 = nn.Variable((2, 3))
        assert x0.recompute == False
        # Nest 1
        with nn.recompute(f1):
            x1 = nn.Variable((2, 3))
            x0_1 = x0
            assert x1.recompute == f1
            assert x0_1.recompute == False
            # Nest 2
            with nn.recompute(f2):
                x2 = nn.Variable((2, 3))
                x0_2 = x0
                x1_2 = x1
                assert x2.recompute == f2
                assert x0_2.recompute == False
                assert x1_2.recompute == f1
                # Nest 3
                with nn.recompute(f3):
                    x3 = nn.Variable((2, 3))
                    x0_3 = x0
                    x1_3 = x1
                    x2_3 = x2
                    assert x3.recompute == f3
                    assert x0_3.recompute == False
                    assert x1_3.recompute == f1
                    assert x2_3.recompute == f2
                # Back at nest 2: flag f2 applies again.
                x2 = nn.Variable((2, 3))
                x0_2 = x0
                x1_2 = x1
                assert x2.recompute == f2
                assert x0_2.recompute == False
                assert x1_2.recompute == f1
            # Back at nest 1: flag f1 applies again.
            x1 = nn.Variable((2, 3))
            x0_1 = x0
            assert x1.recompute == f1
            assert x0_1.recompute == False
        # Outside all nests: default flag (False) is restored.
        x0 = nn.Variable((2, 3))
        assert x0.recompute == False
    # Recompute flag for first `with` block
    @pytest.mark.parametrize("f1", [False, True])
    # Recompute flag for second `with` block
    @pytest.mark.parametrize("f2", [False, True])
    def test_sequential_with_statement(self, f1, f2):
        """
        Test for sequential use of with statement.
        The second `with` block must apply its own flag independently of
        the first, and variables created between blocks get the default.
        """
        x = nn.Variable((2, 3))
        assert x.recompute == False
        # First `with` block
        with nn.recompute(f1):
            y = F.relu(x)
            assert y.recompute == f1
            y = F.sin(y)
            assert y.recompute == f1
        # Flag set inside the block persists on the variable itself.
        assert y.recompute == f1
        y = F.relu(y)
        assert y.recompute == False
        # Second `with` block
        with nn.recompute(f2):
            y = F.relu(x)
            assert y.recompute == f2
            y = F.sin(y)
            assert y.recompute == f2
        assert y.recompute == f2
        y = F.relu(y)
        assert y.recompute == False
    @pytest.mark.parametrize("recompute_flag", [False, True])
    def test_recompute_fn_decorator(self, recompute_flag):
        """
        Test for setting recompute flags with function decorator `nn.recompute_fn()`.
        Only Variables created inside the decorated function receive the
        flag; the function's inputs keep their own flags.
        """
        # Specifying recompute flag
        @nn.recompute_fn(recompute_flag)
        def func2(x):
            assert x.recompute == False
            y = F.relu(x)
            assert y.recompute == recompute_flag
            return y
        # Check recompute flags
        x2 = nn.Variable((2, 3))
        assert x2.recompute == False
        y2 = func2(x2)
        assert y2.recompute == recompute_flag
    def test_recompute_fn_decorator_default_use(self):
        """
        Test for setting recompute flags with function decorator `nn.recompute_fn()` without specifying recompute flag.
        The default must behave like `nn.recompute_fn(True)`.
        """
        # Default usage
        @nn.recompute_fn()
        def func1(x):
            assert x.recompute == False
            y = F.relu(x)
            assert y.recompute == True
            return y
        # Check recompute flags
        x1 = nn.Variable((2, 3))
        assert x1.recompute == False
        y1 = func1(x1)
        assert y1.recompute == True
    @pytest.mark.parametrize("recompute_flag", [False, True])
    def test_recompute_fn_decorator_multiple_inputs_outputs(self, recompute_flag):
        """
        Test for the use of `nn.recompute_fn()` with a function which have multiple inputs, outputs, args and kwargs.
        Positional args, kwargs and multiple return values must pass
        through the decorator unchanged while new Variables get the flag.
        """
        # Define sample function with multiple inputs and outputs
        @nn.recompute_fn(recompute_flag)
        def func(x1, x2, val, axis, reverse=False, alpha=0.2):
            # Check args and kwargs passed correctly
            assert val == 3.14
            assert axis == 0
            assert reverse == True
            assert alpha == 0.3
            y1 = F.cumsum(x1, axis, reverse=reverse)
            y2 = x2 * val
            y3 = y1 + y2
            y3 = F.leaky_relu(y3, alpha=alpha)
            # Check recompute flags for variables defined inside this function
            assert y1.recompute == recompute_flag
            assert y2.recompute == recompute_flag
            assert y3.recompute == recompute_flag
            return y2, y3
        x1 = nn.Variable((2, 3))
        x2 = nn.Variable((2, 3))
        y1, y2 = func(x1, x2, 3.14, 0, alpha=0.3, reverse=True)
        assert y1.recompute == recompute_flag
        assert y2.recompute == recompute_flag
    # Recompute flag for outer function
    @pytest.mark.parametrize("f0", [False, True])
    # Recompute flag for first inner function
    @pytest.mark.parametrize("f1", [False, True])
    # Recompute flag for second inner function
    @pytest.mark.parametrize("f2", [False, True])
    def test_nested_recompute_fn_decorator(self, f0, f1, f2):
        """
        Test for setting recompute flags with nested function decorator `nn.recompute_fn()`.
        Each decorated call must apply its own flag and restore the
        caller's flag when it returns.
        """
        # First sub function
        @nn.recompute_fn(f1)
        def func1(x):
            assert x.recompute == f0
            y = F.relu(x)
            assert y.recompute == f1
            return y
        # Second sub function
        @nn.recompute_fn(f2)
        def func2(x):
            assert x.recompute == f0
            y = F.sin(x)
            assert y.recompute == f2
            return y
        # Main function
        @nn.recompute_fn(f0)
        def func0(x):
            assert x.recompute == False
            y = F.identity(x)
            assert y.recompute == f0
            # First inner function call
            y = func1(y)
            assert y.recompute == f1
            y = F.relu(y)
            assert y.recompute == f0
            # Second inner function call
            y = func2(y)
            assert y.recompute == f2
            y = F.identity(y)
            assert y.recompute == f0
            return y
        # Call main function
        x = nn.Variable((2, 3))
        y = func0(x)
        assert y.recompute == f0
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("func, num_inputs", [
    (F.relu, 1),
    (F.leaky_relu, 1),
    (F.random_erase, 1),
    (F.add2, 2),
    (F.bc_add2, 2),
    (F.sub2, 2),
    (F.add_scalar, 1),
    (F.mul_scalar, 1),
])
def test_obsolete_inplace_option(inplace, func, num_inputs):
    '''
    This test confirms the construction of graph.
    Since F.log_softmax requires output for backward calculation, graph cannot be constructed if it is inplaced.
    Forward and backward are expected to run without error for both
    `inplace=True` and `inplace=False` (the `inplace` option is presumably
    obsolete/ignored by these functions -- confirm against their
    implementations).
    '''
    x0 = nn.Variable((2, 3, 4, 5), need_grad=True)
    x1 = nn.Variable((2, 3, 4, 5), need_grad=True)
    if num_inputs == 1:
        y = F.identity(x0)
        y = F.log_softmax(y)
        y = func(y, inplace=inplace)
        y.forward()
        y.backward()
    elif num_inputs == 2:
        y0 = F.identity(x0)
        y1 = F.identity(x1)
        y0 = F.log_softmax(y0)
        y1 = F.log_softmax(y1)
        y = func(y0, y1, inplace=inplace)
        y.forward()
        y.backward()
| sony/nnabla | python/test/test_graph.py | Python | apache-2.0 | 49,780 | [
"VisIt"
] | f3fae2489f1bc99897e9d03815b1026a88aa48019dd7257d3cf4d3c906e61944 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# sandbox_list_users - list registered sandbox users from the sandbox user database
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""List sandbox users and print total number of registered users"""
import os
import sys
from shared.conf import get_configuration_object
from shared.serial import load
configuration = get_configuration_object()
sandboxdb_file = configuration.sandbox_home + os.sep\
+ 'sandbox_users.pkl'
userdb = None
if not os.path.isfile(sandboxdb_file):
print '%s is not an existing file!' % sandboxdb_file
sys.exit(1)
try:
userdb = load(sandboxdb_file)
except Exception, exc:
print 'Exception reading %s, (%s)' % (sandboxdb_file, exc)
user_count = 0
for (key, value) in userdb.items():
print key, ':', value
user_count += 1
print 'Total number of registered users: %d' % user_count
| heromod/migrid | mig/server/sandbox_list_users.py | Python | gpl-2.0 | 1,664 | [
"Brian"
] | 9329b8df9066b8ec9619931e65d81446e90d8c8e29bf39bc39b44f4513af6b3b |
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
#
# Note : After editing this file and committing changes, please run
# generate_ufuncs.py and commit the changes as a separate commit with a comment
# such as : GEN: special: run generate_ufuncs.py
from __future__ import division, print_function, absolute_import
# Registry mapping "<module>.<name>" to the docstring recorded for it.
docdict = {}


def get(name):
    """Return the docstring registered under *name*, or None if unknown."""
    try:
        return docdict[name]
    except KeyError:
        return None


def add_newdoc(place, name, doc):
    """Record *doc* for the ufunc *name* defined in module *place*."""
    key = '%s.%s' % (place, name)
    docdict[key] = doc
add_newdoc("scipy.special", "_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
http://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/.org/amos/
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a seach for a value that produces the desired
value of `y`. The search relies on the monotinicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a seach for a value that produces the desired
value of `y`. The search relies on the monotinicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
btdtria(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of `x`
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \\approx P(1-m) - (1-m) \\log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \\sqrt{1-m}
is used.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - z)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)} = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - z)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebychev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative real `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function.
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "_gammaln",
"""
Internal function, use ``gammaln`` instead.
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative density function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a seach for a value
that produces the desired value of `p`. The search relies on the
monotinicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, x)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) = f(z)`.
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x: array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptitic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptitic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptitic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptitic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not to be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not to be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(n\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-n\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# Fix copy-paste error: this registers the doc for kei, not ker.
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function kei
""")
# Derivative (with respect to x) of the Kelvin function kei.
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
# All four Kelvin functions and their derivatives at once, packed as
# complex numbers: real part = ber/ker/berp/kerp, imag part = bei/kei/beip/keip.
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
# Kelvin function ker (companion to kei above).
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
# Derivative (with respect to x) of the Kelvin function ker.
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
# Fix parameter-name mismatch: the signature is kn(n, x) but the argument was
# documented as `z`. Also import numpy in the doctest so the example runs.
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `x`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> import numpy as np
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
# Inverse of the Kolmogorov survival function defined just below.
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
# Survival function of the Kolmogorov limiting distribution, used by
# two-sided Kolmogorov-Smirnov goodness-of-fit tests.
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
# Import numpy in the doctest so the example is runnable as written
# (np.linspace was used without a corresponding import).
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> import numpy as np
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
""")
add_newdoc("scipy.special", "lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
# Fix the K_-(x) formula to reference this function's own return `fm`;
# `fp` belongs to modfresnelp (the positive-integral variant).
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fm``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# Fix typos in the Notes section: "seach" -> "search",
# "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
# Fix typos in the Notes section: "seach" -> "search",
# "monotinicity" -> "monotonicity".
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
# Fix See Also entries: the inverse routines are named ncfdtri/ncfdtridfd/
# ncfdtridfn/ncfdtrinc (as registered below), not "ncdfdtr*". Also import
# numpy in the doctest so the example runs as written.
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
ncfdtridfd : Calculate dfd, given CDF and iCDF values.
ncfdtridfn : Calculate dfn, given CDF and iCDF values.
ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
# Inverse of ncfdtr with respect to the quantile f.
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
# Inverse of ncfdtr with respect to the denominator degrees of freedom.
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
# Inverse of ncfdtr with respect to the numerator degrees of freedom.
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
""")
# Inverse of ncfdtr with respect to the noncentrality parameter.
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
# Import numpy in the doctest so the example runs as written
# (np.linspace was used without a corresponding import).
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
# Fix See Also: it listed this function itself; the companion routine that
# solves for the standard deviation is nrdtrisd.
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
# Fix See Also: "nrdtristd" does not exist; the companion routine that
# solves for the mean is nrdtrimn.
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
# Quantile function (inverse CDF) of the standard normal distribution.
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a, x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a, x) in w and the
derivative, W'(a, x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the secon kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principle branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principle branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.wofz(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$wofz(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plain at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggama`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/special/add_newdocs.py | Python | mit | 158,460 | [
"Gaussian"
] | 044702d7778929d8bca2e7570b93ae2fabef0288606375daa5759f5f5eadd7a6 |
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# Historically this was auto-generated from
# SciTools/iris-code-generators:tools/gen_rules.py
import cf_units
import numpy as np
import calendar
from iris.aux_factory import HybridHeightFactory, HybridPressureFactory
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.fileformats.rules import (ConversionMetadata, Factory, Reference,
ReferenceTarget)
import iris.fileformats.pp
from iris.fileformats._pp_lbproc_pairs import LBPROC_MAP
from iris.fileformats.um_cf_map import (LBFC_TO_CF, STASH_TO_CF,
STASHCODE_IMPLIED_HEIGHTS)
###############################################################################
#
# Convert vectorisation routines.
#
def _dim_or_aux(*args, **kwargs):
    """
    Build a coordinate from the given arguments, preferring a DimCoord.

    If the values are unsuitable for a :class:`~iris.coords.DimCoord`
    (e.g. non-monotonic points raise a ValueError), fall back to an
    :class:`~iris.coords.AuxCoord` instead. Since 'positive' is not a
    permitted attribute on an AuxCoord, it is removed from the
    'attributes' keyword (in place) before the fallback construction.
    """
    try:
        return DimCoord(*args, **kwargs)
    except ValueError:
        attributes = kwargs.get('attributes')
        if attributes is not None and 'positive' in attributes:
            # Note: mutates the caller-supplied attributes dict in place.
            del attributes['positive']
        return AuxCoord(*args, **kwargs)
def _convert_vertical_coords(lbcode, lbvc, blev, lblev, stash,
                             bhlev, bhrlev, brsvd1, brsvd2, brlev,
                             dim=None):
    """
    Encode scalar or vector vertical level values from PP headers as CM data
    components.
    Args:
    * lbcode:
        Scalar field :class:`iris.fileformats.pp.SplittableInt` value.
    * lbvc:
        Scalar field value.
    * blev:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * lblev:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * stash:
        Scalar field :class:`iris.fileformats.pp.STASH` value.
    * bhlev:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * bhrlev:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * brsvd1:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * brsvd2:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    * brlev:
        Scalar field value or :class:`numpy.ndarray` vector of field values.
    Kwargs:
    * dim:
        Associated dimension of the vertical coordinate. Defaults to None.
    Returns:
        A tuple containing a list of coords_and_dims, and a list of factories.
    """
    factories = []
    coords_and_dims = []
    # See Word no. 33 (LBLEV) in section 4 of UM Model Docs (F3).
    BASE_RHO_LEVEL_LBLEV = 9999
    # Map the base rho level flag value (9999) onto model level number 0.
    # NOTE(review): np.atleast_1d returns the input object itself when it is
    # already an ndarray, so this in-place assignment may also modify the
    # caller's lblev array -- confirm intended.
    model_level_number = np.atleast_1d(lblev)
    model_level_number[model_level_number == BASE_RHO_LEVEL_LBLEV] = 0
    # Ensure to vectorise these arguments as arrays, as they participate
    # in the conditions of convert rules.
    blev = np.atleast_1d(blev)
    brsvd1 = np.atleast_1d(brsvd1)
    brlev = np.atleast_1d(brlev)
    # Height (lbvc == 1), except for STASH codes with an implied height.
    if (lbvc == 1) and \
            str(stash) not in STASHCODE_IMPLIED_HEIGHTS and \
            np.all(blev != -1):
        coord = _dim_or_aux(blev, standard_name='height', units='m',
                            attributes={'positive': 'up'})
        coords_and_dims.append((coord, dim))
    # Fixed scalar height implied by the STASH code, attached with no
    # dimension mapping.
    if str(stash) in STASHCODE_IMPLIED_HEIGHTS:
        height = STASHCODE_IMPLIED_HEIGHTS[str(stash)]
        coord = DimCoord(height, standard_name='height', units='m',
                         attributes={'positive': 'up'})
        coords_and_dims.append((coord, None))
    # Model level number (lbvc == 2).
    if (len(lbcode) != 5) and \
            (lbvc == 2):
        coord = _dim_or_aux(model_level_number, standard_name='model_level_number',
                            attributes={'positive': 'down'})
        coords_and_dims.append((coord, dim))
    # Depth - unbound (lbvc == 2, brsvd1 == brlev everywhere: no distinct
    # bounds available).
    if (len(lbcode) != 5) and \
            (lbvc == 2) and \
            np.all(brsvd1 == brlev):
        coord = _dim_or_aux(blev, standard_name='depth', units='m',
                            attributes={'positive': 'down'})
        coords_and_dims.append((coord, dim))
    # Depth - bound (lbvc == 2, brsvd1 != brlev everywhere).
    if (len(lbcode) != 5) and \
            (lbvc == 2) and \
            np.all(brsvd1 != brlev):
        coord = _dim_or_aux(blev, standard_name='depth', units='m',
                            bounds=np.vstack((brsvd1, brlev)).T,
                            attributes={'positive': 'down'})
        coords_and_dims.append((coord, dim))
    # Depth - unbound and bound (mixed): synthesise bounds, collapsing the
    # unbound levels to zero-width bounds at the point value blev.
    if (len(lbcode) != 5) and \
            (lbvc == 2) and \
            (np.any(brsvd1 == brlev) and np.any(brsvd1 != brlev)):
        lower = np.where(brsvd1 == brlev, blev, brsvd1)
        upper = np.where(brsvd1 == brlev, blev, brlev)
        coord = _dim_or_aux(blev, standard_name='depth', units='m',
                            bounds=np.vstack((lower, upper)).T,
                            attributes={'positive': 'down'})
        coords_and_dims.append((coord, dim))
    # Soil level/depth (lbvc == 6).
    if len(lbcode) != 5 and lbvc == 6:
        if np.all(brsvd1 == 0) and np.all(brlev == 0):
            # UM populates lblev, brsvd1 and brlev metadata INCORRECTLY,
            # so continue to treat as a soil level.
            coord = _dim_or_aux(model_level_number,
                                long_name='soil_model_level_number',
                                attributes={'positive': 'down'})
            coords_and_dims.append((coord, dim))
        elif np.any(brsvd1 != brlev):
            # UM populates metadata CORRECTLY,
            # so treat it as the expected (bounded) soil depth.
            coord = _dim_or_aux(blev, standard_name='depth', units='m',
                                bounds=np.vstack((brsvd1, brlev)).T,
                                attributes={'positive': 'down'})
            coords_and_dims.append((coord, dim))
    # Pressure (lbvc == 8), unless a 5-digit lbcode already carries axis
    # code 1 on the x or y axis (presumably pressure as a coordinate axis
    # of a cross-section -- TODO confirm).
    if (lbvc == 8) and \
            (len(lbcode) != 5 or (len(lbcode) == 5 and
                                  1 not in [lbcode.ix, lbcode.iy])):
        coord = _dim_or_aux(blev, long_name='pressure', units='hPa')
        coords_and_dims.append((coord, dim))
    # Air potential temperature (lbvc == 19).
    if (len(lbcode) != 5) and \
            (lbvc == 19):
        coord = _dim_or_aux(blev, standard_name='air_potential_temperature', units='K',
                            attributes={'positive': 'up'})
        coords_and_dims.append((coord, dim))
    # Hybrid pressure levels (lbvc == 9): emit the component coordinates
    # plus a factory that builds the derived coordinate once the reference
    # 'surface_air_pressure' field becomes available.
    if lbvc == 9:
        model_level_number = _dim_or_aux(model_level_number,
                                         standard_name='model_level_number',
                                         attributes={'positive': 'up'})
        # For hybrid pressure, bhlev carries the pressure component and
        # blev carries sigma.
        level_pressure = _dim_or_aux(bhlev,
                                     long_name='level_pressure',
                                     units='Pa',
                                     bounds=np.vstack((bhrlev, brsvd2)).T)
        sigma = AuxCoord(blev,
                         long_name='sigma',
                         bounds=np.vstack((brlev, brsvd1)).T)
        coords_and_dims.extend([(model_level_number, dim),
                                (level_pressure, dim),
                                (sigma, dim)])
        factories.append(Factory(HybridPressureFactory,
                                 [{'long_name': 'level_pressure'},
                                  {'long_name': 'sigma'},
                                  Reference('surface_air_pressure')]))
    # Hybrid height levels (lbvc == 65): as above, but with a factory that
    # builds the derived coordinate from an 'orography' reference field.
    if lbvc == 65:
        model_level_number = _dim_or_aux(model_level_number,
                                         standard_name='model_level_number',
                                         attributes={'positive': 'up'})
        # For hybrid height, blev carries the height component and bhlev
        # carries sigma (the reverse of the hybrid pressure case).
        level_height = _dim_or_aux(blev,
                                   long_name='level_height',
                                   units='m',
                                   bounds=np.vstack((brlev, brsvd1)).T,
                                   attributes={'positive': 'up'})
        sigma = AuxCoord(bhlev,
                         long_name='sigma',
                         bounds=np.vstack((bhrlev, brsvd2)).T)
        coords_and_dims.extend([(model_level_number, dim),
                                (level_height, dim),
                                (sigma, dim)])
        factories.append(Factory(HybridHeightFactory,
                                 [{'long_name': 'level_height'},
                                  {'long_name': 'sigma'},
                                  Reference('orography')]))
    return coords_and_dims, factories
def _reshape_vector_args(values_and_dims):
"""
Reshape a group of (array, dimensions-mapping) onto all dimensions.
The resulting arrays are all mapped over the same dimensions; as many as
the maximum dimension number found in the inputs. Those dimensions not
mapped by a given input appear as length-1 dimensions in the output array.
The resulting arrays are thus all mutually compatible in arithmetic -- i.e.
can combine without broadcasting errors (provided that all inputs mapping
to a dimension define the same associated length).
Args:
* values_and_dims (iterable of (array-like, iterable of int)):
Input arrays with associated mapping dimension numbers.
The length of each 'dims' must match the ndims of the 'value'.
Returns:
* reshaped_arrays (iterable of arrays).
The inputs, transposed and reshaped onto common target dimensions.
"""
# Find maximum dimension index, which sets ndim of results.
max_dims = [max(dims) if dims else -1 for _, dims in values_and_dims]
max_dim = max(max_dims) if max_dims else -1
result = []
for value, dims in values_and_dims:
value = np.asarray(value)
if len(dims) != value.ndim:
raise ValueError('Lengths of dimension-mappings must match '
'input array dimensions.')
# Save dim sizes in original order.
original_shape = value.shape
if dims:
# Transpose values to put its dims in the target order.
dims_order = sorted(range(len(dims)),
key=lambda i_dim: dims[i_dim])
value = value.transpose(dims_order)
if max_dim != -1:
# Reshape to add any extra *1 dims.
shape = [1] * (max_dim + 1)
for i_dim, dim in enumerate(dims):
shape[dim] = original_shape[i_dim]
value = value.reshape(shape)
result.append(value)
return result
def _collapse_degenerate_points_and_bounds(points, bounds=None, rtol=1.0e-7):
"""
Collapse points (and optionally bounds) in any dimensions over which all
values are the same.
All dimensions are tested, and if degenerate are reduced to length 1.
Value equivalence is controlled by a tolerance, to avoid problems with
numbers from cftime.date2num, which has limited precision because of
the way it calculates with floats of days.
Args:
* points (:class:`numpy.ndarray`)):
Array of points values.
Kwargs:
* bounds (:class:`numpy.ndarray`)
Array of bounds values. This array should have an additional vertex
dimension (typically of length 2) when compared to the points array
i.e. bounds.shape = points.shape + (nvertex,)
Returns:
A (points, bounds) tuple.
"""
array = points
if bounds is not None:
array = np.vstack((points, bounds.T)).T
for i_dim in range(points.ndim):
if array.shape[i_dim] > 1:
slice_inds = [slice(None)] * points.ndim
slice_inds[i_dim] = slice(0, 1)
slice_0 = array[slice_inds]
if np.allclose(array, slice_0, rtol):
array = slice_0
points = array
if bounds is not None:
points = array[..., 0]
bounds = array[..., 1:]
return points, bounds
def _reduce_points_and_bounds(points, lower_and_upper_bounds=None):
    """
    Reduce the dimensionality of arrays of coordinate points (and optionally
    bounds).

    Dimensions over which all values are the same are reduced to size 1, using
    :func:`_collapse_degenerate_points_and_bounds`.
    All size-1 dimensions are then removed.

    If the bounds arrays are also passed in, then all three arrays must have
    the same shape or be capable of being broadcast to match.

    Args:

    * points (array-like):
        Coordinate point values.

    Kwargs:

    * lower_and_upper_bounds (pair of array-like, or None):
        Corresponding bounds values (lower, upper), if any.

    Returns:
        dims (iterable of ints), points(array), bounds(array)

        * 'dims' is the mapping from the result array dimensions to the
          original dimensions.  However, when 'array' is scalar, 'dims' will
          be None (rather than an empty tuple).
        * 'points' and 'bounds' are the reduced arrays.
          If no bounds were passed, None is returned.

    """
    # Remember the input dtypes: the stacking/broadcasting below can upcast,
    # and the outputs are explicitly cast back at the end.
    orig_points_dtype = np.asarray(points).dtype
    bounds = None
    if lower_and_upper_bounds is not None:
        # Combine the (lower, upper) pair into one array with a trailing
        # vertex dimension of length 2.
        lower_bounds, upper_bounds = np.broadcast_arrays(
            *lower_and_upper_bounds)
        orig_bounds_dtype = lower_bounds.dtype
        bounds = np.vstack((lower_bounds, upper_bounds)).T
    # Attempt to broadcast points to match bounds to handle scalars.
    # NOTE(review): this assumes 'points' already has a .shape (i.e. is a
    # numpy value, not a plain list) -- confirm against callers before reuse.
    if bounds is not None and points.shape != bounds.shape[:-1]:
        points, _ = np.broadcast_arrays(points, bounds[..., 0])
    # Shrink to length 1 any dimension whose points+bounds are all equal.
    points, bounds = _collapse_degenerate_points_and_bounds(points, bounds)
    # Drop the remaining length-1 dimensions, recording which original
    # dimensions survive in the result.
    used_dims = tuple(i_dim for i_dim in range(points.ndim)
                      if points.shape[i_dim] > 1)
    reshape_inds = tuple([points.shape[dim] for dim in used_dims])
    points = points.reshape(reshape_inds)
    points = points.astype(orig_points_dtype)
    if bounds is not None:
        bounds = bounds.reshape(reshape_inds + (2,))
        bounds = bounds.astype(orig_bounds_dtype)
    if not used_dims:
        # Scalar result: signal "no dimensions" with None, not an empty tuple.
        used_dims = None
    return used_dims, points, bounds
def _new_coord_and_dims(is_vector_operation,
                        name, units,
                        points, lower_and_upper_bounds=None):
    """
    Make a new (coordinate, cube_dims) pair with the given points, name, units
    and optional bounds.

    In 'vector' style operation, the data arrays must have same number of
    dimensions as the target cube, and additional operations are performed :

    * dimensions with all points and bounds values the same are removed.
    * the result coordinate may be an AuxCoord if a DimCoord cannot be made
      (e.g. if values are non-monotonic).

    Args:

    * is_vector_operation (bool):
        If True, perform 'vector' style operation.
    * name (string):
        Standard name of coordinate.
    * units (string or cf_unit.Unit):
        Units of coordinate.
    * points (array-like):
        Coordinate point values.

    Kwargs:

    * lower_and_upper_bounds (pair of array-like, or None):
        Corresponding bounds values (lower, upper), if any.

    Returns:
        a new (coordinate, dims) pair.

    """
    bounds = lower_and_upper_bounds
    if is_vector_operation:
        # Collapse out degenerate dimensions and find which cube dimensions
        # the coordinate still maps onto.
        dims, points, bounds = _reduce_points_and_bounds(points, bounds)
    else:
        # Scalar case: the coordinate is attached to no cube dimension.
        dims = None
    coord = _dim_or_aux(points, bounds=bounds, standard_name=name, units=units)
    return (coord, dims)
_HOURS_UNIT = cf_units.Unit('hours')
def _convert_time_coords(lbcode, lbtim, epoch_hours_unit,
                         t1, t2, lbft,
                         t1_dims=(), t2_dims=(), lbft_dims=()):
    """
    Make time coordinates from the time metadata.

    Args:

    * lbcode(:class:`iris.fileformats.pp.SplittableInt`):
        Scalar field value.
    * lbtim (:class:`iris.fileformats.pp.SplittableInt`):
        Scalar field value.
    * epoch_hours_unit (:class:`cf_units.Unit`):
        Epoch time reference unit.
    * t1 (array-like or scalar):
        Scalar field value or an array of values.
    * t2 (array-like or scalar):
        Scalar field value or an array of values.
    * lbft (array-like or scalar):
        Scalar field value or an array of values.

    Kwargs:

    * t1_dims, t2_dims, lbft_dims (tuples of int):
        Cube dimension mappings for the array metadata. Each defaults
        to (). The length of each dims tuple should equal the dimensionality
        of the corresponding array of values.

    Returns:
        A list of (coordinate, dims) tuples. The coordinates are instance of
        :class:`iris.coords.DimCoord` if possible, otherwise they are instance
        of :class:`iris.coords.AuxCoord`. When the coordinate is of length one,
        the `dims` value is None rather than an empty tuple.

    """
    def date2hours(t):
        # Convert a datetime-like value to hours since the epoch.
        # netcdf4python has changed its behaviour, at version 1.2, such
        # that a date2num calculation returns a python float, not
        # numpy.float64. The behaviour of round is to recast this to an
        # int, which is not the desired behaviour for PP files.
        # So, cast the answer to numpy.float_ to be safe.
        epoch_hours = np.float_(epoch_hours_unit.date2num(t))
        if t.minute == 0 and t.second == 0:
            # Snap whole-hour times to exact values (date2num precision).
            epoch_hours = round(epoch_hours)
        return epoch_hours

    def date2year(t_in):
        # Extract just the year of a datetime-like value.
        return t_in.year

    # Check whether inputs are all scalar, for faster handling of scalar cases.
    do_vector = len(t1_dims) + len(t2_dims) + len(lbft_dims) > 0
    if do_vector:
        # Reform the input values so they have all the same number of
        # dimensions, transposing where necessary (based on the dimension
        # mappings) so that the dimensions are common across each array.
        # Note: this does not _guarantee_ that the arrays are broadcastable,
        # but subsequent arithmetic makes this assumption.
        t1, t2, lbft = _reshape_vector_args([(t1, t1_dims), (t2, t2_dims),
                                             (lbft, lbft_dims)])
        # Vectorise the conversions so they work elementwise on the arrays.
        date2hours = np.vectorize(date2hours)
        date2year = np.vectorize(date2year)

    t1_epoch_hours = date2hours(t1)
    t2_epoch_hours = date2hours(t2)
    hours_from_t1_to_t2 = t2_epoch_hours - t1_epoch_hours
    hours_from_t2_to_t1 = t1_epoch_hours - t2_epoch_hours
    coords_and_dims = []

    # lbtim.ib == 0 : a single time value t1 --> just a 'time' coordinate.
    # (All branches exclude time cross-sections, i.e. lbcode ix/iy in 20-23.)
    if ((lbtim.ia == 0) and
            (lbtim.ib == 0) and
            (lbtim.ic in [1, 2, 3, 4]) and
            (len(lbcode) != 5 or (len(lbcode) == 5 and
                                  lbcode.ix not in [20, 21, 22, 23] and
                                  lbcode.iy not in [20, 21, 22, 23]))):
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'time', epoch_hours_unit, t1_epoch_hours))

    # lbtim.ib == 1 : forecast -- 'time' is t1, the reference time is t2,
    # and the forecast period is their difference (t1 - t2).
    if ((lbtim.ia == 0) and
            (lbtim.ib == 1) and
            (lbtim.ic in [1, 2, 3, 4]) and
            (len(lbcode) != 5 or (len(lbcode) == 5
                                  and lbcode.ix not in [20, 21, 22, 23]
                                  and lbcode.iy not in [20, 21, 22, 23]))):
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_period', _HOURS_UNIT, hours_from_t2_to_t1))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'time', epoch_hours_unit, t1_epoch_hours))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_reference_time', epoch_hours_unit,
            t2_epoch_hours))

    # lbtim.ib == 2 : value over the period t1 -> t2.  'time' is the window
    # midpoint bounded by (t1, t2); 'forecast_period' is mid-window with
    # bounds (lbft - window, lbft); the reference time is t2 - lbft.
    if ((lbtim.ib == 2) and
            (lbtim.ic in [1, 2, 4]) and
            (np.any(date2year(t1) != 0) and np.any(date2year(t2) != 0)) and
            # Note: don't add time coordinates when years are zero and
            # lbtim.ib == 2. These are handled elsewhere.
            ((len(lbcode) != 5) or (len(lbcode) == 5 and
                                    lbcode.ix not in [20, 21, 22, 23]
                                    and lbcode.iy not in [20, 21, 22, 23]))):
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_period', _HOURS_UNIT,
            lbft - 0.5 * hours_from_t1_to_t2,
            [lbft - hours_from_t1_to_t2, lbft]))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'time', epoch_hours_unit,
            0.5 * (t1_epoch_hours + t2_epoch_hours),
            [t1_epoch_hours, t2_epoch_hours]))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_reference_time', epoch_hours_unit,
            t2_epoch_hours - lbft))

    # lbtim.ib == 3 : statistic over the period t1 -> t2.  'time' is the
    # period end t2, bounded by (t1, t2); 'forecast_period' is lbft bounded
    # by (lbft - window, lbft).
    if ((lbtim.ib == 3) and
            (lbtim.ic in [1, 2, 4]) and
            ((len(lbcode) != 5) or (len(lbcode) == 5 and
                                    lbcode.ix not in [20, 21, 22, 23] and
                                    lbcode.iy not in [20, 21, 22, 23]))):
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_period', _HOURS_UNIT,
            lbft, [lbft - hours_from_t1_to_t2, lbft]))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'time', epoch_hours_unit,
            t2_epoch_hours, [t1_epoch_hours, t2_epoch_hours]))
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_reference_time', epoch_hours_unit,
            t2_epoch_hours - lbft))

    # Time-series cross-section (lbcode *3): only a reference time is made
    # here -- the 'time' coordinate itself is built elsewhere from t1/t2
    # (see the lbcode[-1] == 3 handling in _all_other_rules).
    if \
            (len(lbcode) == 5) and \
            (lbcode[-1] == 3) and \
            (lbtim.ib == 2) and (lbtim.ic == 2):
        coords_and_dims.append(_new_coord_and_dims(
            do_vector, 'forecast_reference_time', epoch_hours_unit,
            t2_epoch_hours - lbft))

    return coords_and_dims
###############################################################################
def _model_level_number(lblev):
"""
Return model level number for an LBLEV value.
Args:
* lblev (int):
PP field LBLEV value.
Returns:
Model level number (integer).
"""
# See Word no. 33 (LBLEV) in section 4 of UM Model Docs (F3).
SURFACE_AND_ZEROTH_RHO_LEVEL_LBLEV = 9999
if lblev == SURFACE_AND_ZEROTH_RHO_LEVEL_LBLEV:
model_level_number = 0
else:
model_level_number = lblev
return model_level_number
def _convert_scalar_realization_coords(lbrsvd4):
    """
    Encode scalar 'realization' (aka ensemble) numbers as CM data.

    Returns a list of coords_and_dims.

    """
    # A zero LBRSVD4 means "not an ensemble member": emit no coordinate.
    if lbrsvd4 == 0:
        return []
    return [(DimCoord(lbrsvd4, standard_name='realization'), None)]
def _convert_scalar_pseudo_level_coords(lbuser5):
    """
    Encode scalar pseudo-level values as CM data.

    Returns a list of coords_and_dims.

    """
    # A zero LBUSER5 means "no pseudo-level": emit no coordinate.
    if lbuser5 == 0:
        return []
    return [(DimCoord(lbuser5, long_name='pseudo_level', units='1'), None)]
def convert(f):
    """
    Converts a PP field into the corresponding items of Cube metadata.

    Args:

    * f:
        A :class:`iris.fileformats.pp.PPField` object.

    Returns:
        A :class:`iris.fileformats.rules.ConversionMetadata` object.

    """
    factories = []
    aux_coords_and_dims = []

    # "Normal" (non-cross-sectional) Time values (--> scalar coordinates)
    time_coords_and_dims = _convert_time_coords(
        lbcode=f.lbcode, lbtim=f.lbtim,
        epoch_hours_unit=f.time_unit('hours'),
        t1=f.t1, t2=f.t2, lbft=f.lbft)
    aux_coords_and_dims.extend(time_coords_and_dims)

    # "Normal" (non-cross-sectional) Vertical levels
    # (--> scalar coordinates and factories)
    vertical_coords_and_dims, vertical_factories = \
        _convert_vertical_coords(
            lbcode=f.lbcode,
            lbvc=f.lbvc,
            blev=f.blev,
            lblev=f.lblev,
            stash=f.stash,
            bhlev=f.bhlev,
            bhrlev=f.bhrlev,
            brsvd1=f.brsvd[0],
            brsvd2=f.brsvd[1],
            brlev=f.brlev)
    aux_coords_and_dims.extend(vertical_coords_and_dims)
    factories.extend(vertical_factories)

    # Realization (aka ensemble) (--> scalar coordinates)
    aux_coords_and_dims.extend(_convert_scalar_realization_coords(
        lbrsvd4=f.lbrsvd[3]))

    # Pseudo-level coordinate (--> scalar coordinates)
    aux_coords_and_dims.extend(_convert_scalar_pseudo_level_coords(
        lbuser5=f.lbuser[4]))

    # All the other rules: names/units, attributes, cell methods, and the
    # horizontal and cross-sectional coordinates.
    references, standard_name, long_name, units, attributes, cell_methods, \
        dim_coords_and_dims, other_aux_coords_and_dims = _all_other_rules(f)
    aux_coords_and_dims.extend(other_aux_coords_and_dims)

    return ConversionMetadata(factories, references, standard_name, long_name,
                              units, attributes, cell_methods,
                              dim_coords_and_dims, aux_coords_and_dims)
def _all_other_rules(f):
    """
    This deals with all the other rules that have not been factored into any of
    the other convert_scalar_coordinate functions above.

    Returns a tuple:
    (references, standard_name, long_name, units, attributes, cell_methods,
     dim_coords_and_dims, aux_coords_and_dims)

    """
    references = []
    standard_name = None
    long_name = None
    units = None
    attributes = {}
    cell_methods = []
    dim_coords_and_dims = []
    aux_coords_and_dims = []

    # Season coordinates (--> scalar coordinates)
    # Each climatological season is recognised from its fixed start
    # (lbmon/lbdat/lbhr/lbmin) and end (lbmond/lbdatd/lbhrd/lbmind)
    # date-times, e.g. 'djf' runs from 1 December 00:00 to 1 March 00:00.
    if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
            (len(f.lbcode) != 5 or
             (len(f.lbcode) == 5 and
              (f.lbcode.ix not in [20, 21, 22, 23] and
               f.lbcode.iy not in [20, 21, 22, 23]))) and
            f.lbmon == 12 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
            f.lbmond == 3 and f.lbdatd == 1 and f.lbhrd == 0 and
            f.lbmind == 0):
        aux_coords_and_dims.append(
            (AuxCoord('djf', long_name='season', units='no_unit'),
             None))

    if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
            ((len(f.lbcode) != 5) or
             (len(f.lbcode) == 5 and
              f.lbcode.ix not in [20, 21, 22, 23]
              and f.lbcode.iy not in [20, 21, 22, 23])) and
            f.lbmon == 3 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
            f.lbmond == 6 and f.lbdatd == 1 and f.lbhrd == 0 and
            f.lbmind == 0):
        aux_coords_and_dims.append(
            (AuxCoord('mam', long_name='season', units='no_unit'),
             None))

    if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
            ((len(f.lbcode) != 5) or
             (len(f.lbcode) == 5 and
              f.lbcode.ix not in [20, 21, 22, 23] and
              f.lbcode.iy not in [20, 21, 22, 23])) and
            f.lbmon == 6 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
            f.lbmond == 9 and f.lbdatd == 1 and f.lbhrd == 0 and
            f.lbmind == 0):
        aux_coords_and_dims.append(
            (AuxCoord('jja', long_name='season', units='no_unit'),
             None))

    if (f.lbtim.ib == 3 and f.lbtim.ic in [1, 2, 4] and
            ((len(f.lbcode) != 5) or
             (len(f.lbcode) == 5 and
              f.lbcode.ix not in [20, 21, 22, 23] and
              f.lbcode.iy not in [20, 21, 22, 23])) and
            f.lbmon == 9 and f.lbdat == 1 and f.lbhr == 0 and f.lbmin == 0 and
            f.lbmond == 12 and f.lbdatd == 1 and f.lbhrd == 0 and
            f.lbmind == 0):
        aux_coords_and_dims.append(
            (AuxCoord('son', long_name='season', units='no_unit'),
             None))

    # Special case where year is zero and months match.
    # Month coordinates (--> scalar coordinates)
    if (f.lbtim.ib == 2 and f.lbtim.ic in [1, 2, 4] and
            ((len(f.lbcode) != 5) or
             (len(f.lbcode) == 5 and
              f.lbcode.ix not in [20, 21, 22, 23] and
              f.lbcode.iy not in [20, 21, 22, 23])) and
            f.lbyr == 0 and f.lbyrd == 0 and
            f.lbmon == f.lbmond):
        aux_coords_and_dims.append(
            (AuxCoord(f.lbmon, long_name='month_number'),
             None))
        aux_coords_and_dims.append(
            (AuxCoord(calendar.month_abbr[f.lbmon], long_name='month',
                      units='no_unit'),
             None))
        # A forecast-period coordinate (from LBFT) is added in this case too.
        aux_coords_and_dims.append(
            (DimCoord(points=f.lbft, standard_name='forecast_period',
                      units='hours'),
             None))

    # "Normal" (i.e. not cross-sectional) lats+lons (--> vector coordinates)
    # lbcode[0] == 1 : regular grid, points only;
    # lbcode[0] == 2 : regular grid, with bounds.
    if (f.bdx != 0.0 and f.bdx != f.bmdi and len(f.lbcode) != 5 and
            f.lbcode[0] == 1):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
                                   standard_name=f._x_coord_name(),
                                   units='degrees',
                                   circular=(f.lbhem in [0, 4]),
                                   coord_system=f.coord_system()),
             1))

    if (f.bdx != 0.0 and f.bdx != f.bmdi and len(f.lbcode) != 5 and
            f.lbcode[0] == 2):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
                                   standard_name=f._x_coord_name(),
                                   units='degrees',
                                   circular=(f.lbhem in [0, 4]),
                                   coord_system=f.coord_system(),
                                   with_bounds=True),
             1))

    if (f.bdy != 0.0 and f.bdy != f.bmdi and len(f.lbcode) != 5 and
            f.lbcode[0] == 1):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzy, f.bdy, f.lbrow,
                                   standard_name=f._y_coord_name(),
                                   units='degrees',
                                   coord_system=f.coord_system()),
             0))

    if (f.bdy != 0.0 and f.bdy != f.bmdi and len(f.lbcode) != 5 and
            f.lbcode[0] == 2):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzy, f.bdy, f.lbrow,
                                   standard_name=f._y_coord_name(),
                                   units='degrees',
                                   coord_system=f.coord_system(),
                                   with_bounds=True),
             0))

    # Irregular grids: a zero (or missing) grid spacing means the explicit
    # point values in f.y / f.x are used instead.
    if ((f.bdy == 0.0 or f.bdy == f.bmdi) and
            (len(f.lbcode) != 5 or
             (len(f.lbcode) == 5 and f.lbcode.iy == 10))):
        dim_coords_and_dims.append(
            (DimCoord(f.y, standard_name=f._y_coord_name(), units='degrees',
                      bounds=f.y_bounds, coord_system=f.coord_system()),
             0))

    if ((f.bdx == 0.0 or f.bdx == f.bmdi) and
            (len(f.lbcode) != 5 or
             (len(f.lbcode) == 5 and f.lbcode.ix == 11))):
        dim_coords_and_dims.append(
            (DimCoord(f.x, standard_name=f._x_coord_name(), units='degrees',
                      bounds=f.x_bounds, circular=(f.lbhem in [0, 4]),
                      coord_system=f.coord_system()),
             1))

    # Cross-sectional vertical level types (--> vector coordinates)
    if (len(f.lbcode) == 5 and f.lbcode.iy == 2 and
            (f.bdy == 0 or f.bdy == f.bmdi)):
        dim_coords_and_dims.append(
            (DimCoord(f.y, standard_name='height', units='km',
                      bounds=f.y_bounds, attributes={'positive': 'up'}),
             0))

    if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.iy == 4):
        dim_coords_and_dims.append(
            (DimCoord(f.y, standard_name='depth', units='m',
                      bounds=f.y_bounds, attributes={'positive': 'down'}),
             0))

    # NOTE(review): this branch names the coord from _y_coord_name() but
    # attaches it to data dimension 1 (the X direction) -- presumably for
    # latitude cross-sections; confirm before changing.
    if (len(f.lbcode) == 5 and f.lbcode.ix == 10 and f.bdx != 0 and
            f.bdx != f.bmdi):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
                                   standard_name=f._y_coord_name(),
                                   units='degrees',
                                   coord_system=f.coord_system()),
             1))

    if (len(f.lbcode) == 5 and
            f.lbcode.iy == 1 and
            (f.bdy == 0 or f.bdy == f.bmdi)):
        dim_coords_and_dims.append(
            (DimCoord(f.y, long_name='pressure', units='hPa',
                      bounds=f.y_bounds),
             0))

    if (len(f.lbcode) == 5 and f.lbcode.ix == 1 and
            (f.bdx == 0 or f.bdx == f.bmdi)):
        dim_coords_and_dims.append((DimCoord(f.x, long_name='pressure',
                                             units='hPa', bounds=f.x_bounds),
                                    1))

    # Cross-sectional time values (--> vector coordinates)
    if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.iy == 23):
        dim_coords_and_dims.append(
            (DimCoord(
                f.y,
                standard_name='time',
                units=cf_units.Unit('days since 0000-01-01 00:00:00',
                                    calendar=cf_units.CALENDAR_360_DAY),
                bounds=f.y_bounds),
             0))

    if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.ix == 23):
        dim_coords_and_dims.append(
            (DimCoord(
                f.x,
                standard_name='time',
                units=cf_units.Unit('days since 0000-01-01 00:00:00',
                                    calendar=cf_units.CALENDAR_360_DAY),
                bounds=f.x_bounds),
             1))

    # Time-series variant: build evenly-spaced times between t1 and t2.
    if (len(f.lbcode) == 5 and f.lbcode[-1] == 3 and f.lbcode.iy == 23 and
            f.lbtim.ib == 2 and f.lbtim.ic == 2):
        epoch_days_unit = cf_units.Unit('days since 0000-01-01 00:00:00',
                                        calendar=cf_units.CALENDAR_360_DAY)
        t1_epoch_days = epoch_days_unit.date2num(f.t1)
        t2_epoch_days = epoch_days_unit.date2num(f.t2)
        # The end time is exclusive, not inclusive.
        dim_coords_and_dims.append(
            (DimCoord(
                np.linspace(t1_epoch_days, t2_epoch_days, f.lbrow,
                            endpoint=False),
                standard_name='time',
                units=epoch_days_unit,
                bounds=f.y_bounds),
             0))

    # Site number (--> scalar coordinate)
    if (len(f.lbcode) == 5 and f.lbcode[-1] == 1 and f.lbcode.ix == 13 and
            f.bdx != 0):
        dim_coords_and_dims.append(
            (DimCoord.from_regular(f.bzx, f.bdx, f.lbnpt,
                                   long_name='site_number', units='1'),
             1))

    # Site number cross-sections (???)
    # Midpoints of the (lower, upper) domain extents, where present and not
    # the missing-data sentinel (-1.e+30).
    if (len(f.lbcode) == 5 and
            13 in [f.lbcode.ix, f.lbcode.iy] and
            11 not in [f.lbcode.ix, f.lbcode.iy] and
            hasattr(f, 'lower_x_domain') and
            hasattr(f, 'upper_x_domain') and
            all(f.lower_x_domain != -1.e+30) and
            all(f.upper_x_domain != -1.e+30)):
        aux_coords_and_dims.append(
            (AuxCoord((f.lower_x_domain + f.upper_x_domain) / 2.0,
                      standard_name=f._x_coord_name(), units='degrees',
                      bounds=np.array([f.lower_x_domain, f.upper_x_domain]).T,
                      coord_system=f.coord_system()),
             1 if f.lbcode.ix == 13 else 0))

    if (len(f.lbcode) == 5 and
            13 in [f.lbcode.ix, f.lbcode.iy] and
            10 not in [f.lbcode.ix, f.lbcode.iy] and
            hasattr(f, 'lower_y_domain') and
            hasattr(f, 'upper_y_domain') and
            all(f.lower_y_domain != -1.e+30) and
            all(f.upper_y_domain != -1.e+30)):
        aux_coords_and_dims.append(
            (AuxCoord((f.lower_y_domain + f.upper_y_domain) / 2.0,
                      standard_name=f._y_coord_name(), units='degrees',
                      bounds=np.array([f.lower_y_domain, f.upper_y_domain]).T,
                      coord_system=f.coord_system()),
             1 if f.lbcode.ix == 13 else 0))

    # LBPROC codings (--> cell method + attributes)
    # Recognised here: 64 = zonal mean, 128 = time mean, 4096 = time minimum,
    # 8192 = time maximum, 192 = both time and zonal mean.
    unhandled_lbproc = True
    zone_method = None
    time_method = None
    if f.lbproc == 0:
        unhandled_lbproc = False
    elif f.lbproc == 64:
        zone_method = 'mean'
    elif f.lbproc == 128:
        time_method = 'mean'
    elif f.lbproc == 4096:
        time_method = 'minimum'
    elif f.lbproc == 8192:
        time_method = 'maximum'
    elif f.lbproc == 192:
        time_method = 'mean'
        zone_method = 'mean'

    if time_method is not None:
        # A non-zero LBTIM.IA provides the cell-method interval, in hours.
        if f.lbtim.ia != 0:
            intervals = '{} hour'.format(f.lbtim.ia)
        else:
            intervals = None

        if f.lbtim.ib == 2:
            # Aggregation over a period of time.
            cell_methods.append(CellMethod(time_method,
                                           coords='time',
                                           intervals=intervals))
            unhandled_lbproc = False
        elif f.lbtim.ib == 3 and f.lbproc == 128:
            # Aggregation over a period of time within a year, over a number
            # of years.
            # Only mean (lbproc of 128) is handled as the min/max
            # interpretation is ambiguous e.g. decadal mean of daily max,
            # decadal max of daily mean, decadal mean of max daily mean etc.
            cell_methods.append(
                CellMethod('{} within years'.format(time_method),
                           coords='time', intervals=intervals))
            cell_methods.append(
                CellMethod('{} over years'.format(time_method),
                           coords='time'))
            unhandled_lbproc = False
        else:
            # Generic cell method to indicate a time aggregation.
            cell_methods.append(CellMethod(time_method,
                                           coords='time'))
            unhandled_lbproc = False

    if zone_method is not None:
        if f.lbcode == 1:
            cell_methods.append(CellMethod(zone_method, coords='longitude'))
            for coord, _dim in dim_coords_and_dims:
                if coord.standard_name == 'longitude':
                    if len(coord.points) == 1:
                        # A single zonal-mean point spans the full circle.
                        coord.bounds = np.array([0. , 360.], dtype=np.float32)
                    else:
                        coord.guess_bounds()
            unhandled_lbproc = False
        elif f.lbcode == 101:
            cell_methods.append(CellMethod(zone_method,
                                           coords='grid_longitude'))
            for coord, _dim in dim_coords_and_dims:
                if coord.standard_name == 'grid_longitude':
                    if len(coord.points) == 1:
                        coord.bounds = np.array([0. , 360.], dtype=np.float32)
                    else:
                        coord.guess_bounds()
            unhandled_lbproc = False
        else:
            unhandled_lbproc = True

    if unhandled_lbproc:
        # Record unrecognised processing flag bits as a cube attribute.
        attributes["ukmo__process_flags"] = tuple(sorted(
            [name
             for value, name in six.iteritems(LBPROC_MAP)
             if isinstance(value, int) and f.lbproc & value]))

    # LBSRCE encodes model code (low 4 digits: 1111 = UM) + 10000 * version.
    if (f.lbsrce % 10000) == 1111:
        attributes['source'] = 'Data from Met Office Unified Model'
        # Also define MO-netCDF compliant UM version.
        um_major = (f.lbsrce // 10000) // 100
        if um_major != 0:
            um_minor = (f.lbsrce // 10000) % 100
            attributes['um_version'] = '{:d}.{:d}'.format(um_major, um_minor)

    # Record the STASH code when any of its components is non-zero.
    if (f.lbuser[6] != 0 or
            (f.lbuser[3] // 1000) != 0 or
            (f.lbuser[3] % 1000) != 0):
        attributes['STASH'] = f.stash

    # Translate STASH (preferred) or field code (fallback) to CF metadata.
    if str(f.stash) in STASH_TO_CF:
        standard_name = STASH_TO_CF[str(f.stash)].standard_name
        units = STASH_TO_CF[str(f.stash)].units
        long_name = STASH_TO_CF[str(f.stash)].long_name
    if (not f.stash.is_valid and f.lbfc in LBFC_TO_CF):
        standard_name = LBFC_TO_CF[f.lbfc].standard_name
        units = LBFC_TO_CF[f.lbfc].units
        long_name = LBFC_TO_CF[f.lbfc].long_name

    # Orography reference field (--> reference target)
    if f.lbuser[3] == 33:
        references.append(ReferenceTarget('orography', None))

    # Surface pressure reference field (--> reference target)
    if f.lbuser[3] == 409 or f.lbuser[3] == 1:
        references.append(ReferenceTarget('surface_air_pressure', None))

    return (references, standard_name, long_name, units, attributes,
            cell_methods, dim_coords_and_dims, aux_coords_and_dims)
| dkillick/iris | lib/iris/fileformats/pp_load_rules.py | Python | lgpl-3.0 | 41,025 | [
"NetCDF"
] | 72619d7844426adc44f4ef3868a9dd0f513a4a6e3be230d106208e01d2283bf8 |
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import ORCA.Globals as Globals
from ORCA.utils.Path import cPath
from ORCA.vars.Helpers import GetEnvVar
from ORCA.utils.Platform import OS_GetSystemUserPath
def GetSystemTmpPath() -> cPath:
    """ Gets the path to the tmp folder """
    # Preferred source: the TMP environment variable.
    oTmpPath:cPath = cPath(GetEnvVar(u"TMP"))
    if not oTmpPath.Exists():
        # Next best: the per-user AppData temp folder.
        oTmpPath = OS_GetSystemUserPath() +"\\AppData\\Local\\Temp"
        if not oTmpPath.Exists():
            # Last resort: a private folder inside the user's download
            # path, created on demand.
            oTmpPath = Globals.oPathUserDownload+"orcatmp"
            oTmpPath.Create()
    return oTmpPath
| thica/ORCA-Remote | src/ORCA/utils/Platform/win/win_GetSystemTmpPath.py | Python | gpl-3.0 | 1,416 | [
"ORCA"
] | d5185712f6428daeedfb517cd89b28edfb161e58f6f099f947b518addabd3716 |
import math
from itertools import izip_longest
class NeuronConnection(object):
    """A weighted, directed edge from a parent neuron to a child neuron."""

    def __init__(self, parent, child, weight=0.4):
        # 'weight' scales the parent's output before it contributes to the
        # child's input sum; it is adjusted during training.
        self.parent = parent
        self.child = child
        self.weight = weight
class Neuron(object):
    """
    A single sigmoid unit.

    Tracks its incoming/outgoing NeuronConnection lists, the last raw input
    and activated output, and a backpropagation error term ('delta').
    """

    def __init__(self, input=None, output=None, is_bias=False):
        self.input = input
        self.output = output
        self.incoming = []   # NeuronConnection objects feeding this neuron.
        self.outgoing = []   # NeuronConnection objects this neuron feeds.
        self.is_bias = is_bias
        self.delta = None    # Error term, set during train().
        self.learning_rate = 0.3

    def connect_child(self, child_neuron, weight=0.4):
        """Connect a child neuron to our output, then connect ourselves as input to child neuron"""
        connection = NeuronConnection(self, child_neuron, weight=weight)
        self.outgoing.append(connection)
        child_neuron.incoming.append(connection)

    def activate(self, value=None):
        """
        Compute and return this neuron's sigmoid output.

        If 'value' is given it is used as the raw input (input layer);
        otherwise the weighted sum of incoming connections is used.
        Bias neurons simply emit their fixed output.
        """
        if self.is_bias:
            return self.output
        # BUG FIX: the original 'value or self.sum_inputs' ignored an
        # explicit input of 0/0.0 -- compare against None so that zero is a
        # valid input value.
        self.input = value if value is not None else self.sum_inputs
        self.output = 1 / (1 + math.exp(-self.input))
        return self.output

    def train(self, target_output=0):
        """
        One backpropagation step: compute this neuron's delta, then adjust
        the weights of all outgoing connections by the gradient.
        """
        if not self.is_bias:
            if self.is_output:
                # This is the derivative of the (squared) error function.
                self.delta = self.output - target_output
            else:
                # Hidden delta: weighted sum of the child neurons' deltas.
                self.delta = sum(conn.weight * conn.child.delta
                                 for conn in self.outgoing)
        # Bias neurons also update their outgoing weights (no delta needed).
        for connection in self.outgoing:
            gradient = self.output * connection.child.delta
            connection.weight -= gradient * self.learning_rate

    @property
    def is_output(self):
        # No outgoing connections means this is an output-layer neuron.
        return not self.outgoing

    @property
    def sum_inputs(self):
        """Weighted sum of parent outputs, activating parents on demand."""
        total = 0
        for connection in self.incoming:
            if connection.parent.output is None:
                connection.parent.activate()
            total += connection.parent.output * connection.weight
        return total
class Layer(object):
    """A layer of sigmoid neurons, fully connectable to a following layer."""

    def __init__(self, size=10):
        self.size = size
        self.neurons = [Neuron() for _ in range(size)]

    def activate(self, values=None):
        """
        Activate every neuron, feeding 'values' positionally as raw inputs.

        Neurons beyond len(values) (e.g. an appended bias neuron) receive
        None and therefore derive their input from incoming connections.
        """
        values = values or []
        for neuron, value in izip_longest(self.neurons, values):
            neuron.activate(value)

    def train(self, target_outputs=None):
        """
        Run one training step on every neuron.

        BUG FIX: the default was the mutable expression 'list()', which is
        evaluated only once at definition time; use None instead.
        """
        if target_outputs is None:
            target_outputs = []
        # Neurons without a paired target receive None; only output-layer
        # neurons actually use the target value.
        for neuron, target in izip_longest(self.neurons, target_outputs):
            neuron.train(target)

    def connect_with_layer(self, layer):
        """Append a bias neuron, then fully connect this layer to 'layer'."""
        # We know this is not the output layer, because it is being
        # connected onward -- so it gets a bias neuron here.
        self.neurons.append(Neuron(output=1, is_bias=True))
        for child in layer.neurons:
            for parent in self.neurons:
                parent.connect_child(child)
class Network(object):
    """A feed-forward network: a chain of fully-connected Layers."""

    def __init__(self, sizes):
        # Build one layer per requested size, wiring each to its successor.
        self.layers = [Layer(size=size) for size in sizes]
        for upper, lower in zip(self.layers, self.layers[1:]):
            upper.connect_with_layer(lower)

    def activate(self, input_values):
        """Forward pass: feed the inputs in, then activate layer by layer."""
        self.layers[0].activate(input_values)
        for hidden_or_output in self.layers[1:]:
            hidden_or_output.activate()

    def train(self, target_outputs):
        """Backward pass: output layer first, then the rest in reverse."""
        self.layers[-1].train(target_outputs)
        for earlier in reversed(self.layers[0:-1]):
            earlier.train()

    def calculate_error(self, target_outputs):
        """Half squared-error summed over the output-layer neurons."""
        return sum(0.5 * math.pow(neuron.output - expected, 2)
                   for neuron, expected in izip_longest(
                       self.layers[-1].neurons, target_outputs))
class Trainer(object):
    """Runs repeated training epochs of a Network over fixed training data."""

    def __init__(self, network, training_data):
        # 'training_data' is an iterable of (input_values, target_outputs).
        self.network = network
        self.training_data = training_data

    def train(self, epochs=10001, log_frequency=1000):
        # Train for 'epochs' full passes over the data, printing the mean
        # error on the first epoch, every 'log_frequency' epochs, and the
        # final epoch.
        for epoch in xrange(epochs):
            accumulated_error = 0
            for input_data, target_output in self.training_data:
                self.network.activate(input_data)  # forward pass
                self.network.train(target_output)  # backprop / weight update
                accumulated_error += self.network.calculate_error(target_output)
            # Mean error across the training set for this epoch.
            accumulated_error /= len(self.training_data)
            if epoch == 0 or epoch % log_frequency == 0 or epoch == epochs - 1:
                print "Epoch ", epoch, "error =", accumulated_error
if __name__ == "__main__":
    # Truth table for a 2-input OR gate: (inputs, expected outputs).
    OR_GATE = (
        # (input, output)
        ([0, 0], [0]),
        ([1, 0], [1]),
        ([1, 1], [1]),
        ([0, 1], [1]),
    )
    # 2 input neurons feeding 1 output neuron; train and log the error.
    network = Network([2, 1])
    trainer = Trainer(network, OR_GATE)
    trainer.train()
| dev-coop/neural-net-hacking-examples | python/Part 4/neural_network_with_connections.py | Python | mit | 4,949 | [
"NEURON"
] | 339fa77391b6a83dad0cc7a2ed77fd35ef99c168034d02f6e63d6161150301c2 |
# -*- coding: utf-8 -*-
#
# pyps documentation build configuration file, created by
# sphinx-quickstart on Wed May 07 14:26:13 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../ext'))
sys.path.insert(0, os.path.abspath('../..'))
# Project version metadata; supplies |version|, |release| and the copyright.
from pyps import version as proj_version
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
#reST content to prepend to every source file that is processed.
# Common substitutions and external link targets available in every document.
rst_prolog = """
.. |PYPS| replace:: :doc:`pyps <index>`
.. |pyps| replace:: :program:`pyps`
.. |TypeError| replace:: `~python:exceptions.TypeError`
.. |KeyError| replace:: `~python:exceptions.KeyError`
.. |ValueError| replace:: `~python:exceptions.ValueError`
.. |None| replace:: `~python:None`
.. _JSON: http://www.json.org/
.. _json: http://www.json.org/
.. _nose: https://nose.readthedocs.org/
.. _setuptools: https://pythonhosted.org/setuptools/
.. _python: http://python.org/
.. _sphinx: http://sphinx-doc.org/
.. _pypi: https://pypi.python.org/
.. _pip: https://pypi.python.org/pypi/pip
.. _jinja: http://jinja.pocoo.org/
.. _sphinx_rtd_theme: https://github.com/snide/sphinx_rtd_theme
"""
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    #'docit.ext',
    #'sphinxcontrib.autoprogram',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyps'
copyright = u'%s, Brian Mearns' % proj_version.COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = proj_version.short_string()
# The full version, including alpha/beta/rc tags.
release = '%s (v%s)' % (version, proj_version.setuptools_string())
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_sphinxgen', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# Bare `name` markup resolves as a Python object cross-reference.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Autodoc for class will combine the classe's docstr and the __init__ function's.
autoclass_content = "both"
#autodoc_member_order = "groupwise"
autodoc_member_order = "bysource"
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(),]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pypsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'pyps.tex', u'pyps Documentation',
     u'Brian Mearns', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyps', u'pyps Documentation',
     [u'Brian Mearns'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'pyps', u'pyps Documentation',
     u'Brian Mearns', 'pyps', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
#Some standard configuration options for intersphinx linking.
intersphinx_mapping = {
    'python': ('http://docs.python.org/2', None),
    'sphinx': ('http://sphinx-doc.org/', None),
    'nose': ('https://nose.readthedocs.org/en/latest/', None),
    'pillow': ('https://pillow.readthedocs.org/en/latest/', None),
    'setuptools': ('https://pythonhosted.org/setuptools/', None),
    'dj': ('https://django.readthedocs.org/en/latest/', None),
}
| mearns/pyps | sphinx/source/conf.py | Python | agpl-3.0 | 10,470 | [
"Brian"
] | 536116985b929a2bf4fa5ed64d55d899248f8c7dc9dfe54eeca5285f1a3e5388 |
from distutils.core import setup, Extension
from distutils.sysconfig import get_python_inc, get_python_lib
from kapteyn import __version__ as version
from glob import glob
import sys, os
# NOTE(review): this setup script uses Python 2 print statements and will not
# parse under Python 3; it targets Python 2 installations only.
try:
    import numpy
except:
    print '''
-- Error.
The Kapteyn Package requires NumPy, which seems to be unavailable here.
Please check your Python installation.
'''
    sys.exit(1)
try:
    # Locate the bundled WCSLIB C sources shipped with the package.
    wcslib_dir = glob('src/wcslib*/C/')[0]
except:
    print '''
-- Error.
Unable to find WCSLIB source distribution.
'''
    sys.exit(1)
# Build the include path: NumPy headers, our own sources and bundled WCSLIB.
include_dirs = []
numdir = os.path.dirname(numpy.__file__)
ipath = os.path.join(numdir, numpy.get_include())
include_dirs.append(ipath)
include_dirs.append('src')
include_dirs.append(wcslib_dir)
short_descr = "Kapteyn Package: Python modules for astronomical applications"
description = """The Kapteyn Package is a collection of Python modules
and applications developed by the computer group of the Kapteyn
Astronomical Institute, University of Groningen, The Netherlands. The
purpose of the package is to provide tools for the development of
astronomical applications with Python.
The package is suitable for both inexperienced and experienced users and
developers and documentation is provided for both groups. The
documentation also provides in-depth chapters about celestial
transformations, spectral translations and non-linear least squares fitting.
The package's most important features:
    * The handling of spatial and spectral coordinates, WCS projections
      and transformations between different sky systems. Spectral
      translations (e.g., between frequencies and velocities) are supported
      and also mixed coordinates. (Modules wcs and celestial, Module wcs
      uses Mark Calabretta's WCSLIB which is distributed with the package.)
    * Versatile tools for writing small and dedicated applications for
      the inspection of FITS headers, the extraction and display of (FITS)
      data, interactive inspection of this data (color editing) and for the
      creation of plots with world coordinate information. (Module maputils)
      As one example, a gallery of all-sky plots is provided.
    * A class for the efficient reading, writing and manipulating simple
      table-like structures in text files. (Module tabarray)
    * Utilities for use with matplotlib such as obtaining coordinate
      information from plots, interactively modifiable colormaps and timer
      events (module mplutil); tools for parsing and interpreting coordinate
      information entered by the user (module positions).
    * A function to search for gaussian components in a profile (module
      profiles) and a class for non-linear least squares curve fitting
      (module kmpfit)"""
classifiers = [
    # A 'b' in the version string marks a beta release; pick the matching
    # development-status trove classifier.
    ['Development Status :: 5 - Production/Stable',
     'Development Status :: 4 - Beta'][int('b' in version)],
    'Programming Language :: Python',
    'Programming Language :: Cython',
    'Programming Language :: C',
    'Intended Audience :: Science/Research',
    'Topic :: Scientific/Engineering :: Astronomy',
    'Topic :: Scientific/Engineering :: Visualization',
    'Topic :: Scientific/Engineering :: Mathematics',
    'License :: OSI Approved :: BSD License',
    'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
    'Operating System :: POSIX :: Linux',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: Microsoft :: Windows'
]
download_url = "http://www.astro.rug.nl/software/kapteyn/kapteyn-%s.tar.gz" % version
# C sources for the wcs extension module proper.
wcsmod_src = [
    "eterms.c",
    "wcs.c",
    "xyz.c"
]
# Subset of WCSLIB sources compiled into the wcs extension.
wcslib_src = [
    "cel.c",
    "lin.c",
    "log.c",
    "prj.c",
    "spc.c",
    "sph.c",
    "spx.c",
    "tab.c",
    "wcs.c",
    "wcsfix.c",
    "wcshdr.c",
    "wcsprintf.c",
    "wcstrig.c",
    "wcsunits.c",
    "wcsutil.c",
    "wcserr.c",
    "flexed/wcsulex.c",
    "flexed/wcsutrn.c"
]
# scipy.ndimage-derived sources for the _nd_image extension.
ndimg_src = [
    "nd_image.c",
    "ni_filters.c",
    "ni_fourier.c",
    "ni_interpolation.c",
    "ni_measure.c",
    "ni_morphology.c",
    "ni_support.c",
]
wcs_src = ( ['src/' + source for source in wcsmod_src]
            + [wcslib_dir + source for source in wcslib_src] )
_nd_image_src = ['src/ndimg/' + source for source in ndimg_src]
# Extra C preprocessor macros passed to every Extension build.
define_macros = []
# MS Windows adjustments
#
if sys.platform == 'win32':
    # The flex-generated WCSLIB sources include unistd.h unless told not to.
    define_macros.append(('YY_NO_UNISTD_H', None))
    define_macros.append(('_CRT_SECURE_NO_WARNINGS', None))
# avoid using buggy Apple compiler
#
if sys.platform == 'darwin':
    from distutils import ccompiler
    import subprocess
    import re
    c = ccompiler.new_compiler()
    process = subprocess.Popen(c.compiler + ['--version'], stdout=subprocess.PIPE)
    output = process.communicate()[0].strip()
    # BUG FIX: this local was previously named 'version', clobbering the
    # kapteyn package version imported at the top of the file, so on darwin
    # setup(version=...) would receive the compiler version string instead
    # of the release number.  Use a distinct name.
    compiler_version = output.split()[0]
    if re.match('i686-apple-darwin[0-9]*-llvm-gcc-4.2', compiler_version):
        os.environ['CC'] = 'clang'
# Register the package and its five C extension modules with distutils.
# ext_package='kapteyn' installs every extension inside the kapteyn package.
setup(
    name="kapteyn",
    version=version,
    description=short_descr,
    author='J.P. Terlouw, M.G.R. Vogelaar',
    author_email='gipsy@astro.rug.nl',
    url='http://www.astro.rug.nl/software/kapteyn/',
    download_url = download_url,
    long_description=description,
    platforms = ['Linux', 'Mac OSX', 'Windows'],
    license = 'BSD',
    classifiers = classifiers,
    ext_package='kapteyn',
    ext_modules=[
        Extension(
            "wcs", wcs_src,
            include_dirs=include_dirs,
            define_macros=define_macros
        ),
        Extension(
            "ascarray",
            ["src/ascarray.c"],
            include_dirs=include_dirs
        ),
        Extension(
            "profiles",
            ["src/profiles.c", "src/gauestd.c"],
            include_dirs=include_dirs
        ),
        Extension(
            "_nd_image", _nd_image_src,
            include_dirs=include_dirs
        ),
        Extension(
            "kmpfit",
            ["src/kmpfit.c", "src/mpfit.c"],
            include_dirs=include_dirs
        )
    ],
    package_dir={'kapteyn': 'kapteyn'},
    packages=['kapteyn'],
    # Ship the colour lookup tables with the package.
    package_data={'kapteyn': ['lut/*.lut']},
)
| cdeil/kapteyn-mirror | setup.py | Python | bsd-3-clause | 5,946 | [
"Gaussian"
] | 08beb6ff480c440f7699f7a360933f943ed958ee474f187763fb5ec2c5f2e7c6 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2007,2012,2014,2015, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Test single point, unrestricted time-dependent logfiles in cclib"""
import os
import unittest
import numpy
# Absolute directory containing this test module; used to locate test_data.
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericTDunTest(unittest.TestCase):
"""Generic time-dependent unrestricted HF/DFT unittest"""
number = 24
def testenergiesnumber(self):
"""Is the length of etenergies correct?"""
self.assertEqual(len(self.data.etenergies), self.number)
def testoscsnumber(self):
"""Is the length of eotscs correct?"""
self.assertEqual(len(self.data.etoscs), self.number)
def testrotatsnumber(self):
"""Is the length of etrotats correct?"""
self.assertEqual(len(self.data.etrotats), self.number)
def testsecsnumber(self):
"""Is the length of etsecs correct?"""
self.assertEqual(len(self.data.etsecs), self.number)
def testsymsnumber(self):
"""Is the length of etsyms correct?"""
self.assertEqual(len(self.data.etsyms), self.number)
def testsyms(self):
"""Is etsyms populated by singlets and triplets 50/50?"""
singlets = [sym for sym in self.data.etsyms if "Singlet" in sym]
triplets = [sym for sym in self.data.etsyms if "Triplet" in sym]
self.assertEqual(len(singlets), self.number/2)
self.assertEqual(len(triplets), self.number/2)
if __name__=="__main__":
    # Run the TDun data tests directly through the shared DataSuite driver,
    # which lives one directory up from this module.
    import sys
    sys.path.append(os.path.join(__filedir__, ".."))
    from test_data import DataSuite
    suite = DataSuite(['TDun'])
    suite.testall()
| ghutchis/cclib | test/data/testTDun.py | Python | lgpl-2.1 | 2,030 | [
"cclib"
] | 6238d9b393cbb779c2c6c1220798ab3eb942fdd3151e6913a6093e16fb365afc |
from repESP import resp_helpers, charges, graphs
from repESP.field_comparison import rms_and_rep, difference
from repESP.esp_fit_calc import FitCalc, IOpCalcSet
import shutil
import os
import copy
import pickle
import numpy as np
import matplotlib.pyplot as plt
# The purpose of the script is investigating the change in generated fitting
# points upon varying IOp 41-43 settings. The first part of this script creates
# input files and pickles filenames. The user should then run all .com files
# with Gaussian and run the second part of this script, which produces graphs.
def calc_min_max(alist):
    """Return the smallest and largest element of *alist* as a (min, max) pair."""
    smallest = min(alist)
    largest = max(alist)
    return smallest, largest
def smart_range(min_max, low=0.9, high=1.1):
    """Widen a (min, max) pair into a colour span, allowing for either sign.

    A bound whose sign matches the expected one (negative minimum, positive
    maximum) is scaled outwards by *high*; otherwise it is scaled by *low*.
    Equivalent to something like [0.8*min_max[0], 1.2*min_max[1]] but
    sign-aware.
    """
    expected_signs = (-1, 1)
    return [
        (high if np.sign(bound) == expected else low) * bound
        for bound, expected in zip(min_max, expected_signs)
    ]
def plot_range(alist, margin=0.05):
    """Return axis limits for *alist*, padded on each side by *margin* of the data span."""
    low, high = calc_min_max(alist)
    padding = margin * abs(high - low)
    return low - padding, high + padding
def calc_plot(calcs, to_plot, title, set_lim=False, save_to=None):
    """Scatter-plot one value per calculation, labelled by the IOp suffix.

    Each calculation name's last 6 characters (the IOp 41/42/43 settings)
    become the x-tick label.  With set_lim the y-axis is padded via
    plot_range; the figure is saved to *save_to* or shown interactively.
    """
    positions = list(range(len(calcs)))
    labels = [calc[-6:] for calc in calcs]
    plt.scatter(positions, to_plot)
    plt.xticks(positions, labels, rotation='vertical')
    plt.title(title)
    axes = plt.gca()
    if set_lim:
        axes.set_ylim(plot_range(to_plot))
    graphs._save_or_display(save_to)
def check_color_span(values, color_span, default=None):
    """Resolve the colour span for *values*, warning when data fall outside it.

    An empty *color_span* is replaced by *default*, or by a sign-aware span
    derived from the data when no default is given.  The (possibly updated)
    span is returned.
    """
    min_max = calc_min_max(values)
    if color_span == []:
        color_span = smart_range(min_max) if default is None else default
    exceeds_scale = min_max[0] < color_span[0] or min_max[1] > color_span[1]
    if exceeds_scale:
        # The extent of this is unlikely to be large, since both MK and
        # CHelpG use a fixed exclusion radius (or do they? That's up to
        # Gaussian's implementation and is to be investigated).
        print("WARNING: Values on graph (min_max = {0:.4f}, {1:.4f}) will "
              "be outside of color scale ({2:.4f}, {3:.4f})".format(
                  *min_max, *color_span))
    return color_span
# ESP charge scheme to study ('mk' = Merz-Kollman).
charge_type = 'mk'
path = '../data/methane/fit_points-1/'
# PART 1
# NOTE(review): parts are toggled by editing these literal if True/if False
# conditions by hand.  Part 1 writes Gaussian inputs; run them, then enable
# Part 2 to analyse the resulting .esp/.log files.
if True:
    os.mkdir(path)
    shutil.copy('../data/methane/input/methane.chk', path)
    # A simple investigation --- varying only IOp42
    calc_set = IOpCalcSet(iop42=list(range(1, 6)))
    calcs = []
    print(calc_set.create_param_list())
    for iop41, iop42, iop43 in zip(*calc_set.create_param_list()):
        calc = FitCalc(path, 'methane', 'MP2/6-31+G(d,p)', charge_type, 0, 1,
                       iop41, iop42, iop43)
        calc.create_input()
        calcs.append(calc)
        print("Created file: ", calc.filename)
    # Pickle filenames for part 2
    with open(path + "fit_points.p", 'wb') as f:
        pickle.dump([elem.filename for elem in calcs], f)
# PART 2 --- run when the Gaussian calculations have been completed
if False:
    with open(path + "fit_points.p", 'rb') as f:
        calcs = pickle.load(f)
    rms_list = []
    charges_dict = {}
    color_span = []
    error_color_span = []
    for calc in calcs:
        # Parse the Gaussian .esp output and attach fitted charges.
        g = resp_helpers.G09_esp(path + calc + '.esp')
        charges.update_with_charges(charge_type, path + calc + '.log',
                                    g.molecule)
        with open(path + calc + "-charges.txt", "a") as fc:
            for atom in g.molecule:
                atom.print_with_charge(charge_type, fc)
                if atom.label in charges_dict:
                    charges_dict[atom.label].append(atom.charges[charge_type])
                else:
                    charges_dict[atom.label] = [atom.charges[charge_type]]
            min_rms, min_rrms, rep_esp_field = rms_and_rep(g.field, g.molecule,
                                                           charge_type)
            rms_list.append(min_rms)
            print("\n", min_rms, file=fc)
        # Default given as extremal values of methane CHelpG
        color_span = check_color_span(g.field.values, color_span,
                                      default=[-0.0045, 0.011])
        diff_field = difference(g.field, rep_esp_field)
        error_color_span = check_color_span(diff_field.values,
                                            error_color_span,
                                            default=[-0.0012, 0.019])
        graphs.plot_points(
            g.field, 2, title=calc, molecule=g.molecule,
            plane_eqn=graphs.plane_through_atoms(g.molecule, 1, 2, 3),
            dist_thresh=0.5, axes_limits=[(-5, 5)]*2, color_span=color_span,
            save_to=path + calc[-6:] + '_V.pdf')
        graphs.plot_points(
            diff_field, 2, title=calc + " Errors", molecule=g.molecule,
            plane_eqn=graphs.plane_through_atoms(g.molecule, 1, 2, 3),
            dist_thresh=0.5, axes_limits=[(-5, 5)]*2,
            color_span=error_color_span, save_to=path + calc[-6:] + '_E.pdf')
    save_to = path + "RMS.pdf"
    calc_plot(calcs, rms_list, charge_type.upper() + " RMS value",
              set_lim=True, save_to=save_to)
    # One charge-vs-settings plot per atom (uses g from the last iteration).
    for atom in g.molecule:
        save_to = path + atom.atomic_number + str(atom.label) + "_charge.pdf"
        title = "Charge on " + atom.atomic_number + str(atom.label)
        calc_plot(calcs, charges_dict[atom.label], title, save_to=save_to)
| jszopi/repESP | scripts/old/fit_points_iop.py | Python | gpl-3.0 | 5,502 | [
"Gaussian"
] | f7a988eef3951a07751f82f3d7fa40f1538fbadaec3622d342cd66cdf9fb3506 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Developer tools for MooseDocs."""
import argparse
import os
import re
import collections
import logging
import MooseDocs
import moosesqa
import moosetree
import mooseutils
import moosesyntax
from .. import common
#from ..common import exceptions
#from ..tree import syntax
from ..extensions import template
LOG = logging.getLogger(__name__)
def command_line_options(subparser, parent):
    """Define the 'syntax' command."""
    syntax_parser = subparser.add_parser(
        'syntax',
        parents=[parent],
        help="Tool for dumping application syntax to screen.")
    syntax_parser.add_argument(
        '--config', type=str, default='sqa_reports.yml',
        help="The YAML config file for performing SQA checks.")
def main(opt):
    """./moosedocs syntax"""
    # Setup logging: replace any existing handlers with a silent recorder.
    log = logging.getLogger('MooseDocs')
    log.handlers = list()
    log.addHandler(moosesqa.SilentRecordHandler())
    # Get the report objects for the applications
    _, _, app_reports = moosesqa.get_sqa_reports(
        opt.config, app_report=True, doc_report=False, req_report=False)
    # Loop through all reports and generate the stub pages
    for report in app_reports:
        report.getReport()  # this is needed to generate the app syntax
        print(report.app_syntax)
    log.handlers[0].clear()  # don't report errors, that is the job for check command
    return 0
| harterj/moose | python/MooseDocs/commands/syntax.py | Python | lgpl-2.1 | 1,734 | [
"MOOSE"
] | 81b237763c9e0f1de41cfcd6253fedae017f9898728d44f7bc2fbffa8c5a2efb |
import subprocess
import os
import json
from scipy.optimize import linprog
import sys
import pickle
import math
import numpy
from collections import namedtuple
from docplex.mp.model import Model
from docplex.util.environment import get_environment
#This program computes bidding strategy with the IBM Decision Optimization Library (CPLEX).
# NOTE(review): 'global' at module level is a no-op; dirr is simply a module
# global holding the repository root (parent of this script's directory),
# with a trailing slash.
global dirr
dirr='/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])+'/'
class AWS_Instance:
    """Snapshot of one EC2 instance type: hardware specs, account limits,
    current usage, and spot/on-demand pricing data."""

    def __init__(self, procs, ram, eph, name, limit, running, running_spot,
                 historical_max, current_spot, current_od):
        # Identity and hardware characteristics (cores, GB RAM, GB ephemeral).
        self.instance_type = name
        self.procs, self.ram, self.storage = procs, ram, eph
        # Account limit and instances already running (total / spot).
        self.limit = limit
        self.running, self.running_spot = running, running_spot
        # Pricing in USD per hour: historical spot max, current spot, on-demand.
        self.historical_max = historical_max
        self.current_od = current_od
        self.current_spot = current_spot
def get_user_params():
    """Interactively collect the cluster-sizing constraints.

    Uses Python 2's raw_input; returns (min_cores, min_ram,
    min_free_storage, max_cost_hour, ram_per_job, procs_per_job).
    """
    def ask(cast, prompt):
        # Prompt the user once and coerce the reply to the requested type.
        return cast(raw_input(prompt))

    min_cores = ask(int, "What is the minimum number of distributed cores required?")
    min_ram = ask(int, "What is the minimum amount in GB of distributed RAM required?")
    min_free_storage = ask(int, "What is the minimum amount in GB of free ephemeral storage required?")
    max_cost_hour = ask(float, "What is the max cost that you are willing to pay per hour for your virtual cluster?")
    ram_per_job = ask(int, "What amount of RAM is required per job?")
    procs_per_job = ask(int, "How many Processors are required per job?")
    return min_cores, min_ram, min_free_storage, max_cost_hour, ram_per_job, procs_per_job
def handle_grep_non_zero_output(command):
    """Run *command* through the shell and return its stdout.

    Unlike a bare check_output call, a non-zero exit status (e.g. grep
    finding no match) is not fatal: whatever output the command produced
    is returned instead.
    """
    try:
        return subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError as failure:
        return failure.output
def define_A_matrix():
    """Gather per-instance-type data (specs, account limits, prices) from
    local resource files and the AWS CLI, returning a list of AWS_Instance.

    Interactively asks for the anticipated run length (to bound the spot
    price history window) and the target datacenter.  Results may be loaded
    from an existing pickle file, and are dumped to instances.p otherwise.
    Python 2 only (raw_input, dict.has_key).
    """
    current_time = int(subprocess.check_output("date +%s",shell=True))
    weeks_back = float(raw_input("How many weeks do you anticipate running your job for?"))
    # 604800 seconds per week; history window ends now.
    start_time = int(current_time-(weeks_back*604800))
    # Map instance type -> GB of ephemeral storage (only types with disks).
    eph = {}
    eph_file = open(dirr+"resources/ephemeral_store_info.csv",'r')
    for line in eph_file:
        q = line.rstrip().split(',')
        eph_value = int(q[3])
        if eph_value > 0:
            eph[q[0]] = eph_value
    eph_file.close()
    # Instance types whose account limit can be read from the GameLift API.
    retrievable_account_limits = set()
    gl_limits_file = open(dirr+"resources/gamelift_instances.txt",'r')
    for line in gl_limits_file:
        retrievable_account_limits.add(line.rstrip())
    gl_limits_file.close()
    aws_instance_file = open(dirr+"resources/instances.csv",'r')
    aws_instances = []
    # Snapshot currently running (spot) instances for limit accounting.
    os.system("aws ec2 describe-instances > ec2_instances.json")
    os.system("aws ec2 describe-spot-instance-requests > ec2_spot_instances.json")
    datacenters_fh = open(dirr+"resources/datacenters.txt",'r')
    datacenters = []
    for lines in datacenters_fh:
        datacenters.append(lines.rstrip())
    for i in range(0,len(datacenters)):
        print(str(i+1)+" "+datacenters[i])
    datacenter_idx = int(raw_input("Please enter the integer corresponding to the amazon datacenter in which you are in:"))
    datacenter = datacenters[datacenter_idx-1]
    # On-demand price list shipped with the repo, decompressed to ./odprices.
    os.system("gunzip -c "+dirr+"resources/odprices.gz > odprices")
    print("Please visit https://console.aws.amazon.com/ec2/v2/home?region=REGION#Limits: replacing REGION with the region in which you plan to run this scalable cluster in and provide the requested information that is not available in the API but critical for proper bidding when prompted.")
    idx = 0  # NOTE(review): unused
    pickleq = raw_input("Would you like to use a pickle file?")
    if os.path.isfile(pickleq):
        # Reuse previously collected data instead of re-querying AWS.
        aws_instances = pickle.load( open(pickleq,"rb"))
    else:
        for line in aws_instance_file:
            split_line = line.rstrip().split(',')
            instance_name = split_line[0]
            instance_ram_float = float(split_line[2])
            instance_procs_int = int(split_line[1])
            instance_eph_int = eph[instance_name] if eph.has_key(instance_name) else 0
            running_ec2 = int(subprocess.check_output("grep \""+instance_name+"\" ec2_instances.json | wc -l",shell=True))
            running_spot = int(subprocess.check_output("grep \""+instance_name+"\" ec2_spot_instances.json | wc -l",shell=True))
            if instance_name in retrievable_account_limits:
                # Limit is available programmatically via GameLift.
                os.system("aws gamelift describe-ec2-instance-limits --ec2-instance-type "+instance_name+" | jq -r '.EC2InstanceLimits[]' > i_temp.json")
                with open("i_temp.json",'r') as jsf:
                    gamelift_api_out = json.load(jsf)
                    instance_limit_pre = int(gamelift_api_out["InstanceLimit"])
                jsf.close()
            else:
                instance_limit_pre = int(raw_input("What is your account limit for "+instance_name+" in the current region being used?"))
            # Remaining headroom after subtracting running spot instances.
            instance_limit = instance_limit_pre-running_spot
            # Highest spot price over the chosen history window.
            historical_price_pre = handle_grep_non_zero_output("aws ec2 describe-spot-price-history --instance-types "+instance_name+" --end-time "+str(current_time)+" --start-time "+str(start_time)+" --product-descriptions='Linux/UNIX' --query 'SpotPriceHistory[*].{az:AvailabilityZone, price:SpotPrice}' | grep 'price' | sed 's/\"price\": \"//' | sed 's/^ *//' | sed 's/\",//' | uniq | sort | tail -1")
        historical_price = float(historical_price_pre)
        # NOTE(review): the current-price query is hard-coded to c4.large
        # rather than using instance_name -- confirm whether intentional.
            current_price_pre = float(handle_grep_non_zero_output("aws ec2 describe-spot-price-history --instance-types c4.large --start-time=$(date +%s) --product-descriptions=\"Linux/UNIX\" --query 'SpotPriceHistory[*].{az:AvailabilityZone, price:SpotPrice}' | grep 'price' | sed 's/\"price\": \"//' | sed 's/^ *//' | sed 's/\",//' | uniq | sort | tail -1"))
            current_price=float(current_price_pre)
            print("retrieved info for: "+instance_name)
            # On-demand price for this type in the chosen datacenter.
            od_string = handle_grep_non_zero_output("cat odprices | grep '"+instance_name+"' | grep -v 'Reserved' | grep 'Shared' | grep -v 'SUSE' | grep -v 'Windows' | grep 'Linux' | grep '"+datacenter+"'")
            od_price = float(od_string.split(',')[9][1:-1])
            new_instance_type = AWS_Instance(instance_procs_int,instance_ram_float,instance_eph_int,instance_name,instance_limit,running_ec2,running_spot,historical_price,current_price,od_price)
            aws_instances.append(new_instance_type)
        # Cache the collected data for subsequent runs.
        pickle.dump( aws_instances, open("instances.p", "wb"))
    aws_instance_file.close()
    return aws_instances
#characteristics of compute nodes (A)
def formulate_problem(aws_instances):
    """Flatten AWS_Instance records into the parallel lists used by the LP.

    Each instance type yields two purchasing options -- spot and on-demand --
    so every returned list has 2*len(aws_instances) entries, with all spot
    entries first, in the same order as *aws_instances*.

    Returns (num_types, names, prices, procs, gbRAM, freestorage,
    max_cost_in_previous_time_window, account_limits), where num_types is
    the number of instance *types* (half the length of the other lists).
    """
    # List comprehensions instead of map(): under Python 3, map() returns an
    # iterator and the original map(...) + map(...) concatenations raised
    # TypeError.  Comprehensions behave identically under Python 2.
    type_names = [instance.instance_type for instance in aws_instances]
    od_names = [name + ".od" for name in type_names]
    spot_names = [name + ".spot" for name in type_names]
    names = spot_names + od_names
    spot_prices = [instance.current_spot for instance in aws_instances]
    od_prices = [instance.current_od for instance in aws_instances]
    prices = spot_prices + od_prices
    procs_pre = [instance.procs for instance in aws_instances]
    procs = procs_pre + procs_pre
    gbRAM_pre = [instance.ram for instance in aws_instances]
    gbRAM = gbRAM_pre + gbRAM_pre
    freestorage_pre = [instance.storage for instance in aws_instances]
    freestorage = freestorage_pre + freestorage_pre
    mc_pre = [instance.historical_max for instance in aws_instances]
    # On-demand prices are fixed at the published rate, so the "historical
    # maximum" for the OD half of the list is simply the OD price.
    max_cost_in_previous_time_window = mc_pre + od_prices
    account_limits_pre = [instance.limit for instance in aws_instances]
    account_limits = account_limits_pre + account_limits_pre
    num_types = len(procs_pre)
    return num_types, names, prices, procs, gbRAM, freestorage, max_cost_in_previous_time_window, account_limits
def build_instance_model(num_types,names,prices,procs,gbRAM,freestorage,max_cost_in_previous_time_window,account_limits,min_cores,min_ram,min_free_storage,max_cost_hour,ram_per_job,procs_per_job,aws_instances):
    # Unimplemented stub: model construction currently happens inline in
    # run_LP(); this placeholder keeps the intended API visible.
    return
#setting up LP problem formulation
def run_LP(num_types,names,prices,procs,gbRAM,freestorage,max_cost_in_previous_time_window,account_limits,min_cores,min_ram,min_free_storage,max_cost_hour,ram_per_job,procs_per_job,aws_instances,**kwargs):
    """Build and solve the instance-bidding integer program with docplex,
    minimising total hourly cost subject to cluster-wide core/RAM/storage
    constraints; prints the solution and writes cplex.lp / instances.json.
    Python 2 only (relies on map/filter returning lists).
    """
    # Exclude offerings too small to host even a single job.
    avoid_instances = set()
    # NOTE(review): rpj_helper pairs names with *procs* yet is filtered
    # against ram_per_job, while ppj_helper pairs names with *gbRAM* and is
    # filtered against procs_per_job -- the two thresholds look swapped;
    # confirm against the intended semantics.
    rpj_helper = zip(names,procs)
    ppj_helper = zip(names,gbRAM)
    a1 = filter(lambda x: x[1] < ram_per_job, rpj_helper)
    a2 = filter(lambda x: x[1] < procs_per_job, ppj_helper)
    avoidp = a1+a2
    avoid_names = map(lambda x: x[0],avoidp)
    # NOTE(review): names/max_cost/account_limits have 2*num_types entries
    # but [0]*num_types truncates the zip to the spot half -- verify whether
    # the on-demand offerings are meant to be dropped here.
    serversp = zip(names,max_cost_in_previous_time_window,[0]*num_types,account_limits)
    server_characteristicsp = zip(names,procs,gbRAM,freestorage)
    servers = filter(lambda x: x[0] not in avoid_names,serversp)
    server_characteristics = filter(lambda x: x[0] not in avoid_names,server_characteristicsp)
    # Each requirement is modelled as a range [qmin, 5*qmin].
    job_parameters = []
    job_parameters.append(("min_cores",min_cores,min_cores*5))
    job_parameters.append(("min_ram",min_ram,min_ram*5))
    job_parameters.append(("min_free_storage",min_free_storage,min_free_storage*5))
    Server = namedtuple("Instance", ["name","cost","qmin","qmax"])
    Job_param = namedtuple("Param", ["name","qmin","qmax"])
    server = [Server(*s) for s in servers]
    assert(len(server) > 0)
    params = [Job_param(*j) for j in job_parameters]
    # (offering name, requirement name) -> that offering's contribution.
    server_info = {(sc[0], params[j].name): sc[1+j] for sc in server_characteristics for j in range(len(job_parameters))}
    mdl = Model(name='Instance Bidding')
    # One integer decision variable per offering: how many to bid for.
    qty = {s: mdl.integer_var(lb=s.qmin,ub=s.qmax,name=s.name) for s in server}
    for p in params:
        amount = mdl.sum(qty[s] * server_info[s.name,p.name] for s in server)
        mdl.add_range(p.qmin,amount,p.qmax)
        mdl.add_kpi(amount, publish_name="Total %s" % p.name)
    # Objective: minimise total (worst-case historical) hourly cost.
    mdl.minimize(mdl.sum(qty[s] * s.cost for s in server))
    mdl.print_information()
    url = None
    key = None
    if not mdl.solve(url=url, key=key):
        print("*** Problem has no solution")
    else:
        mdl.float_precision = 3
        print("* model solved as function:")
        mdl.report()
        mdl.print_solution()
        mdl.report_kpis()
        mdl.export_as_lp("cplex.lp")
        os.system("cat cplex.lp")
        # Save the CPLEX solution as "solution.json" program output
        with get_environment().get_output_stream("instances.json") as fp:
            mdl.solution.export(fp, "json")
    return
#mdl.add_constraints((mdl.inside_vars[prod] + mdl.outsiddde_vars[prod] >= prod[1], 'ct_demand_%s' % prod[0]) for prod in products)
#add filtering for running instances and job size
def start_bidding():
    """Drive the full workflow: gather user constraints, collect instance
    data, flatten it into LP inputs, and hand the formulation to the solver."""
    (min_cores, min_ram, min_free_storage, max_cost_hour,
     ram_per_job, procs_per_job) = get_user_params()
    candidates = define_A_matrix()
    if min_free_storage > 0:
        # Ephemeral storage is required: drop diskless instance types.
        candidates = filter(lambda inst: inst.storage > 0, candidates)
    (num_types, offering_names, prices, procs, gbRAM, freestorage,
     max_cost_window, account_limits) = formulate_problem(candidates)
    run_LP(num_types, offering_names, prices, procs, gbRAM, freestorage,
           max_cost_window, account_limits, min_cores, min_ram,
           min_free_storage, max_cost_hour, ram_per_job, procs_per_job,
           candidates)
    return
def find_provisioning_info(name, aws_instances):
    """Resolve an LP variable name back to its AWS instance description.

    Args:
        name: variable name whose first two dot-separated fields form the
            instance type (e.g. ``m4.xlarge.us-east`` -> ``m4.xlarge``).
        aws_instances: iterable of objects with ``instance_type``, ``procs``,
            ``ram`` and ``storage`` attributes.

    Returns:
        Tuple ``(procs, ram, storage, name)`` where procs/ram are strings
        (ram truncated to an integer) and name is the two-field instance type.
    """
    instance_type = '.'.join(name.split('.')[:2])
    # List comprehension instead of filter(): under Python 3 filter() returns
    # an iterator, which would break the len() check below.
    matches = [x for x in aws_instances if x.instance_type == instance_type]
    assert(len(matches) == 1)
    desired_instance = matches[0]
    procs = str(desired_instance.procs)
    ram = str(int(desired_instance.ram))
    storage = desired_instance.storage
    return procs, ram, storage, instance_type
def write_prov_file(lp_output, names, aws_instances):
    """Write the LP solution as a pipe-separated provisioning file.

    Each instance to deploy becomes one ``name|procs|ram|has_storage|aws``
    line in ``dirr + "prov.psv"`` (``dirr`` is a module-level path).
    Exits the process with status 1 when the solution deploys nothing.
    """
    out_data = zip(names, lp_output.x)
    sum_deploy = 0
    print("The following is the LP generated provisioning:")
    # Context manager guarantees the file is closed even if a lookup below
    # raises (the original leaked the handle on that path).
    with open(dirr + "prov.psv", 'w') as prov_file:
        for pre_name, raw_count in out_data:
            procs, ram, storage, name = find_provisioning_info(pre_name, aws_instances)
            boolstr = "true" if storage > 0 else "false"
            number_to_deploy = int(round(float(raw_count)))
            sum_deploy += number_to_deploy
            line = name + '|' + procs + '|' + ram + '|' + boolstr + "|aws\n"
            for count in range(0, number_to_deploy):
                print(line)
                prov_file.write(line)
    if sum_deploy == 0:
        # Nothing to deploy: signal failure to the caller/driver.
        sys.exit(1)
    return
def go1():
    """Run one bidding attempt, mapping any failure to a sentinel result.

    Returns:
        None on success, or the tuple ``("exit", 0)`` when start_bidding()
        raises (treated as "no feasible solution").
    """
    try:
        start_bidding()
        return
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate instead of being reported as infeasibility.
        # NOTE(review): write_prov_file() uses sys.exit(1); confirm that exit
        # should not be swallowed here.  Parenthesised print keeps this line
        # valid under both Python 2 and 3.
        print("No feasible solution found, try again with different parameters")
        return "exit", 0
"""
if len(lp_output_n.x) > 0:
naive_out = zip(names_n,lp_output_n.x)
print "\n"
print "Going by the seat of your pants and choosing the cheapest options that meet your criteria at the curren moment would result in this bid:"
print filter(lambda x: x[1] != 0,naive_out)
else:
print "There is no solution"
if len(lp_output) > 0:
print "Taking in to account pricing variability, your ideal bid is:"
cost_out = zip(names,lp_output.x)
print filter(lambda x: x[1] != 0,cost_out)
"""
| kosticlab/aether | lp/ilp.py | Python | mit | 13,665 | [
"VisIt"
] | 90917a0f333d144605f4a94f4735b4fe1c66ce09621f5c5b994bee231cbcae68 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from OVarCall.utilOVar.exceptionUtil import InvalidBamFileManagement,InvalidMutationType,InsufficientBamReadFilter
import pysam
import math
import distutils.util
import logging
import contextlib
import re
import copy
class BamDAO:
    def __init__(self, bamPath,settings=None):
        """Data-access object for one BAM file.

        Args:
            bamPath (str): path to the BAM file; opened lazily via openBam().
            settings (dict, optional): threshold overrides, see setParameters().
        """
        # parameters for bam file object
        self.__bamPath = bamPath
        self.__bam = None      # pysam.AlignmentFile handle while openBam() is active
        self.__chrDic = None   # chromosome name -> reference index, filled on open
        # setting parameters for overlapping pileup
        self.__windowSize = 200   # bp fetched on each side of the queried position
        self.__minBQ = 15         # minimum base quality for an informative base
        self.__minMapQ = 30       # minimum mapping quality
        self.__f = 2              # samtools-style required-flag mask (-f)
        self.__F = 3840           # samtools-style excluded-flag mask (-F)
        # parameter for read Filter
        self.__maxInsDel = 2      # max indel CIGAR ops per read
        self.__maxSNV = 2         # max MD-tag mismatches per read
        self.__maxMutAll = 3      # max total events (indels + SNVs) per read
        self.__maxSCProportion = 0.25          # max soft-clipped fraction of a read
        self.__maxLowMapReadProportion = 0.50  # max fraction of low-map-quality reads at a site
        self.__nearestIndelDistance = 25 # | mutPos - indesPos | > 25
        self.__nearestIndelReadThres = 20 # number of near indel read thres.
        # parameter for BQ variant read filter
        # self.__maxLowBQVariantNum = -1
        # self.__maxLowBQVariantProportion = 1.0
        self.__lowBQ = 15
        self.__minAvgBaseQuality = -1   # <= 0 disables the average-BQ gate
        self.setParameters(settings)
def setParameters(self,settings):
if type(settings) is dict :
if 'windowSize' in settings :
self.__windowSize = int(settings['windowSize'])
if 'minBQ' in settings:
self.__minBQ = int(settings['minBQ'])
if 'minMapQ' in settings:
self.__minMapQ = int(settings['minMapQ'])
if 'f' in settings:
self.__f = int(settings['f'])
if 'F' in settings:
self.__F = int(settings['F'])
if 'maxInsDel' in settings:
self.__maxInsDel = int(settings['maxInsDel'])
if 'maxSNV' in settings:
self.__maxSNV = int(settings['maxSNV'])
if 'maxMutAll' in settings:
self.__maxMutAll = int(settings['maxMutAll'])
if 'maxSCProportion' in settings:
self.__maxSCProportion = float(settings['maxSCProportion'])
if 'maxLowMapReadProportion' in settings:
self.__maxLowMapReadProportion = float(settings['maxLowMapReadProportion'])
if 'nearestIndelDistance' in settings:
self.__nearestIndelDistance = int(settings['nearestIndelDistance'])
if 'nearestIndelReadThres' in settings:
self.__nearestIndelReadThres = int(settings['nearestIndelReadThres'])
if 'lowBQ' in settings:
self.__lowBQ = int(settings['lowBQ'])
if 'minAvgBaseQuality' in settings:
self.__minAvgBaseQuality = int(settings['minAvgBaseQuality'])
# if 'maxLowBQVariantNum' in settings:
# self.__maxLowBQVariantNum = int(settings['maxLowBQVariantNum'])
# if 'maxLowBQVariantProportion' in settings:
# self.__maxLowBQVariantProportion = float(settings['maxLowBQVariantProportion'])
# logging.info('maxLowBQVariantNum : ' +str(self.__maxLowBQVariantNum))
# logging.info('maxLowBQVariantProportion : ' +str(self.__maxLowBQVariantProportion))
def getHeaderDic(self):
if self.__bam is not None:
return copy.deepcopy(self.__bam.header)
else :
return None
    @contextlib.contextmanager
    def openBam(self,bamPath=None):
        """Context manager that opens the BAM (optionally switching the path),
        builds the chromosome-name -> reference-index map, and guarantees the
        handle is closed on exit -- even when opening or the body raises."""
        try:
            if bamPath is not None:
                self.__bamPath = bamPath
            self.__bam = pysam.AlignmentFile(self.__bamPath, "rb")
            self.__chrDic = {}
            # map each @SQ sequence name to its ordinal reference id
            for chrIdx in range(len(self.__bam.header['SQ'])):
                self.__chrDic[self.__bam.header['SQ'][chrIdx]['SN']] = chrIdx
            yield
        finally:
            self.__closeBam()
def __closeBam(self):
if self.__bam is not None:
self.__bam.close()
self.__bam = None
    def __filterReads(self,reads,TYPE,Chr,pos,ref,obs):
        """Apply flag and per-read quality filters around one candidate site.

        Returns the surviving reads as a list, or None when the whole pileup
        is rejected: no flag-passing reads, too high a fraction of low-quality
        mappings, too many reads with a nearby indel, or (for SNVs, when the
        gate is enabled) too low an average base quality at the site.
        """
        numAll = 0
        numFiltered = 0
        readList = []
        indelReadsNum = 0
        for read in reads:
            if self.__filterFlaggedRead(read) :
                if self.__filterLowMapRead(read,TYPE,Chr,pos,ref,obs):
                    readList.append(read)
                else:
                    numFiltered += 1
                if self.__nearestIndelDistance > 0 and self.__distToNearestIndel(read,TYPE,Chr,pos,ref,obs) > 0:
                    indelReadsNum += 1
                numAll += 1
        proportionLowMap = 0.0
        if numAll > 0 :
            proportionLowMap = (1.0 * numFiltered) / (1.0 * numAll)
        if numAll == 0:
            readList = None
        if not(proportionLowMap <= self.__maxLowMapReadProportion):
            readList = None
        if self.__nearestIndelDistance > 0 and not(indelReadsNum <= self.__nearestIndelReadThres ):
            readList = None
        if self.__minAvgBaseQuality > 0 and TYPE == 'M':
            # the average-BQ gate inspects the ORIGINAL read set, not readList
            profile = self.__getReadBQProfile(ref, obs, Chr, pos, TYPE, self.__lowBQ, reads)
            if not( self.__minAvgBaseQuality <= profile['avg']):
                logging.info(str(Chr)+" " + str(pos) + " " + TYPE + " " + str(ref) + " " + str(obs) + " : low avg bq variant pos fitered" )
                logging.info(str(profile))
                readList = None
        return readList
def __filterLowMapRead(self,alignedSeg,TYPE,Chr,pos,ref,obs):
if alignedSeg.mapping_quality < self.__minMapQ:
return False
delNum = self.__delNum(alignedSeg)
insNum = self.__insNum(alignedSeg)
if not(delNum + insNum <= self.__maxInsDel):
return False
if not(self.__softClipProportion(alignedSeg) <= self.__maxSCProportion):
return False
snvNum = self.__snvNum(alignedSeg)
if not(snvNum <= self.__maxSNV):
return False
if not(delNum + insNum + snvNum <= self.__maxMutAll):
return False
return True
    # pos is already transformed to the pysam Position
    def __distToNearestIndel(self,alignedSeg,TYPE,Chr,pos,ref,obs):
        """Distance (< nearestIndelDistance) from pos to the closest indel on
        this read, scanning symmetrically outwards; -1 when no indel is found
        within range, when the read carries no indels, or for non-SNV types.

        NOTE(review): callers only use the result via `... > 0`, so an
        implicit None return on the indel-free SNV path behaves like "no
        nearby indel" under Python 2 -- confirm before porting to Python 3.
        """
        if TYPE == "M":
            if self.__delNum(alignedSeg) + self.__insNum(alignedSeg) >= 1:
                mappedDic = self.__makeMappedDic(alignedSeg)
                if (Chr,pos) in mappedDic:
                    for x in range(self.__nearestIndelDistance):
                        if x == 0 :
                            continue
                        if (Chr,pos+x) in mappedDic:
                            # deletion placeholder (cigar 2) ...
                            if mappedDic[(Chr,pos+x)]['base'] == '-' and mappedDic[(Chr,pos+x)]['cigar'] == 2:
                                return x
                            # ... or an insertion (cigar 1) anchored here
                            elif len(mappedDic[(Chr,pos+x)]['list']) > 0 and mappedDic[(Chr,pos+x)]['list'][0][2] == 1:
                                return x
                        if (Chr,pos-x) in mappedDic:
                            if mappedDic[(Chr,pos-x)]['base'] == '-' and mappedDic[(Chr,pos-x)]['cigar'] == 2:
                                return x
                            elif len(mappedDic[(Chr,pos-x)]['list']) > 0 and mappedDic[(Chr,pos-x)]['list'][0][2] == 1:
                                return x
                    return -1
                else :
                    return -1
        else:
            return -1
def __filterFlaggedRead(self,alignedSeg):
if not(int(alignedSeg.flag) & self.__f == self.__f and int(alignedSeg.flag) & self.__F == 0):
return False
return True
def __softClipProportion(self,alignedSeg):
total = 0
SC = 0
cigars = alignedSeg.cigar
for cigar in cigars:
if cigar[0] == 4 : # deletion type cigar type
SC += cigar[1]
total += cigar[1]
ans = 0.0
if total > 0 :
ans = (SC * 1.0) / (total * 1.0)
return ans
def __snvNum(self,alignedSeg):
ans = 0
mdString = ""
tags = alignedSeg.tags
for tag in tags:
if tag[0] == 'MD':
mdString = tag[1]
while len(mdString) > 0:
firsts = self.__firstMD(mdString)
first = firsts["firstMD"]
types = firsts["type"]
mdString = mdString[len(first):len(mdString)]
if types == "mutation":
ans += 1
return ans
def __delNum(self,alignedSeg):
cigars = alignedSeg.cigar
ans = 0
for cigar in cigars:
if cigar[0] == 2 : # deletion type cigar type
ans += 1
return ans
def __insNum(self,alignedSeg):
cigars = alignedSeg.cigar
ans = 0
for cigar in cigars:
if cigar[0] == 1 : # deletion type cigar type
ans += 1
return ans
    def __firstMD(self,mdString):
        """Split off the leading token of a SAM MD tag string.

        Returns {"firstMD": token, "type": "match"|"mutation"|"deletion"}
        where a leading run of digits is a match, a leading letter a
        mismatch, and a leading '^'-prefixed letter run a deletion.
        NOTE(review): falls through to an implicit None when no pattern
        anchors at the start; should not occur for well-formed MD strings.
        """
        MDMatPattern = re.compile('(.*?)([0-9]+)(.*)')
        MDMutPattern = re.compile('(.*?)([A-Za-z])(.*)')
        MDDelPattern = re.compile('(.*?)(\^[A-Za-z]+)(.*)')
        matResult = MDMatPattern.search(mdString)
        # group(0) empty means the pattern matched at the very start
        if len(matResult.groups()[0]) == 0:
            return {"firstMD": matResult.groups()[1], "type": "match"}
        mutResult = MDMutPattern.search(mdString)
        if len(mutResult.groups()[0]) == 0:
            return {"firstMD": mutResult.groups()[1], "type": "mutation"}
        delResult = MDDelPattern.search(mdString)
        if len(delResult.groups()[0]) == 0:
            return {"firstMD": delResult.groups()[1], "type": "deletion"}
    def __makeMappedDic(self,alignedSeg):
        """Expand one read into {(ref_id, ref_pos): entry}.

        Each entry holds 'base'/'bq' for the aligned base ('-'/-1 for
        deletion or padding placeholders), 'cigar' (the op id at that
        position) and 'list' of (bases, quals, op) tuples for insertions or
        clips anchored at the PREVIOUS reference position; events with no
        previous mapped position are stored under key (ref_id, -1).
        """
        Bases = alignedSeg.query_sequence
        BQAry = alignedSeg.query_qualities
        baseIdx = 0
        mapIdx = alignedSeg.reference_start
        mapChr = alignedSeg.reference_id # num
        # NOTE(review): pop() below mutates the list returned by
        # alignedSeg.cigar; pysam returns a fresh list per access, so the
        # read itself stays untouched -- confirm for the pysam version in use.
        remCigar = alignedSeg.cigar
        firstCigar = None
        if len(remCigar) > 0:
            firstCigar = remCigar[0]
            remCigar.pop(0)
        else:
            firstCigar = None
        ans = {}
        while firstCigar is not None:
            mlen = self.__MLenOne(firstCigar)   # reference bases consumed
            clen = self.__CLenOne(firstCigar)   # query bases consumed
            cigartype = firstCigar[0]
            if mlen == clen and mlen > 0: # M,EQ,X
                for at in range(mlen):
                    ans[(mapChr, mapIdx + at)] = {'base': Bases[baseIdx + at],
                                                  'bq': BQAry[baseIdx + at],
                                                  'cigar': cigartype, 'list': []}
            elif mlen == 0 and clen > 0: # I,S
                if((mapChr, mapIdx - 1)) in ans:
                    ans[(mapChr, mapIdx - 1)]['list'].append(
                        (Bases[baseIdx:baseIdx + clen],
                         BQAry[baseIdx:baseIdx + clen],
                         cigartype))
                else:
                    ans[(mapChr, -1)] = {'base': None,
                                         'bq': None,
                                         'cigar': None,
                                         'list':
                                         [(Bases[baseIdx:baseIdx + clen], BQAry[baseIdx:baseIdx + clen], cigartype)]}
            elif mlen > 0 and clen == 0 and cigartype != 3: # D,P do not consider cigar N
                for at in range(mlen):
                    ans[(mapChr, mapIdx + at)] = {'base': '-',
                                                  'bq': -1,
                                                  'cigar': cigartype, 'list': []}
            elif mlen == 0 and clen == 0: # H
                # this case is hard clipping and ignore this case
                if((mapChr, mapIdx - 1)) in ans:
                    ans[(mapChr, mapIdx - 1)]['list'].append(
                        ('-' * firstCigar[1],
                         '-' * firstCigar[1],
                         cigartype))
                else:
                    ans[(mapChr, -1)] = {'base': None,
                                         'bq': None,
                                         'cigar': None,
                                         'list':
                                         [('-' * firstCigar[1], '-' * firstCigar[1], cigartype)]}
                pass
            baseIdx += clen
            mapIdx += mlen
            if len(remCigar) > 0:
                firstCigar = remCigar[0]
                remCigar.pop(0)
            else:
                firstCigar = None
        return ans
def __MLenOne(self,cigar):
try:
ctype = cigar[0]
if ctype == 0 or ctype == 2 or ctype == 3 or ctype == 7 or ctype == 8 or ctype == 6:
return cigar[1]
else:
return 0
except Exception, e:
sys.stderr.writelines("isMlenOne invalid form of cigar")
raise e
def __CLenOne(self,cigar):
try:
ctype = cigar[0]
if ctype == 0 or ctype == 1 or ctype == 4 or ctype == 7 or ctype == 8:
return cigar[1]
else:
return 0
except Exception, e:
sys.stderr.writelines("isCLenOne invalid form of cigar")
raise e
"""
getState(ref,obs,Chr,pos,read,obsType):
の pos -> 0-indexed
'M' : pos の場所(0-indexed pysam 上) を
'I' : pos の場所(0-indexed pysam 上) の mappedList の ins を見る
'D' : 0-indexed pysam 上 の [pos,pos+len) を見る
"""
# True : there is non reference base read
def __checkSNV(self,ref,obs,Chr,pos,read,obsType,mapppedDic):
baseAtPos = None
if (Chr,pos) in mapppedDic:
baseAtPos = mapppedDic[(Chr,pos)]['base']
baseAtPos = baseAtPos.upper()
isref = (ref == baseAtPos)
return not(isref)
else : # no mapping
return None
def __checkIns(self,ref,obs,Chr,pos,read,obsType,mapppedDic):
if (Chr,pos) in mapppedDic:
if len(mapppedDic[(Chr,pos)]['list']) > 0:
for cElem in mapppedDic[(Chr,pos)]['list']:
if cElem[2] == 1: # cigar I
return True
return False
else : # no Ins
return False
else : # no mapping
return None
def __checkDel(self,ref,obs,Chr,pos,read,obsType,mapppedDic):
if (Chr,pos) in mapppedDic:
delList = []
delLen = 0
while (Chr,pos+1+delLen) in mapppedDic and mapppedDic[(Chr,pos+1+delLen)]['cigar'] == 2 :
delList.append(mapppedDic[(Chr,pos+1+delLen)])
delLen += 1
delString=''
for Del in delList:
if Del['base'] == '-':
delString += Del['base']
else :
break
if len(delString) > 0 :
return True
else :
return False
else: # no mapping
return None
    def __getState2(self,ref,obs,Chr,pos,read1,read2,obsType,minBQ=None):
        """Classify an overlapping mate pair at pos into strand-aware counts.

        The forward-strand mate is always evaluated as state1.  When each
        mate yields exactly one classification the pair is counted once in a
        paired '<fwd><rev>+-' bucket; otherwise the per-strand singleton
        buckets ('R+', 'Ob-', ...) are used.  Returns the full count dict
        consumed by getOverlapInformation().
        """
        if not read1.is_reverse:
            state1 = self.__getState(ref,obs,Chr,pos,read1,obsType,minBQ)
            state2 = self.__getState(ref,obs,Chr,pos,read2,obsType,minBQ)
        else :
            state1 = self.__getState(ref,obs,Chr,pos,read2,obsType,minBQ)
            state2 = self.__getState(ref,obs,Chr,pos,read1,obsType,minBQ)
        refNum1 = 0
        obsNum1 = 0
        otherNum1 = 0
        refNum2 = 0
        obsNum2 = 0
        otherNum2 = 0
        if state1['ref']:
            refNum1 += 1
        if state1['obs']:
            obsNum1 += 1
        if state1['other'] is not None:
            otherNum1 += 1
        if state2['ref']:
            refNum2 += 1
        if state2['obs']:
            obsNum2 += 1
        if state2['other'] is not None:
            otherNum2 += 1
        Rp = refNum1
        Obp = obsNum1
        Otp = otherNum1
        Rm = refNum2
        Obm = obsNum2
        Otm = otherNum2
        RR = 0
        ROb = 0
        ROt = 0
        ObR = 0
        ObOb = 0
        ObOt = 0
        OtR = 0
        OtOb = 0
        OtOt = 0
        # only unambiguous pairs (one class per mate) enter the paired buckets
        if (refNum1+obsNum1+otherNum1) == 1 and (refNum2+obsNum2+otherNum2) == 1:
            # Not counting as single read
            Rp = 0
            Obp = 0
            Otp = 0
            Rm = 0
            Obm = 0
            Otm = 0
            if refNum1 > 0 and refNum2 > 0 :
                RR += 1
            elif refNum1 > 0 and obsNum2 > 0:
                ROb += 1
            elif refNum1 > 0 and otherNum2 > 0:
                ROt += 1
            elif obsNum1 > 0 and refNum2 > 0 :
                ObR += 1
            elif obsNum1 > 0 and obsNum2 > 0:
                ObOb += 1
            elif obsNum1 > 0 and otherNum2 > 0 :
                ObOt += 1
            elif otherNum1 > 0 and refNum2 > 0 :
                OtR += 1
            elif otherNum1 > 0 and obsNum2 > 0 :
                OtOb += 1
            elif otherNum1 > 0 and otherNum2 > 0:
                OtOt += 1
        return {'R+':Rp,'Ob+':Obp,'Ot+':Otp,'R-':Rm,'Ob-':Obm,'Ot-':Otm, \
                'RR+-':RR,'ROb+-':ROb,'ROt+-':ROt, \
                'ObR+-':ObR,'ObOb+-':ObOb,'ObOt+-':ObOt, \
                'OtR+-':OtR,'OtOb+-':OtOb,'OtOt+-':OtOt}
    def __getReadBQProfile(self, ref, obs, Chr, pos, obsType, minBQ, readBuffer):
        """Profile base qualities at pos over all reads whose CIGAR is a
        match (op 0) there: counts of high/low (vs minBQ) and the mean BQ.
        Non-SNV types and a missing buffer return the zeroed profile."""
        ansDic = {'high':0,'low':0, 'avg':0}
        readNumAll = 0
        if obsType != 'M':
            return ansDic
        if readBuffer is None:
            return ansDic
        for read in readBuffer:
            mappedDic = self.__makeMappedDic(read)
            if (Chr,pos) in mappedDic:
                if mappedDic[(Chr,pos)]['cigar'] == 0:
                    readNumAll +=1
                    ansDic['avg'] += mappedDic[(Chr,pos)]['bq']
                    if mappedDic[(Chr,pos)]['bq'] < minBQ:
                        ansDic['low'] += 1
                    else :
                        ansDic['high'] += 1
        # logging.info(str(ansDic))
        if readNumAll > 0:
            # convert the running BQ sum into the mean
            ansDic['avg'] /= (1.0 * readNumAll)
        return ansDic
    def __getVariantReadBQProfile(self, ref, obs, Chr, pos, obsType, minBQ, readBuffer):
        """Count high/low base qualities among reads that actually carry the
        observed variant base at pos (SNVs only); zeroed profile otherwise."""
        ansDic = {'high':0,'low':0}
        if obsType != 'M':
            return ansDic
        if readBuffer is None:
            return ansDic
        for read in readBuffer:
            mappedDic = self.__makeMappedDic(read)
            if (Chr,pos) in mappedDic:
                if mappedDic[(Chr,pos)]['base'] == obs and mappedDic[(Chr,pos)]['cigar'] == 0:
                    if mappedDic[(Chr,pos)]['bq'] < minBQ:
                        ansDic['low'] += 1
                    else :
                        ansDic['high'] += 1
        # NOTE(review): unconditional info-level log on every call -- looks
        # like leftover debugging; consider demoting to debug level.
        logging.info(str(ansDic))
        return ansDic
def __filterByVariantReadBQ(self, obsType, bqProfile, maxLowProportion, maxLowNum):
if obsType != 'M':
return True # passing this filter when I or D
proportion = 0.0
lowNum = bqProfile['low']
highNum = bqProfile['high']
if lowNum + highNum > 0:
proportion = (1.0*lowNum)/(1.0*lowNum + 1.0*highNum)
if not (proportion < maxLowProportion) or not( lowNum < maxLowNum ) :
return False
return True
    def __getState(self,ref,obs,Chr,pos,read,obsType,minBQ=None):
        """Classify a single read at (Chr, pos) against the candidate.

        Returns {'ref','obs','other','isPlus'} where 'ref'/'obs' are
        booleans, 'other' is None for an uninformative read (low BQ or pos
        unmapped), True for a conflicting event CLASS, or the conflicting
        base / insertion / deletion string itself.  'isPlus' reflects the
        read's strand.  NOTE(review): an obsType outside 'M'/'I'/'D' falls
        through returning None implicitly.
        """
        mapppedDic = self.__makeMappedDic(read)
        isPlus = not(read.is_reverse)
        # if read.mapping_quality < self.__minMapQ:
        #     return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
        if minBQ is None:
            minBQ = self.__minBQ
        if obsType == 'M':
            baseAtPos = None
            if (Chr,pos) in mapppedDic:
                if mapppedDic[(Chr,pos)]['base'] == '-':
                    if mapppedDic[(Chr,pos)]['cigar'] == 2:
                        # D
                        return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                    else :
                        # other
                        return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                if mapppedDic[(Chr,pos)]['bq'] < minBQ:
                    return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                # an indel at/after the site overrides the base call
                insAns = self.__checkIns(ref,obs,Chr,pos,read,obsType,mapppedDic)
                delAns = self.__checkDel(ref,obs,Chr,pos,read,obsType,mapppedDic)
                if insAns or delAns:
                    return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                baseAtPos = mapppedDic[(Chr,pos)]['base']
                baseAtPos = baseAtPos.upper()
                isref = (ref == baseAtPos)
                isobs = (obs == baseAtPos)
                if isref or isobs :
                    return {'ref':isref,'obs':isobs,'other':None,'isPlus':isPlus}
                else:
                    return {'ref':isref,'obs':isobs,'other':baseAtPos,'isPlus':isPlus}
            else : # no mapping
                return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
        elif obsType == 'I':
            if (Chr,pos) in mapppedDic:
                if mapppedDic[(Chr,pos)]['base'] == '-':
                    if mapppedDic[(Chr,pos)]['cigar'] == 2:
                        # D
                        return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                    else :
                        # other
                        return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                if mapppedDic[(Chr,pos)]['bq'] < minBQ:
                    return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                snvAns = self.__checkSNV(ref,obs,Chr,pos,read,obsType,mapppedDic)
                delAns = self.__checkDel(ref,obs,Chr,pos,read,obsType,mapppedDic)
                if snvAns or delAns:
                    return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                if len(mapppedDic[(Chr,pos)]['list']) > 0:
                    # first anchored insertion decides the classification
                    for cElem in mapppedDic[(Chr,pos)]['list']:
                        if cElem[2] == 1: # cigar I
                            isobs = (cElem[0] == obs)
                            if isobs:
                                return {'ref':False,'obs':isobs,'other':None,'isPlus':isPlus}
                            else:
                                return {'ref':False,'obs':False,'other':cElem[0],'isPlus':isPlus}
                    return {'ref':True,'obs':False,'other':None,'isPlus':isPlus}
                else : # no Ins
                    return {'ref':True,'obs':False,'other':None,'isPlus':isPlus}
            else :
                return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
        elif obsType == 'D':
            if (Chr,pos) in mapppedDic:
                if mapppedDic[(Chr,pos)]['base'] == '-':
                    if mapppedDic[(Chr,pos)]['cigar'] == 2:
                        # D
                        return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                    else :
                        # other -- NOTE(review): identical to the branch above;
                        # the commented alternative below suggests this may
                        # once have returned 'other': None.  Confirm intent.
                        return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                        # return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                if mapppedDic[(Chr,pos)]['bq'] < minBQ:
                    return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
                snvAns = self.__checkSNV(ref,obs,Chr,pos,read,obsType,mapppedDic)
                insAns = self.__checkIns(ref,obs,Chr,pos,read,obsType,mapppedDic)
                if snvAns or insAns:
                    return {'ref':False,'obs':False,'other':True,'isPlus':isPlus}
                # collect the run of deleted reference bases starting at pos+1
                delList = []
                delLen = 0
                while (Chr,pos+1+delLen) in mapppedDic and mapppedDic[(Chr,pos+1+delLen)]['cigar'] == 2:
                    delList.append(mapppedDic[(Chr,pos+1+delLen)])
                    delLen += 1
                delString=''
                for Del in delList:
                    if Del['base'] == '-':
                        delString += Del['base']
                    else :
                        break
                if len(delString) > 0:
                    # deletion supports the candidate only when lengths match
                    if len(delString) == len(obs):
                        return {'ref':False,'obs':True,'other':None,'isPlus':isPlus}
                    else :
                        return {'ref':False,'obs':False,'other':delString,'isPlus':isPlus}
                else:
                    return {'ref':True,'obs':False,'other':None,'isPlus':isPlus}
            else:
                return {'ref':False,'obs':False,'other':None,'isPlus':isPlus}
    def __mutListToPysam(self,pos):
        """Convert a 1-based mutation-list position to pysam's 0-based coordinate."""
        return pos-1
def __printOverlappingReads(self,TYPE,Chr,pos,ref,obs):
print(TYPE+' '+str(Chr)+' '+str(pos)+' '+str(ref)+' '+str(obs))
if self.__bam is None:
raise InvalidBamFileManagement
pos = int(pos)
readList = {}
readBuffer = []
for read in self.__bam.fetch(Chr,max(pos-self.__windowSize,0),pos+self.__windowSize):
readBuffer.append(read)
readBuffer = self.__filterReads(readBuffer,TYPE,self.__chrDic[Chr],self.__mutListToPysam(pos),ref,obs)
for read in readBuffer:
if read.query_name not in readList:
readList[read.query_name] = []
readList[read.query_name].append(read)
if readBuffer is None:
print("No reads")
else :
totalOverlap = 0
for ID in readList:
if len(readList[ID]) == 2:
reads = []
if readList[ID][1].is_reverse:
dics =[self.__makeMappedDic(readList[ID][0]),self.__makeMappedDic(readList[ID][1])]
reads = [readList[ID][0],readList[ID][1]]
else:
dics =[self.__makeMappedDic(readList[ID][1]),self.__makeMappedDic(readList[ID][0])]
reads = [readList[ID][1],readList[ID][0]]
coverNum = 0
basesList = []
bqList = []
mappedLengths = []
foundPosition = []
foundBases = []
for dic in dics:
mappedLengths.append(len(dic.keys()))
readString = ''
bqString = ''
cycle = 0
for at in sorted(dic.keys()):
if at[1] == pos-1:
foundPosition.append(cycle)
coverNum += 1
readString += " "
bqString += " "
if len(dic[at]['list']) > 0 or dic[at]['bq'] is None:
readString += '^'
foundBases.append('^')
else:
readString += dic[at]['base']
foundBases.append(dic[at]['base'])
if dic[at]['base'] == '-' or dic[at]['bq'] is None:
bqString += '-'
else:
bqString += chr(dic[at]['bq']+33)
readString += " "
bqString += " "
else :
if len(dic[at]['list']) > 0 or dic[at]['bq'] is None:
readString += '^'
else:
readString += dic[at]['base']
if dic[at]['base'] == '-' or dic[at]['bq'] is None:
bqString += '-'
else:
bqString += chr(dic[at]['bq']+33)
cycle += 1
basesList.append(readString)
bqList.append(bqString)
if coverNum == 2:
totalOverlap +=1
if (TYPE == 'M' and foundBases[0] == obs and foundBases[1] == obs):
print("")
print(reads[0].template_length)
print(basesList[0])
print(bqList[0])
print(basesList[1])
print(bqList[1])
    def getOverlapInformation(self,TYPE,Chr,pos,ref,obs,lowBQ=None):
        """Pile up reads around a 1-based candidate and return strand- and
        overlap-aware support counts.

        Args:
            TYPE: 'M' (SNV), 'I' (insertion) or 'D' (deletion).
            lowBQ: optional alternative minimum BQ; when given, a second
                count set computed with that threshold is returned too.

        Returns:
            (headCols, bodyCols), or [headCols, bodyCols, bodyColsLowBQ]
            when lowBQ is given; (None, None) / (None, None, None) when the
            read filter rejects the site.  NOTE(review): the success path
            returns a tuple without lowBQ but a list with it -- confirm
            callers tolerate both container types.

        Raises:
            InvalidBamFileManagement: no BAM is open.
            InvalidMutationType: unknown TYPE.
            InsufficientBamReadFilter: >2 reads share one query name.
        """
        if self.__bam is None:
            raise InvalidBamFileManagement
        pos = int(pos)
        obsType = TYPE
        pileAns = None
        pileAnsLowBQ = None
        keys = None
        if obsType == 'M' or obsType == 'I' or obsType == 'D':
            pileAns = {'R+':0,'Ob+':0,'Ot+':0,'R-':0,'Ob-':0,'Ot-':0, \
                       'RR+-':0,'ROb+-':0,'ROt+-':0, \
                       'ObR+-':0,'ObOb+-':0,'ObOt+-':0, \
                       'OtR+-':0,'OtOb+-':0,'OtOt+-':0}
            pileAnsLowBQ = {'R+':0,'Ob+':0,'Ot+':0,'R-':0,'Ob-':0,'Ot-':0, \
                            'RR+-':0,'ROb+-':0,'ROt+-':0, \
                            'ObR+-':0,'ObOb+-':0,'ObOt+-':0, \
                            'OtR+-':0,'OtOb+-':0,'OtOt+-':0}
            keys = ['R+','Ob+','Ot+','R-','Ob-','Ot-', \
                    'RR+-','ROb+-','ROt+-', \
                    'ObR+-','ObOb+-','ObOt+-', \
                    'OtR+-','OtOb+-','OtOt+-']
            readList = {}
            readBuffer = []
            for read in self.__bam.fetch(Chr,max(pos-self.__windowSize,0),pos+self.__windowSize):
                readBuffer.append(read)
            readBuffer = self.__filterReads(readBuffer,TYPE,self.__chrDic[Chr],self.__mutListToPysam(pos),ref,obs)
            if readBuffer is None:
                # whole site rejected by the read filter
                if lowBQ is None:
                    return (None,None)
                else:
                    return (None,None,None)
            # group by query name: 2 reads => overlapping mate pair
            for read in readBuffer:
                if read.query_name not in readList:
                    readList[read.query_name] = []
                readList[read.query_name].append(read)
            for ID in readList:
                if len(readList[ID]) == 2:
                    pairAns = self.__getState2(ref,obs,self.__chrDic[Chr],\
                        self.__mutListToPysam(pos),readList[ID][0],readList[ID][1],obsType)
                    for key in keys:
                        pileAns[key] += pairAns[key]
                elif len(readList[ID]) == 1:
                    singleAns = self.__getState(ref,obs,self.__chrDic[Chr],\
                        self.__mutListToPysam(pos),readList[ID][0],obsType)
                    plusStr = None
                    if singleAns['isPlus']:
                        plusStr = '+'
                    else:
                        plusStr = '-'
                    if singleAns['ref']:
                        pileAns['R'+plusStr] += 1
                    elif singleAns['obs']:
                        pileAns['Ob'+plusStr] += 1
                    elif singleAns['other'] is not None:
                        pileAns['Ot'+plusStr] += 1
                else :
                    raise InsufficientBamReadFilter
            if lowBQ is not None:
                # second pass with the caller-supplied BQ threshold
                for ID in readList:
                    if len(readList[ID]) == 2:
                        pairAnsLowBQ = self.__getState2(ref,obs,self.__chrDic[Chr],\
                            self.__mutListToPysam(pos),readList[ID][0],readList[ID][1],obsType,lowBQ)
                        for key in keys:
                            pileAnsLowBQ[key] += pairAnsLowBQ[key]
                    elif len(readList[ID]) == 1:
                        singleAnsLowBQ = self.__getState(ref,obs,self.__chrDic[Chr],\
                            self.__mutListToPysam(pos),readList[ID][0],obsType,lowBQ)
                        plusStr = None
                        if singleAnsLowBQ['isPlus']:
                            plusStr = '+'
                        else:
                            plusStr = '-'
                        if singleAnsLowBQ['ref']:
                            pileAnsLowBQ['R'+plusStr] += 1
                        elif singleAnsLowBQ['obs']:
                            pileAnsLowBQ['Ob'+plusStr] += 1
                        elif singleAnsLowBQ['other'] is not None:
                            pileAnsLowBQ['Ot'+plusStr] += 1
                    else :
                        raise InsufficientBamReadFilter
            headCols = [obsType,Chr,pos,ref,obs]
            bodyCols = []
            bodyColsLowBQ = []
            for key in keys:
                bodyCols.append(pileAns[key])
                if lowBQ is not None:
                    bodyColsLowBQ.append(pileAnsLowBQ[key])
            if lowBQ is None:
                return (headCols,bodyCols)
            else:
                return [headCols,bodyCols,bodyColsLowBQ]
        else :
            raise InvalidMutationType
def getReadsAround(self,TYPE,Chr,pos,ref,obs,ignoreObs=False):
if self.__bam is None:
raise InvalidBamFileManagement
pos = int(pos)
obsType = TYPE
if obsType == 'M' or obsType == 'I' or obsType == 'D':
# readList = {}
readBuffer = []
ansList = []
for read in self.__bam.fetch(Chr,max(pos-self.__windowSize,0),pos+self.__windowSize):
readBuffer.append(read)
for read in readBuffer:
singleAns = self.__getState(ref,obs,self.__chrDic[Chr],self.__mutListToPysam(pos),read,obsType)
if not ignoreObs:
ansList.append(read)
elif ignoreObs and ( singleAns['obs'] == 0 ):
ansList.append(read)
return ansList
| takumorizo/OVarCall | lib/OVarCall/utilOVar/bamDAO.py | Python | gpl-3.0 | 34,172 | [
"pysam"
] | 6c7265315d06f1411cd29b2be2fcbb019b354d32d8bb3512631274edfc689947 |
# Public API re-exported via `from ... import *`.
# NOTE(review): `get_array` and `get_field_id_by_name` are used by the other
# helpers but not exported here -- confirm whether that omission is intended.
__all__ = [
    'get_selected_array_name',
    'get_selected_array_field',
    'copy_arrays_to_point_data',
    'get_numpy_array',
    'get_vtk_array',
    'add_array',
    'get_selected_array',
    'search_for_array',
    'get_all_array_names',
]
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from . import errors as _helpers
def get_selected_array_name(algorithm, idx):
    """Name of the input array selected at index *idx* on a VTK algorithm.

    Args:
        algorithm (vtkAlgorithm): the algorithm instance to query
        idx (int): the input array index

    Return:
        str: the selected array's name
    """
    return algorithm.GetInputArrayInformation(idx).Get(vtk.vtkDataObject.FIELD_NAME())
def get_selected_array_field(algorithm, idx):
    """Field association of the input array selected at index *idx*.

    Args:
        algorithm (vtkAlgorithm): the algorithm instance to query
        idx (int): the input array index

    Return:
        int: the field association id of the selected array
    """
    return algorithm.GetInputArrayInformation(idx).Get(vtk.vtkDataObject.FIELD_ASSOCIATION())
def get_field_id_by_name(field):
    """Map a field-association name to its VTK field id.

    Accepted (case-insensitive): point/pt/p -> 0, cell/c -> 1,
    field/f -> 2, row/r -> 6.

    Raises:
        PVGeoError: for any unrecognised name.
    """
    lookup = {
        'point': 0, 'pt': 0, 'p': 0,
        'cell': 1, 'c': 1,
        'field': 2, 'f': 2,
        'row': 6, 'r': 6,
    }
    key = field.lower()
    if key not in lookup:
        raise _helpers.PVGeoError(
            'Field association not defined. Try inputing `point`, `cell`, `field`, or `row`.'
        )
    return lookup[key]
def copy_arrays_to_point_data(pdi, pdo, field):
    """Copy every array of the given field association on *pdi* into the
    POINT data of *pdo*.

    Args:
        pdi (vtkDataObject): the input to copy from
        pdo (vtkDataObject): the output to copy to
        field (int or str): the field association id or name

    Return:
        vtkDataObject: *pdo*, with the arrays attached to its point data
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    if field == 0:
        source = pdi.GetPointData()
    elif field == 1:
        # Cell Data: DO NOT USE (cell arrays land in point data verbatim)
        source = pdi.GetCellData()
    elif field == 2:
        source = pdi.GetFieldData()
    elif field == 6:
        source = pdi.GetRowData()
    else:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    # single copy loop shared by every association
    for idx in range(source.GetNumberOfArrays()):
        pdo.GetPointData().AddArray(source.GetArray(idx))
    return pdo
def get_numpy_array(wpdi, field, name):
    """Fetch a named array from a data object as a NumPy-wrapped array.

    Args:
        wpdi: a vtkDataObject, wrapped with the dataset adapter if needed
        field (int or str): the field association id or name
        name (str): the array name

    Return:
        numpy.array: the dataset-adapter-wrapped array
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    if not isinstance(wpdi, vtk.numpy_interface.dataset_adapter.DataObject):
        wpdi = dsa.WrapDataObject(wpdi)
    if field == 0:
        return wpdi.PointData[name]
    if field == 1:
        return wpdi.CellData[name]
    if field == 2:
        return wpdi.FieldData[name]
    if field == 6:
        return wpdi.RowData[name]
    raise _helpers.PVGeoError(
        'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
            field
        )
    )
def get_vtk_array(pdi, field, name):
    """Return the named ``vtkDataArray`` held on *pdi*.

    Args:
        pdi (vtkDataObject): the input data object
        field (int or str): the field association id or name
        name (str): the array name

    Return:
        vtkDataArray: the requested array
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    if field == 0:
        return pdi.GetPointData().GetArray(name)
    if field == 1:
        return pdi.GetCellData().GetArray(name)
    if field == 2:
        return pdi.GetFieldData().GetArray(name)
    if field == 6:
        return pdi.GetRowData().GetArray(name)
    raise _helpers.PVGeoError(
        'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
            field
        )
    )
def get_selected_array(algorithm, wpdi, idx):
    """Gets the array selected at index idx on a VTK algorithm.

    Args:
        algorithm (vtkAlgorithm) : A vtkAlgorithm class instantiation
        wpdi (wrapped vtkDataObject) : the input data object wrapped using vtk
            dataset adapter
        idx (int) : the input array index

    Return:
        the selected array (see note below on the wrapping)
    """
    name = get_selected_array_name(algorithm, idx)
    field = get_selected_array_field(algorithm, idx)
    # NOTE(review): this delegates to get_array(), which returns the raw
    # vtkDataArray; get_numpy_array() would return the NumPy-wrapped form the
    # original docstring promised -- confirm which is intended.
    return get_array(wpdi, field, name)
def add_array(pdo, field, vtkArray):
    """Attach *vtkArray* to *pdo* under the given field association.

    Args:
        pdo (vtkDataObject): the output data object
        field (int or str): the field association id or name
        vtkArray (vtkDataArray): the array to attach

    Return:
        vtkDataObject: *pdo*, with the array attached
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    # dispatch table instead of an if/elif chain
    accessors = {0: 'GetPointData', 1: 'GetCellData', 2: 'GetFieldData', 6: 'GetRowData'}
    if field not in accessors:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    getattr(pdo, accessors[field])().AddArray(vtkArray)
    return pdo
def _get_data(pdi, field):
    """Return the attribute-data object (point/cell/field/row) of *pdi*.

    Raises:
        PVGeoError: for an unknown association, or when *pdi* lacks the
            requested association entirely.
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    accessors = {0: 'GetPointData', 1: 'GetCellData', 2: 'GetFieldData', 6: 'GetRowData'}
    if field not in accessors:
        raise _helpers.PVGeoError(
            'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
                field
            )
        )
    try:
        return getattr(pdi, accessors[field])()
    except AttributeError:
        # the object has no such accessor, e.g. row data on a poly data
        raise _helpers.PVGeoError(
            'Input data does not have field type `{}`.'.format(field)
        )
def get_array(pdi, field, name):
    """Return the named array from *pdi* for one field association.

    Associations: Point=0, Cell=1, Field=2, Row=6 (or their names).

    Args:
        pdi (vtkDataObject): the input data object
        field (int or str): the field association id or name
        name (str): the array name

    Return:
        the requested vtkDataArray
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    return _get_data(pdi, field).GetArray(name)
def search_for_array(pdi, name):
    """Search every field association of *pdi* for an array called *name*.

    Scans point (0), cell (1), field (2) then row (6) data and returns the
    first hit.

    Return:
        tuple: ``(array, field_id)`` for the first match.

    Raises:
        PVGeoError: when no association holds an array named *name*.
    """
    def _search_field(field):
        # Linear scan of one association; None when the name is absent.
        data = _get_data(pdi, field)
        for i in range(data.GetNumberOfArrays()):
            if data.GetArrayName(i) == name:
                return data.GetArray(i)
        return None

    for field in (0, 1, 2, 6):
        try:
            arr = _search_field(field)
        except _helpers.PVGeoError:
            # pdi lacks this association entirely; keep looking
            continue
        if arr is not None:
            # We found it!
            return arr, field
    raise _helpers.PVGeoError('Array `{}` not found in input data.'.format(name))
    # removed unreachable `return None` that followed the raise
def get_all_array_names(dataset, field):
    """Gets the names of all arrays for a given field association.

    Notes:
        - Point Data: 0
        - Cell Data: 1
        - Field Data: 2
        - Row Data: 6

    Args:
        dataset (vtkDataObject) : the input data object (raw or already
            wrapped by the numpy dataset adapter)
        field (int or str) : the field type id or name

    Return:
        list: the names of every array on that field

    Raises:
        PVGeoError: if the field association is not recognized
    """
    if isinstance(field, str):
        field = get_field_id_by_name(field)
    # Wrap with the numpy dataset adapter unless the caller already did.
    if not isinstance(dataset, vtk.numpy_interface.dataset_adapter.DataObject):
        wpdi = dsa.WrapDataObject(dataset)
    else:
        wpdi = dataset
    # Point Data
    if field == 0:
        return wpdi.PointData.keys()
    # Cell Data:
    elif field == 1:
        return wpdi.CellData.keys()
    # Field Data:
    elif field == 2:
        return wpdi.FieldData.keys()
    # Row Data:
    elif field == 6:
        return wpdi.RowData.keys()
    # Unknown association. (The original had an unreachable `return None`
    # after this raise; removed.)
    raise _helpers.PVGeoError(
        'Field association ({}) not defined. Try inputing Point, Cell, Field, or Row data.'.format(
            field
        )
    )
| banesullivan/ParaViewGeophysics | PVGeo/_helpers/arrays.py | Python | bsd-3-clause | 9,733 | [
"VTK"
] | fa12563c67dae7a214458dd97bfd5d14584cf0ba72357d596f76829b3d20886b |
#!/usr/bin/python3
#
# File: lsf_status.py
#
# Author: George Papadimitriou
# e-mail: georgpap@isi.edu
# Author: Brian Bockelman
# e-mail: bbockelm@cse.unl.edu
#
#
# Copyright (c) University of Nebraska-Lincoln. 2012
# Copyright (c) University of Wisconsin-Madison. 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Query LSF for the status of a given job
Internally, it creates a cache of the LSF bjobs response and will reuse this
for subsequent queries.
"""
from __future__ import print_function
import os
import re
import pwd
import sys
import time
import errno
import fcntl
import random
import struct
import subprocess
import signal
import tempfile
import traceback
import pickle
import csv
import binascii
# Make modules that live next to this script importable when it is run
# directly (blahp invokes it as a standalone executable).
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
# Seconds before the on-disk bjobs results cache is considered stale.
cache_timeout = 60
# Time this process started; compared against the cache file's mtime in
# check_cache() and refreshed by fill_cache().
launchtime = time.time()
def log(msg):
    """
    A very lightweight log - not meant to be used in production, but helps
    when debugging scale tests
    """
    stamp = time.strftime("%x %X")
    # Emit "<date time> <pid> <message>" on stderr so stdout stays protocol-clean.
    print(stamp, os.getpid(), msg, file=sys.stderr)
def to_str(strlike, encoding="latin-1", errors="strict"):
    """Turns a bytes into a str or leaves it alone.
    The default encoding is latin-1 (which will not raise
    a UnicodeDecodeError); best to use when you want to treat the data
    as arbitrary bytes, but some function is expecting a str.
    """
    if not isinstance(strlike, bytes):
        # Already a str (or something str-like); pass through untouched.
        return strlike
    return strlike.decode(encoding, errors)
def createCacheDir():
    """Ensure the per-user bjobs cache directory exists and return its path.

    Refuses to use a directory owned by someone else -- a squatting guard,
    since /var/tmp is world-writable.
    """
    euid = os.geteuid()
    user = pwd.getpwuid(euid).pw_name
    cache_dir = os.path.join("/var/tmp", "bjobs_cache_%s" % user)
    try:
        os.mkdir(cache_dir, 0o755)
    except OSError as oe:
        # An already-existing directory is fine; anything else is fatal.
        if oe.errno != errno.EEXIST:
            raise
    stat_info = os.stat(cache_dir)
    if stat_info.st_uid != euid:
        raise Exception("Unable to check cache because it is owned by UID %d" % stat_info.st_uid)
    return cache_dir
def initLog():
    """
    Determine whether to create a logfile based on the presence of a file
    in the user's bjobs cache directory. If so, make the logfile there.
    """
    cache_dir = createCacheDir()
    if os.path.exists(os.path.join(cache_dir, "lsf_status.debug")):
        filename = os.path.join(cache_dir, "lsf_status.log")
    else:
        # Debug marker absent: discard all log output.
        filename = "/dev/null"
    fd = open(filename, "a")
    # Do NOT close the file descriptor blahp originally hands us for stderr.
    # This causes blahp to lose all status updates.
    os.dup(2)
    # Keep a duplicate of the original stderr fd alive (above), then point
    # fd 2 at our logfile so log() output lands there.
    os.dup2(fd.fileno(), 2)
# Something else from a prior life - see gratia-probe-common's GratiaWrapper.py
def ExclusiveLock(fd, timeout=120):
    """
    Grabs an exclusive lock on fd
    If the lock is owned by another process, and that process is older than the
    timeout, then the other process will be signaled. If the timeout is
    negative, then the other process is never signaled.
    If we are unable to hold the lock, this call will not block on the lock;
    rather, it will throw an exception.
    By default, the timeout is 120 seconds.

    NOTE(review): `timeout` here is how old the *holding* process may get
    before it is killed (see check_lock); the total time *we* spend retrying
    is capped separately by max_time below (30s), not by `timeout`.
    """
    # POSIX file locking is cruelly crude. There's nothing to do besides
    # try / sleep to grab the lock, no equivalent of polling.
    # Why hello, thundering herd.
    # An alternate would be to block on the lock, and use signals to interupt.
    # This would mess up Gratia's flawed use of signals already, and not be
    # able to report on who has the lock. I don't like indefinite waits!
    max_time = 30
    starttime = time.time()
    tries = 1
    while time.time() - starttime < max_time:
        try:
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return
        except IOError as ie:
            # EACCES/EAGAIN mean "someone else holds it"; anything else is fatal.
            if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
                raise
        # check_lock returns True when the holder was us or was just killed
        # for exceeding `timeout` -- in which case the lock should free up soon.
        if check_lock(fd, timeout):
            time.sleep(.2) # Fast case; however, we have *no clue* how
            # long it takes to clean/release the old lock.
            # Nor do we know if we'd get it if we did
            # fcntl.lockf w/ blocking immediately. Blech.
            # Check again immediately, especially if this was the last
            # iteration in the for loop.
            try:
                fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return
            except IOError as ie:
                if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
                    raise
        # Random back-off to de-synchronize competing status queries.
        sleeptime = random.random()
        log("Unable to acquire lock, try %i; will sleep for %.2f " \
            "seconds and try for %.2f more seconds." % (tries, sleeptime, max_time - (time.time()-starttime)))
        tries += 1
        time.sleep(sleeptime)
    log("Fatal exception - Unable to acquire lock")
    raise Exception("Unable to acquire lock")
def check_lock(fd, timeout):
    """
    For internal use only.
    Given a fd that is locked, determine which process has the lock.
    Kill said process if it is older than "timeout" seconds.
    This will log the PID of the "other process".

    Returns True when the lock is (or should shortly become) ours: either we
    already hold it, or the stale holder was just killed. Returns False when
    another live process legitimately holds it.
    """
    pid = get_lock_pid(fd)
    if pid == os.getpid():
        # We already own the lock.
        return True
    if timeout < 0:
        # Negative timeout means "never signal the other process".
        log("Another process, %d, holds the cache lock." % pid)
        return False
    try:
        age = get_pid_age(pid)
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception. The /proc lookup can
        # legitimately fail if the holder exited between the two calls.
        log("Another process, %d, holds the cache lock." % pid)
        log("Unable to get the other process's age; will not time it out.")
        return False
    log("Another process, %d (age %d seconds), holds the cache lock." % (pid, age))
    if age > timeout:
        # Holder exceeded its allowed lifetime; assume it is stuck.
        os.kill(pid, signal.SIGKILL)
    else:
        return False
    return True
# struct flock pack format for fcntl F_GETLK on 64-bit Linux:
# l_type, l_whence (shorts) + padding, l_start, l_len (64-bit), l_pid (int)
# + trailing padding. See get_lock_pid() below for the field layout.
linux_struct_flock = "hhxxxxqqixxxx"
try:
    # Probe for large-file support; absence suggests a 32-bit off_t layout.
    os.O_LARGEFILE
except AttributeError:
    # NOTE(review): start_len is assigned here but never read anywhere in
    # this file -- presumably it was meant to replace linux_struct_flock on
    # 32-bit platforms. Confirm intent against upstream blahp before use.
    start_len = "hhlli"
def get_lock_pid(fd):
    """Return the PID of the process holding a write lock on *fd*, as
    reported by fcntl F_GETLK, or False if the struct layout probe fails
    on this platform."""
    # For reference, here's the definition of struct flock on Linux
    # (/usr/include/bits/fcntl.h).
    #
    # struct flock
    # {
    # short int l_type; /* Type of lock: F_RDLCK, F_WRLCK, or F_UNLCK. */
    # short int l_whence; /* Where `l_start' is relative to (like `lseek'). */
    # __off_t l_start; /* Offset where the lock begins. */
    # __off_t l_len; /* Size of the locked area; zero means until EOF. */
    # __pid_t l_pid; /* Process holding the lock. */
    # };
    #
    # Note that things are different on Darwin
    # Assuming off_t is unsigned long long, pid_t is int
    try:
        if sys.platform == "darwin":
            # Darwin orders the fields l_start, l_len, l_pid, l_type, l_whence.
            arg = struct.pack("QQihh", 0, 0, 0, fcntl.F_WRLCK, 0)
        else:
            arg = struct.pack(linux_struct_flock, fcntl.F_WRLCK, 0, 0, 0, 0)
        # F_GETLK fills the struct with the description of a conflicting lock.
        result = fcntl.fcntl(fd, fcntl.F_GETLK, arg)
    except IOError as ie:
        if ie.errno != errno.EINVAL:
            raise
        log("Unable to determine which PID has the lock due to a " \
            "python portability failure. Contact the developers with your" \
            " platform information for support.")
        return False
    if sys.platform == "darwin":
        _, _, pid, _, _ = struct.unpack("QQihh", result)
    else:
        _, _, _, _, pid = struct.unpack(linux_struct_flock, result)
    return pid
def get_pid_age(pid):
    """Return the age of process *pid* in seconds, using the ctime of its
    /proc entry (Linux-specific; raises OSError if the process is gone)."""
    now = time.time()
    proc_stat = os.stat("/proc/%d" % pid)
    return now - proc_stat.st_ctime
def bjobs(jobid=""):
    """
    Call bjobs directly for a jobid.
    If none is specified, query all jobid's.
    Returns a python dictionary with the job info.
    """
    bjobs = get_bjobs_location()
    command = (bjobs, '-V')
    # Log the bjobs version string first -- useful when debugging site issues.
    bjobs_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    bjobs_version, _ = bjobs_process.communicate()
    bjobs_version = to_str(bjobs_version)
    log(bjobs_version)
    starttime = time.time()
    log("Starting bjobs.")
    # -UF requests unformatted full output, which parse_bjobs_fd understands;
    # -a includes jobs in all states when querying everything.
    if jobid != "":
        bjobs_process = subprocess.Popen(("%s -UF %s" % (bjobs, jobid)), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True)
    else:
        bjobs_process = subprocess.Popen(("%s -UF -a" % bjobs), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True)
    bjobs_process_stdout, bjobs_process_stderr = bjobs_process.communicate()
    bjobs_process_stdout = to_str(bjobs_process_stdout)
    bjobs_process_stderr = to_str(bjobs_process_stderr)
    if bjobs_process_stderr == "":
        result = parse_bjobs_fd(bjobs_process_stdout.splitlines())
    elif jobid != "":
        # Any stderr output for a specific job is treated as "job no longer
        # known to LSF": report it as completed (status 3) with exit code 0.
        result = {jobid: {'BatchJobId': '"%s"' % jobid, 'JobStatus': '3', 'ExitCode': ' 0'}}
    else:
        result = {}
    exit_code = bjobs_process.returncode
    log("Finished bjobs (time=%f)." % (time.time()-starttime))
    if exit_code:
        raise Exception("bjobs failed with exit code %s" % str(exit_code))
    # If the job has completed...
    if jobid != "" and "JobStatus" in result[jobid] and (result[jobid]["JobStatus"] == '4' or result[jobid]["JobStatus"] == '3'):
        # Get the finished job stats and update the result
        finished_job_stats = get_finished_job_stats(jobid)
        result[jobid].update(finished_job_stats)
    return result
def which(program):
    """
    Determine if the program is in the path.
    arg program: name of the program to search
    returns: full path to executable, or None if executable is not found
    """
    def runnable(candidate):
        # A usable executable is a regular file we may execute.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    directory, _ = os.path.split(program)
    if directory:
        # Caller gave an explicit path; accept it only if it is executable.
        if runnable(program):
            return program
        return None
    # Bare command name: walk PATH, stripping stray quotes from entries.
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if runnable(candidate):
            return candidate
    return None
#def convert_cpu_to_seconds(cpu_string):
#    import re
#    h,m,s = re.split(':',cpu_string)
#    return int(h) * 3600 + int(m) * 60 + int(s)
# Patterns for scraping `bhist -UF` output in get_finished_job_stats().
cpu_time_re = re.compile("CPU time used is ([0-9.]+) seconds")
max_mem_re = re.compile("MAX MEM: ([0-9.]+) (\w+);")
# NOTE(review): exit_status_re is re-defined with the identical pattern
# further down for the bjobs parser; keep the two in sync if either changes.
exit_status_re = re.compile("Exited [\w ]+ (-?[0-9]+). The CPU")
# Memoized contents of the cache dir's "cluster_type" marker file
# (written by fill_cache; currently only "lsf" is ever stored).
_cluster_type_cache = None
def get_finished_job_stats(jobid):
    """
    Get a completed job's statistics such as used RAM and cpu usage.
    """
    # List the attributes that we want
    return_dict = { "ImageSize": 0, "ExitCode": 0, "RemoteUserCpu": 0 }
    # First, determine if this is an lsf machine.
    uid = os.geteuid()
    username = pwd.getpwuid(uid).pw_name
    cache_dir = os.path.join("/var/tmp", "bjobs_cache_%s" % username)
    cluster_type_file = os.path.join(cache_dir, "cluster_type")
    global _cluster_type_cache
    if not _cluster_type_cache:
        # Look for the special file, cluster_type
        if os.path.exists(cluster_type_file):
            _cluster_type_cache = open(cluster_type_file).read()
        else:
            # No idea what type of cluster is running, not set, so give up
            log("cluster_type file is not present, not checking for completed job statistics")
            return return_dict
    # LSF completion
    if _cluster_type_cache == "lsf":
        log("Querying bjobs for completed job for jobid: %s" % (str(jobid)))
        # bhist keeps records for finished jobs that bjobs has already dropped.
        bhist_process = subprocess.Popen(("bhist -UF %s" % str(jobid)), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=True)
        bhist_process_stdout, _ = bhist_process.communicate()
        bhist_process_stdout = to_str(bhist_process_stdout)
        for line in bhist_process_stdout.splitlines():
            line = line.strip()
            m = exit_status_re.search(line)
            if m:
                return_dict["ExitCode"] = int(m.group(1))
                continue
            m = cpu_time_re.search(line)
            if m:
                return_dict['RemoteUserCpu'] = m.group(1)
                continue
            m = max_mem_re.search(line)
            if m:
                # Scale the reported "MAX MEM" figure by its unit prefix.
                # NOTE(review): the factors convert M/G/T/P/E units down to
                # kilobytes, which presumably is the unit ImageSize consumers
                # expect -- confirm against the blahp side.
                mem_unit = m.group(2)
                factor = 1
                if mem_unit[0] == 'M':
                    factor = 1024
                elif mem_unit[0] == 'G':
                    factor = 1024**2
                elif mem_unit[0] == 'T':
                    factor = 1024**3
                elif mem_unit[0] == 'P':
                    factor = 1024**4
                elif mem_unit[0] == 'E':
                    factor = 1024**5
                return_dict["ImageSize"] = int(float(m.group(1))) * factor
    return return_dict
# Memoized absolute path of the bjobs binary (resolved once per process).
_bjobs_location_cache = None
def get_bjobs_location():
    """
    Locate the copy of bjobs the blahp configuration wants to use.
    """
    global _bjobs_location_cache
    if _bjobs_location_cache != None:
        return _bjobs_location_cache
    load_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'blah_load_config.sh')
    if os.path.exists(load_config_path) and os.access(load_config_path, os.R_OK):
        # Prefer the path configured for the blahp ($lsf_binpath).
        cmd = "/bin/bash -c 'source %s && echo $lsf_binpath/bjobs'" % load_config_path
    else:
        # No blahp config next to this script; fall back to the PATH lookup.
        cmd = 'which bjobs'
    child_stdout = os.popen(cmd)
    output = child_stdout.read()
    output = to_str(output)
    location = output.split("\n")[0].strip()
    # popen's close() returns a non-None status only when the command failed.
    if child_stdout.close():
        raise Exception("Unable to determine bjobs location: %s" % output)
    _bjobs_location_cache = location
    return location
# Patterns for scraping `bjobs -UF` output, one record per "Job <id>" header.
job_id_re = re.compile("Job <([0-9]+)>")
exec_host_re = re.compile("Started [0-9]+ Task\(s\) on Host\(s\) ([\w< >]+),")
status_re = re.compile("Status <(\w+)>")
exit_status_re = re.compile("Exited [\w ]+ (-?[0-9]+). The CPU")
# LSF state -> blahp/HTCondor job status code (1=idle, 2=running,
# 4=completed, 5=held). Unknown states map to 0 and are ignored.
status_mapping = {"PEND": 1, "RUN": 2, "EXIT": 4, "DONE": 4, "PSUSP": 5, "USUSP": 5, "SSUSP": 5}
def parse_bjobs_fd(fd):
    """
    Parse the stdout fd of "bjobs -UF" into a python dictionary containing
    the information we need.

    Args:
        fd: iterable of output lines (a file object or list of str)
    Returns:
        dict mapping job id (str) to a dict with keys such as BatchJobId,
        JobStatus, JobStatusInfo, WorkerNode, and ExitCode.
    """
    job_info = {}
    cur_job_id = None
    cur_job_info = {}
    for line in fd:
        line = line.strip()
        m = job_id_re.search(line)
        if m:
            # A new job record begins; flush the previous one.
            if cur_job_id:
                job_info[cur_job_id] = cur_job_info
            cur_job_id = m.group(1)
            cur_job_info = {"BatchJobId": '"%s"' % cur_job_id.split(".")[0]}
        if cur_job_id == None:
            continue
        m = status_re.search(line)
        if m:
            status = status_mapping.get(m.group(1), 0)
            if status != 0:
                cur_job_info["JobStatus"] = str(status)
                cur_job_info["JobStatusInfo"] = m.group(1)
            continue
        m = exec_host_re.search(line)
        if m:
            # BUGFIX: str.translate(None, "<>") is the Python-2 API and
            # raised TypeError under Python 3; strip the brackets portably.
            hosts = m.group(1).replace("<", "").replace(">", "")
            worker_set = set(hosts.split(" "))
            cur_job_info["WorkerNode"] = '"%s"' % ' '.join(worker_set)
            continue
        # BUGFIX: use .get() -- lines can precede any Status <...> match,
        # in which case the plain subscript raised KeyError.
        job_state = cur_job_info.get("JobStatusInfo")
        if job_state == "RUN":
            # Running jobs report a placeholder exit code of 0.
            cur_job_info["ExitCode"] = ' 0'
        elif job_state == "EXIT":
            m = exit_status_re.search(line)
            if m:
                cur_job_info["ExitCode"] = ' %s' % m.group(1)
        continue
    if cur_job_id:
        # Flush the final record.
        job_info[cur_job_id] = cur_job_info
    return job_info
def job_dict_to_string(info):
    """Render a job-info dict in the "[key=value; ... ]" form the caller
    prints back to the blahp."""
    pieces = ["%s=%s;" % (key, value) for key, value in info.items()]
    return "[" + " ".join(pieces) + " ]"
def fill_cache(cache_location):
    """
    Query bjobs for all jobs and atomically (re)write the results cache file.

    Each cache row is "<jobid>\t<hex-encoded pickle of the job info dict>".
    Also writes the "cluster_type" marker file in the cache dir so later
    invocations know whether bhist-based completion stats are available.
    """
    log("Starting query to fill cache.")
    results = bjobs()
    log("Finished query to fill cache.")
    (fd, filename) = tempfile.mkstemp(dir = "/var/tmp")
    # Open the file with a proper python file object
    f = os.fdopen(fd, "w")
    writer = csv.writer(f, delimiter='\t')
    try:
        try:
            for key, val in results.items():
                key = key.split(".")[0]
                # Hex-encode the pickle so the payload is tab/newline safe.
                str_val = binascii.b2a_hex(pickle.dumps(val))
                if str is not bytes:
                    str_val = str_val.decode()
                writer.writerow([key, str_val])
            os.fsync(fd)
        except:
            # Never leave a half-written temp file behind; re-raise unchanged.
            os.unlink(filename)
            raise
    finally:
        f.close()
    # rename() is atomic, so readers always see a complete cache file.
    os.rename(filename, cache_location)

    # Create the cluster_type file
    uid = os.geteuid()
    username = pwd.getpwuid(uid).pw_name
    cache_dir = os.path.join("/var/tmp", "bjobs_cache_%s" % username)
    cluster_type_file = os.path.join(cache_dir, "cluster_type")
    (fd, filename) = tempfile.mkstemp(dir = "/var/tmp")
    global _cluster_type_cache
    if which("bhist"):
        # BUGFIX: os.write() requires bytes on Python 3 -- passing the str
        # "lsf" raised TypeError and the marker file was never populated.
        os.write(fd, b"lsf")
        _cluster_type_cache = "lsf"
    else:
        log("Unable to find cluster type")
    os.close(fd)
    os.rename(filename, cluster_type_file)
    # Reset the staleness reference point used by check_cache().
    global launchtime
    launchtime = time.time()
# NOTE: appears unused in this file; kept for compatibility with siblings.
cache_line_re = re.compile("([0-9]+[\.\w\-]+):\s+(.+)")
def cache_to_status(jobid, fd):
    """Scan the tab-separated cache file *fd* for *jobid*; return its
    unpickled job-info dict, or None when the job is not cached."""
    for entry in csv.reader(fd, delimiter='\t'):
        if entry[0] != jobid:
            continue
        hex_blob = entry[1]
        if str is not bytes:
            # a2b_hex wants bytes on Python 3.
            hex_blob = hex_blob.encode()
        return pickle.loads(binascii.a2b_hex(hex_blob))
def check_cache(jobid, recurse=True):
    """Look *jobid* up in the on-disk results cache, (re)filling the cache
    under an exclusive lock when it is missing, empty, or stale.

    The single level of recursion (recurse=False) re-reads the cache once
    after filling it; returns None if the job is still not present then.
    """
    uid = os.geteuid()
    username = pwd.getpwuid(uid).pw_name
    cache_dir = os.path.join("/var/tmp", "bjobs_cache_%s" % username)
    if recurse:
        try:
            s = os.stat(cache_dir)
        except OSError as oe:
            # errno 2 == ENOENT: first run, create the directory.
            if oe.errno != 2:
                raise
            os.mkdir(cache_dir, 0o755)
            s = os.stat(cache_dir)
        # Ownership guard against squatting in world-writable /var/tmp.
        if s.st_uid != uid:
            raise Exception("Unable to check cache because it is owned by UID %d" % s.st_uid)
    cache_location = os.path.join(cache_dir, "blahp_results_cache")
    try:
        fd = open(cache_location, "r+")
    except IOError as ie:
        # errno 2 == ENOENT: no cache file yet.
        if ie.errno != 2:
            raise
        # Create an empty file so we can hold the file lock
        fd = open(cache_location, "w+")
        ExclusiveLock(fd)
        # If someone grabbed the lock between when we opened and tried to
        # acquire, they may have filled the cache
        if os.stat(cache_location).st_size == 0:
            fill_cache(cache_location)
        fd.close()
        if recurse:
            return check_cache(jobid, recurse=False)
        else:
            return None
    ExclusiveLock(fd)
    s = os.fstat(fd.fileno())
    if s.st_uid != uid:
        raise Exception("Unable to check cache file because it is owned by UID %d" % s.st_uid)
    # Stale or empty cache: refresh it and retry the lookup once.
    if (s.st_size == 0) or (launchtime - s.st_mtime > cache_timeout):
        # If someone filled the cache between when we opened the file and
        # grabbed the lock, we may not need to fill the cache.
        s2 = os.stat(cache_location)
        if (s2.st_size == 0) or (launchtime - s2.st_mtime > cache_timeout):
            fill_cache(cache_location)
        if recurse:
            return check_cache(jobid, recurse=False)
        else:
            return None
    return cache_to_status(jobid, fd)
def main():
    """Entry point: resolve the jobid argument, answer from the cache when
    possible, otherwise query LSF directly, and print one status line.

    Output lines start with a digit which presumably is the blahp status
    protocol (0 = success followed by the job ad, 1 = error/usage) --
    confirm against the blahp consumer.
    """
    initLog()
    # Accept the optional -w argument, but ignore it
    if len(sys.argv) == 2:
        jobid_arg = sys.argv[1]
    elif len(sys.argv) == 3 and sys.argv[1] == "-w":
        jobid_arg = sys.argv[2]
    else:
        print("1Usage: lsf_status.sh lsf/<date>/<jobid>")
        return 1
    # Argument looks like "lsf/<date>/<jobid>"; keep only the bare job id.
    jobid = jobid_arg.split("/")[-1].split(".")[0]
    log("Checking cache for jobid %s" % jobid)
    cache_contents = None
    try:
        cache_contents = check_cache(jobid)
    except Exception as e:
        # Cache trouble is non-fatal: log it and fall through to live query.
        msg = "1ERROR: Internal exception, %s" % str(e)
        log(msg)
        #print msg
    if not cache_contents:
        log("Jobid %s not in cache; querying LSF" % jobid)
        results = bjobs(jobid)
        log("Finished querying LSF for jobid %s" % jobid)
        if not results or jobid not in results:
            log("1ERROR: Unable to find job %s" % jobid)
            print("1ERROR: Unable to find job %s" % jobid)
        else:
            log("0%s" % job_dict_to_string(results[jobid]))
            print("0%s" % job_dict_to_string(results[jobid]))
    else:
        log("Jobid %s in cache." % jobid)
        log("0%s" % job_dict_to_string(cache_contents))
        # NOTE(review): a cached record without a JobStatus key would raise
        # KeyError here -- verify all cache writers always set JobStatus.
        if cache_contents["JobStatus"] == '4' or cache_contents["JobStatus"] == '3':
            # Completed job: enrich the cached ad with bhist statistics.
            finished_job_stats = get_finished_job_stats(jobid)
            cache_contents.update(finished_job_stats)
        print("0%s" % job_dict_to_string(cache_contents))
    return 0
if __name__ == "__main__":
    try:
        sys.exit(main())
    except SystemExit:
        # Let deliberate exits (including main()'s return code) pass through.
        raise
    except Exception as e:
        # Last-resort handler: log the full traceback, then report the error
        # on stdout in the one-line "1ERROR: ..." form and exit 0 so the
        # caller parses the message instead of seeing a wrapper failure.
        exc_traceback = sys.exc_info()[2]
        tb = traceback.extract_tb(exc_traceback)
        log(traceback.format_exc())
        print("1ERROR: {0}: {1} (file {2}, line {3})".format(e.__class__.__name__, str(e).replace("\n", "\\n"),
            tb[-1].filename, tb[-1].lineno))
        sys.exit(0)
| htcondor/htcondor | src/blahp/src/scripts/lsf_status.py | Python | apache-2.0 | 21,206 | [
"Brian"
] | 9a99061639c1c975c971d8d5d65a40a0aa5af6d3858577c63bdc79ba971ce542 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.