| text (string, 12 chars - 1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 - 1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
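The rows below interleave each sample's raw file text with its metadata. The 64-character text_hash column appears to be a SHA-256 digest of the text column; a minimal verification sketch, assuming UTF-8 encoding and an unsalted digest (both assumptions):

import hashlib

def verify_row(text, text_hash):
    # Recompute the digest of the raw file contents and compare it to the stored hash.
    return hashlib.sha256(text.encode("utf-8")).hexdigest() == text_hash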
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Mar 18, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"
import unittest
import os
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class BorgQueenTest(unittest.TestCase):
def setUp(self):
drone = VaspToComputedEntryDrone()
self.queen = BorgQueen(drone, test_dir, 1)
def test_get_data(self):
data = self.queen.get_data()
self.assertEqual(len(data), 2)
def test_load_data(self):
drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone)
queen.load_data(os.path.join(test_dir, "assimilated.json"))
self.assertEqual(len(queen.get_data()), 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| Dioptas/pymatgen | pymatgen/apps/borg/tests/test_queen.py | Python | mit | 1,133 | ["pymatgen"] | d01ad9dc8a26c5857b20c668984657f6c4dd1a1e9c7a09a0104c24a463b4e824 |
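A minimal assimilation sketch based only on the API exercised in the test above; the VASP run directory is a placeholder:

from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen

drone = VaspToComputedEntryDrone()
queen = BorgQueen(drone, "path/to/vasp_runs", 1)  # assimilate the directory with one worker
entries = queen.get_data()                        # one entry per assimilated calculation
print(len(entries))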
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Protein'
db.create_table('neuroelectro_protein', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('gene', self.gf('django.db.models.fields.CharField')(max_length=20)),
('name', self.gf('django.db.models.fields.CharField')(max_length=300)),
('allenid', self.gf('django.db.models.fields.IntegerField')()),
('entrezid', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('neuroelectro', ['Protein'])
# Adding M2M table for field synonyms on 'Protein'
db.create_table('neuroelectro_protein_synonyms', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('protein', models.ForeignKey(orm['neuroelectro.protein'], null=False)),
('proteinsyn', models.ForeignKey(orm['neuroelectro.proteinsyn'], null=False))
))
db.create_unique('neuroelectro_protein_synonyms', ['protein_id', 'proteinsyn_id'])
# Adding M2M table for field in_situ_expts on 'Protein'
db.create_table('neuroelectro_protein_in_situ_expts', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('protein', models.ForeignKey(orm['neuroelectro.protein'], null=False)),
('insituexpt', models.ForeignKey(orm['neuroelectro.insituexpt'], null=False))
))
db.create_unique('neuroelectro_protein_in_situ_expts', ['protein_id', 'insituexpt_id'])
# Adding model 'InSituExpt'
db.create_table('neuroelectro_insituexpt', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('imageseriesid', self.gf('django.db.models.fields.IntegerField')()),
('plane', self.gf('django.db.models.fields.CharField')(max_length=20)),
('valid', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('neuroelectro', ['InSituExpt'])
# Adding M2M table for field regionexprs on 'InSituExpt'
db.create_table('neuroelectro_insituexpt_regionexprs', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('insituexpt', models.ForeignKey(orm['neuroelectro.insituexpt'], null=False)),
('regionexpr', models.ForeignKey(orm['neuroelectro.regionexpr'], null=False))
))
db.create_unique('neuroelectro_insituexpt_regionexprs', ['insituexpt_id', 'regionexpr_id'])
# Adding model 'BrainRegion'
db.create_table('neuroelectro_brainregion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
('abbrev', self.gf('django.db.models.fields.CharField')(max_length=10)),
('isallen', self.gf('django.db.models.fields.BooleanField')(default=False)),
('allenid', self.gf('django.db.models.fields.IntegerField')(default=0, null=True)),
('treedepth', self.gf('django.db.models.fields.IntegerField')(null=True)),
('color', self.gf('django.db.models.fields.CharField')(max_length=10, null=True)),
))
db.send_create_signal('neuroelectro', ['BrainRegion'])
# Adding model 'RegionExpr'
db.create_table('neuroelectro_regionexpr', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('val', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal('neuroelectro', ['RegionExpr'])
# Adding model 'Neuron'
db.create_table('neuroelectro_neuron', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=500)),
('nlex_id', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
))
db.send_create_signal('neuroelectro', ['Neuron'])
# Adding M2M table for field synonyms on 'Neuron'
db.create_table('neuroelectro_neuron_synonyms', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('neuron', models.ForeignKey(orm['neuroelectro.neuron'], null=False)),
('neuronsyn', models.ForeignKey(orm['neuroelectro.neuronsyn'], null=False))
))
db.create_unique('neuroelectro_neuron_synonyms', ['neuron_id', 'neuronsyn_id'])
# Adding M2M table for field regions on 'Neuron'
db.create_table('neuroelectro_neuron_regions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('neuron', models.ForeignKey(orm['neuroelectro.neuron'], null=False)),
('brainregion', models.ForeignKey(orm['neuroelectro.brainregion'], null=False))
))
db.create_unique('neuroelectro_neuron_regions', ['neuron_id', 'brainregion_id'])
# Adding model 'NeuronSyn'
db.create_table('neuroelectro_neuronsyn', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('term', self.gf('django.db.models.fields.CharField')(max_length=500)),
))
db.send_create_signal('neuroelectro', ['NeuronSyn'])
# Adding model 'ProteinSyn'
db.create_table('neuroelectro_proteinsyn', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('term', self.gf('django.db.models.fields.CharField')(max_length=500)),
))
db.send_create_signal('neuroelectro', ['ProteinSyn'])
def backwards(self, orm):
# Deleting model 'Protein'
db.delete_table('neuroelectro_protein')
# Removing M2M table for field synonyms on 'Protein'
db.delete_table('neuroelectro_protein_synonyms')
# Removing M2M table for field in_situ_expts on 'Protein'
db.delete_table('neuroelectro_protein_in_situ_expts')
# Deleting model 'InSituExpt'
db.delete_table('neuroelectro_insituexpt')
# Removing M2M table for field regionexprs on 'InSituExpt'
db.delete_table('neuroelectro_insituexpt_regionexprs')
# Deleting model 'BrainRegion'
db.delete_table('neuroelectro_brainregion')
# Deleting model 'RegionExpr'
db.delete_table('neuroelectro_regionexpr')
# Deleting model 'Neuron'
db.delete_table('neuroelectro_neuron')
# Removing M2M table for field synonyms on 'Neuron'
db.delete_table('neuroelectro_neuron_synonyms')
# Removing M2M table for field regions on 'Neuron'
db.delete_table('neuroelectro_neuron_regions')
# Deleting model 'NeuronSyn'
db.delete_table('neuroelectro_neuronsyn')
# Deleting model 'ProteinSyn'
db.delete_table('neuroelectro_proteinsyn')
models = {
'neuroelectro.brainregion': {
'Meta': {'object_name': 'BrainRegion'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'neuroelectro.insituexpt': {
'Meta': {'object_name': 'InSituExpt'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imageseriesid': ('django.db.models.fields.IntegerField', [], {}),
'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'neuroelectro.neuron': {
'Meta': {'object_name': 'Neuron'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.neuronsyn': {
'Meta': {'object_name': 'NeuronSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.protein': {
'Meta': {'object_name': 'Protein'},
'allenid': ('django.db.models.fields.IntegerField', [], {}),
'entrezid': ('django.db.models.fields.IntegerField', [], {}),
'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'})
},
'neuroelectro.proteinsyn': {
'Meta': {'object_name': 'ProteinSyn'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'neuroelectro.regionexpr': {
'Meta': {'object_name': 'RegionExpr'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'val': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['neuroelectro']
| neuroelectro/neuroelectro_org | neuroelectro/south_migrations/0001_initial.py | Python | gpl-2.0 | 11,097 | ["NEURON"] | 84e149d92a89cf841ef7ddb95e8094726ab5bd983874325e17260e3a6fbf5841 |
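For orientation, a hypothetical models.py fragment reconstructed from the forward migration above (field options are inferred from the frozen ORM; only Protein is shown):

from django.db import models

class Protein(models.Model):
    gene = models.CharField(max_length=20)
    name = models.CharField(max_length=300)
    allenid = models.IntegerField()
    entrezid = models.IntegerField()
    synonyms = models.ManyToManyField('ProteinSyn', null=True)
    in_situ_expts = models.ManyToManyField('InSituExpt', null=True)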
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPreprocesscore(RPackage):
"""A collection of pre-processing functions
A library of core preprocessing routines."""
homepage = "https://bioconductor.org/packages/preprocessCore"
git = "https://git.bioconductor.org/packages/preprocessCore.git"
version('1.52.1', commit='91de4ab67315dc2af68554ae3c48823f4b1ea8ac')
version('1.46.0', commit='8cfc3938c1b08424587f437ed6cd2ec43512500e')
version('1.44.0', commit='dc1dc61fc562aaff3fd9b11ab0d48c2d6b3a5b81')
version('1.42.0', commit='2e3a8baeacfaa1526d51252642772ea951015bba')
version('1.40.0', commit='969bb0e5cbd63d569502ae4e6eaadc1e216646dd')
version('1.38.1', commit='c58cb4c720eda0f1c733b989b14912093a7c5fbc')
| LLNL/spack | var/spack/repos/builtin/packages/r-preprocesscore/package.py | Python | lgpl-2.1 | 934 | ["Bioconductor"] | d42cfb5582a1cdc0a8f9f7236058b590465431933129c64b5df286bfd28bc020 |
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Donovan Parks"
__copyright__ = "Copyright 2014"
__credits__ = ["Donovan Parks"]
__license__ = "GPL3"
__maintainer__ = "Donovan Parks"
__email__ = "donovan.parks@gmail.com"
__status__ = "Development"
import os
import sys
import logging
import biolib.seq_io as seq_io
import biolib.seq_tk as seq_tk
from biolib.common import concatenate_files
from biolib.taxonomy import Taxonomy
from biolib.external.blast import Blast
from biolib.external.diamond import Diamond
from biolib.external.muscle import Muscle
from biolib.external.mafft import Mafft
from biolib.external.fasttree import FastTree
from biolib.external.execute import check_dependencies
from mingle.arb_parser import ArbParser
from mingle.common import validate_seq_ids
from numpy import (percentile as np_percentile,
mean as np_mean)
import dendropy
class BlastWorkflow():
"""Blast-based workflow for building a gene tree."""
def __init__(self, cpus):
"""Initialization.
Parameters
----------
cpus : int
Number of cpus to use during homology search.
"""
check_dependencies(['mafft',
'muscle',
'FastTreeMP',
'raxmlHPC-PTHREADS-SSE3',
'blastp',
't2t',
'seqmagick',
'trimal'])
self.logger = logging.getLogger('timestamp')
self.cpus = cpus
def extract_homologs_and_context(self, homologs, db_file, output_file):
"""Extract homologs sequences from database file, and local gene context.
This function extract sequences information for each
homolog and writes this to file for downstream processing.
In addition, it determines the local gene context for each
gene. Specifically, it saves the annotations for the
3 genes prior to and after a given gene.
This function assumes the database is sorted according
to the order genes are identified on each contig.
Parameters
----------
homologs : iterable
Unique identifiers of sequences to extract
db_file : str
Fasta file with sequences.
output_file : str
File to write homologs.
Returns
-------
dict
d[seq_id] -> list of annotations for pre-context genes
dict
d[seq_id] -> list of annotations for post-context genes
"""
gene_precontext = {}
gene_postcontext = {}
if len(homologs) == 0:
return gene_precontext, gene_postcontext
if type(homologs) is not set:
homologs = set(homologs)
fout = open(output_file, 'w')
local_context = [('unknown~unknown_x', None)] * 3
post_context_counter = {}
for seq_id, seq, annotation in seq_io.read_fasta_seq(db_file, keep_annotation=True):
if seq_id in homologs:
fout.write('>' + seq_id + ' ' + annotation + '\n')
fout.write(seq + '\n')
gene_precontext[seq_id] = list(local_context)
post_context_counter[seq_id] = 3
# record 3 precontext genes
local_context[0] = local_context[1]
local_context[1] = local_context[2]
local_context[2] = (seq_id, annotation)
# record 3 postcontext genes
if len(post_context_counter):
key_to_remove = None
for seq_id, count in post_context_counter.iteritems():
count -= 1
if count == -1:
gene_postcontext[seq_id] = list(local_context)
key_to_remove = seq_id
else:
post_context_counter[seq_id] = count
if key_to_remove:
post_context_counter.pop(key_to_remove)
fout.close()
# filter gene context to contain only genes on the same scaffold
gene_precontext = self._filter_gene_context(gene_precontext)
gene_postcontext = self._filter_gene_context(gene_postcontext)
return gene_precontext, gene_postcontext
def _filter_gene_context(self, gene_context):
"""Filter gene context to contain only genes on the same scaffold.
This function assumes sequence identifiers have the following format:
<genome_id>~<scaffold_id>_<gene_#> [gtdb_taxonomy] [NCBI organism name] [annotation]
Parameters
----------
gene_context : d[seq_id] -> [(seq_id, annotation), ..., (seq_id, annotation)]
Gene context.
Returns
-------
dict: d[seq_id] -> [annotation, ..., annotation]
Filtered to contain only annotations from the same scaffold.
"""
filtered_gene_context = {}
for seq_id, context in gene_context.iteritems():
_genome_id, gene_id = seq_id.split('~')
scaffold_id = gene_id[0:gene_id.rfind('_')]
filtered_context = []
for local_seq_id, annotation in context:
_local_genome_id, local_gene_id = local_seq_id.split('~')
local_scaffold_id = local_gene_id[0:local_gene_id.rfind('_')]
# strip organism name and IMG gene id
annotation = annotation[0:annotation.rfind('[')]
annotation = annotation[0:annotation.rfind('[')].strip()
if scaffold_id == local_scaffold_id:
filtered_context.append(annotation)
filtered_gene_context[seq_id] = filtered_context
return filtered_gene_context
def create_arb_metadata(self,
homologs,
msa_output,
taxonomy,
metadata,
gene_precontext,
gene_postcontext,
output_file):
"""Create metadata file suitable for import into ARB.
Parameters
----------
homologs : d[seq_id] -> namedtuple of BlastHit information
BLAST results for identified homologs.
msa_output : str
Fasta file with aligned homologs.
taxonomy : d[genome_id] -> list of taxa
Taxonomic information for genomes.
metadata : d[key] -> string
Additional metadata to write to ARB file.
gene_precontext : d[seq_id] -> list of annotations for pre-context genes
Annotation for genes preceding a gene.
gene_postcontext: d[seq_id] -> list of annotations for post-context genes
Annotation for genes following a gene.
output_file : str
File to write metadata information.
"""
arb_metadata_list = []
for seq_id, seq, annotation in seq_io.read_seq(msa_output, keep_annotation=True):
if '~' in seq_id:
genome_id, scaffold_gene_id = seq_id.split('~')
else:
scaffold_gene_id = seq_id
genome_id = ''
arb_metadata = {}
arb_metadata['db_name'] = seq_id
arb_metadata['genome_id'] = genome_id
arb_metadata['scaffold_id'] = scaffold_gene_id[0:scaffold_gene_id.rfind('_')]
arb_metadata['scaffold_gene_id'] = scaffold_gene_id
arb_metadata['gtdb_tax_string'] = ';'.join(taxonomy.get(genome_id, ''))
arb_metadata['aligned_seq'] = seq
for k, v in metadata.iteritems():
arb_metadata[k] = v
arb_metadata['gene_precontext'] = ' -> '.join(gene_precontext.get(seq_id, []))
arb_metadata['gene_postcontext'] = ' <- '.join(gene_postcontext.get(seq_id, []))
hit_info = homologs.get(seq_id, None)
if hit_info:
arb_metadata['blast_evalue'] = '%.1g' % hit_info.evalue
arb_metadata['blast_bitscore'] = '%.1f' % hit_info.bitscore
arb_metadata['blast_perc_identity'] = '%.1f' % hit_info.perc_identity
arb_metadata['blast_subject_perc_alignment_len'] = '%.1f' % hit_info.subject_perc_aln_len
arb_metadata['blast_query_perc_alignment_len'] = '%.1f' % hit_info.query_perc_aln_len
arb_metadata['blast_query_id'] = hit_info.query_id
if annotation:
annotation_split = annotation.split('] [')
if len(annotation_split) == 3:
# assume format is [gtdb_taxonomy] [NCBI organism name] [annotation]
gtdb_taxonomy, organism_name, gene_annotation = annotation_split
gtdb_taxonomy = gtdb_taxonomy.replace('[', '')
gene_annotation = gene_annotation.replace(']', '')
else:
# no idea what the format is, so just save the annotation
gene_annotation = annotation
organism_name = ''
gtdb_taxonomy = ''
arb_metadata['gene_annotation'] = gene_annotation
arb_metadata['organism'] = organism_name
arb_metadata['full_name'] = organism_name
arb_metadata_list.append(arb_metadata)
fout = open(output_file, 'w')
arb_parser = ArbParser()
arb_parser.write(arb_metadata_list, fout)
fout.close()
def _gene_distribution(self, seq_file):
"""Calculate length distribution of sequences."""
gene_lens = []
for seq_id, seq in seq_io.read_seq(seq_file):
gene_lens.append(len(seq))
p10, p50, p90 = np_percentile(gene_lens, [10, 50, 90])
return np_mean(gene_lens), max(gene_lens), min(gene_lens), p10, p50, p90
def _remove_stop_codons(self, input_file, output_file):
"""Remove stop codons at end of sequences."""
fout = open(output_file, 'w')
for seq_id, seq, annotation in seq_io.read_seq(input_file, keep_annotation=True):
fout.write('>%s %s\n' % (seq_id, annotation))
if seq[-1] == '*':
seq = seq[0:-1]
fout.write('%s\n' % seq)
fout.close()
def run(self, query_proteins,
db_file, custom_db_file,
taxonomy_file, custom_taxonomy_file,
evalue, per_identity, per_aln_len, max_matches, homology_search,
min_per_taxa, consensus, min_per_bp, use_trimAl, restrict_taxon,
msa_program, tree_program, prot_model,
output_dir):
"""Infer a gene tree for homologs genes identified by blast.
Workflow for inferring a gene tree from sequences identified as being
homologs to a set of query proteins. Homologs are identified using BLASTP
and a set of user-defined parameters.
Parameters
----------
query_proteins : str
Fasta file containing query proteins.
db_file : str
BLAST database of reference proteins.
custom_db_file : str
Custom database of proteins.
taxonomy_file : str
Taxonomic assignment of each reference genome.
custom_taxonomy_file : str
Taxonomic assignment of genomes in custom database.
evalue : float
E-value threshold used to define homolog.
per_identity : float
Percent identity threshold used to define a homolog.
per_aln_len : float
Alignment length threshold used to define a homolog.
max_matches : int
Maximum matches per query protein.
metadata : dict[genome_id] -> metadata dictionary
Metadata for genomes.
homology_search : str
Type of homology search to perform.
min_per_taxa : float
Minimum percentage of taxa required to retain a column.
consensus : float
Minimum percentage of the same amino acid required to retain column.
min_per_bp : float
Minimum percentage of base pairs required to keep trimmed sequence.
use_trimAl : boolean
Filter columns using trimAl.
restrict_taxon : str
Restrict alignment to specific taxonomic group (e.g., k__Archaea).
msa_program : str
Program to use for multiple sequence alignment ['mafft', 'muscle'].
tree_program : str
Program to use for tree inference ['fasttree', 'raxml'].
prot_model : str
Protein substitution model for tree inference ['WAG', 'LG'].
output_dir : str
Directory to store results.
"""
if not os.path.exists(query_proteins):
self.logger.error('Missing query file: %s' % query_proteins)
sys.exit()
if not os.path.exists(taxonomy_file):
self.logger.error('Missing taxonomy file: %s' % taxonomy_file)
sys.exit()
if not os.path.exists(db_file):
self.logger.error('Missing database file: %s' % db_file)
sys.exit()
# validate query sequence names for use with mingle
validate_seq_ids(query_proteins)
# read taxonomy file
self.logger.info('Reading taxonomy file.')
taxonomy = Taxonomy().read(taxonomy_file)
if custom_taxonomy_file:
custom_taxonomy = Taxonomy().read(custom_taxonomy_file)
taxonomy.update(custom_taxonomy)
# report distribution of query genes
mean_len, max_len, min_len, p10, p50, p90 = self._gene_distribution(query_proteins)
self.logger.info('Query gene lengths: min, mean, max = %d, %.1f, %d | p10, p50, p90 = %.1f, %.1f, %.1f' % (
min_len, mean_len, max_len,
p10, p50, p90))
# identify homologs using BLASTP
self.logger.info('Identifying homologs using %s.' % homology_search)
blast = Blast(self.cpus)
blast_output = os.path.join(output_dir, 'reference_hits.tsv')
if homology_search == 'diamond':
diamond = Diamond(self.cpus)
diamond.blastp(query_proteins, db_file, evalue, per_identity, per_aln_len, max_matches, blast_output, output_fmt='custom')
else:
blast.blastp(query_proteins, db_file, blast_output, evalue, max_matches, output_fmt='custom', task=homology_search)
homologs = blast.identify_homologs(blast_output, evalue, per_identity, per_aln_len)
self.logger.info('Identified %d homologs in reference database.' % len(homologs))
custom_homologs = None
if custom_db_file:
custom_blast_output = os.path.join(output_dir, 'custom_hits.tsv')
if homology_search == 'diamond':
diamond = Diamond(self.cpus)
diamond.blastp(query_proteins, custom_db_file, evalue, per_identity, per_aln_len, max_matches, custom_blast_output, output_fmt='custom')
else:
blast.blastp(query_proteins, custom_db_file, custom_blast_output, evalue, max_matches, output_fmt='custom', task=homology_search)
custom_homologs = blast.identify_homologs(custom_blast_output, evalue, per_identity, per_aln_len)
self.logger.info('Identified %d homologs in custom database.' % len(custom_homologs))
# restrict homologs to specific taxonomic group
if restrict_taxon:
self.logger.info('Restricting homologs to %s.' % restrict_taxon)
restricted_homologs = {}
for query_id, hit in homologs.iteritems():
genome_id = hit.subject_id.split('~')[0]
if restrict_taxon in taxonomy[genome_id]:
restricted_homologs[query_id] = hit
self.logger.info('%d of %d homologs in reference database are from the specified group.' % (len(restricted_homologs), len(homologs)))
homologs = restricted_homologs
if len(homologs) == 0:
self.logger.error('Too few homologs were identified. Gene tree cannot be inferred.')
sys.exit()
# extract homologs
self.logger.info('Extracting homologs and determining local gene context.')
db_homologs_tmp = os.path.join(output_dir, 'homologs_db.tmp')
gene_precontext, gene_postcontext = self.extract_homologs_and_context(homologs.keys(), db_file, db_homologs_tmp)
homolog_ouput_tmp = os.path.join(output_dir, 'homologs.faa.tmp')
if custom_homologs:
custom_db_homologs_tmp = os.path.join(output_dir, 'custom_homologs_db.tmp')
custom_gene_precontext, custom_gene_postcontext = self.extract_homologs_and_context(custom_homologs.keys(), custom_db_file, custom_db_homologs_tmp)
gene_precontext.update(custom_gene_precontext)
gene_postcontext.update(custom_gene_postcontext)
homologs.update(custom_homologs)
concatenate_files([query_proteins, db_homologs_tmp, custom_db_homologs_tmp], homolog_ouput_tmp)
os.remove(custom_db_homologs_tmp)
else:
concatenate_files([query_proteins, db_homologs_tmp], homolog_ouput_tmp)
os.remove(db_homologs_tmp)
# remove stop codons
homolog_ouput = os.path.join(output_dir, 'homologs.faa')
self._remove_stop_codons(homolog_ouput_tmp, homolog_ouput)
os.remove(homolog_ouput_tmp)
# report gene length distribution of homologs
mean_len, max_len, min_len, p10, p50, p90 = self._gene_distribution(homolog_ouput)
self.logger.info('Homolog gene lengths: min, mean, max = %d, %.1f, %d | p10, p50, p90 = %.1f, %.1f, %.1f' % (
min_len, mean_len, max_len,
p10, p50, p90))
# infer multiple sequence alignment
self.logger.info('Inferring multiple sequence alignment with %s.' % msa_program)
if msa_program == 'mafft':
mafft = Mafft(self.cpus)
msa_output = os.path.join(output_dir, 'homologs.aligned.faa')
msa_log = os.path.join(output_dir, 'mafft.log')
mafft.run(homolog_ouput, msa_output, msa_log)
elif msa_program == 'muscle':
muscle = Muscle()
msa_output = os.path.join(output_dir, 'homologs.aligned.faa')
msa_log = os.path.join(output_dir, 'muscle.log')
muscle.run(homolog_ouput, msa_output, msa_log)
# trim multiple sequence alignment
trimmed_msa_output = os.path.join(output_dir, 'homologs.trimmed.aligned.faa')
if use_trimAl:
self.logger.info('Using trimAl to filter poorly represented columns from alignment.')
# convert MSA to relaxed phylip format
phylip_msa_output = msa_output.replace('.faa', '.phyx')
cmd = 'seqmagick convert %s %s' % (msa_output, phylip_msa_output)
os.system(cmd)
tmp_output = os.path.join(output_dir, 'tmp.faa')
cmd = 'trimal -in %s -out %s -automated1 -fasta' % (phylip_msa_output, tmp_output)
os.system(cmd)
cmd = 'trimal -in %s -out %s -resoverlap 0.75 -seqoverlap %f' % (tmp_output, trimmed_msa_output, min_per_bp)
os.system(cmd)
seqs = seq_io.read_fasta(msa_output)
tmp_seqs = seq_io.read_fasta(tmp_output)
trimmed_seqs = seq_io.read_fasta(trimmed_msa_output)
self.logger.info('Trimmed alignment from %d to %d AA.' % (len(seqs.values()[0]), len(trimmed_seqs.values()[0])))
self.logger.info('%d of %d taxa were deemed to be too short and removed.' % (len(tmp_seqs)-len(trimmed_seqs), len(seqs)))
os.remove(tmp_output)
else:
self.logger.info('Trimming poorly represented columns from alignment.')
seqs = seq_io.read_fasta(msa_output, keep_annotation=True)
trimmed_seqs, pruned_seqs, min_taxa_filtered, consensus_filtered = seq_tk.trim_seqs(seqs,
min_per_taxa / 100.0,
consensus / 100.0,
min_per_bp / 100.0)
self.logger.info('Trimmed alignment from %d to %d AA (%d by minimum taxa percent, %d by consensus).' % (len(seqs.values()[0]),
len(trimmed_seqs.values()[0]),
min_taxa_filtered,
consensus_filtered))
self.logger.info('%d of %d taxa were deemed to be too short and removed.' % (len(pruned_seqs), len(seqs)))
if len(pruned_seqs) > 0:
prune_seqs_out = os.path.join(output_dir, 'filtered_seqs.too_short.txt')
self.logger.info('Pruned sequences written to %s.' % prune_seqs_out)
seq_io.write_fasta(pruned_seqs, prune_seqs_out)
if len(pruned_seqs) == len(seqs):
self.logger.error('Too many sequences were pruned. Gene tree cannot be inferred.')
sys.exit()
seq_io.write_fasta(trimmed_seqs, trimmed_msa_output)
# infer tree
if tree_program == 'fasttree':
self.logger.info('Inferring gene tree with FastTree using %s+GAMMA.' % prot_model)
fasttree = FastTree(multithreaded=(self.cpus > 1))
tree_unrooted_output = os.path.join(output_dir, 'homologs.unrooted.tree')
tree_log = os.path.join(output_dir, 'homologs.tree.log')
tree_output_log = os.path.join(output_dir, 'fasttree.log')
fasttree.run(trimmed_msa_output, 'prot', prot_model, tree_unrooted_output, tree_log, tree_output_log)
elif tree_program == 'raxml':
self.logger.info('Inferring gene tree with RAxML using PROTGAMMA%s.' % prot_model)
# create phylip MSA file
phylip_msa_file = trimmed_msa_output.replace('.faa', '.phyx')
cmd = 'seqmagick convert %s %s' % (trimmed_msa_output, phylip_msa_file)
os.system(cmd)
raxml_dir = os.path.abspath(os.path.join(output_dir, 'raxml'))
if not os.path.exists(raxml_dir):
os.makedirs(raxml_dir)
tree_output_log = os.path.join(output_dir, 'raxml.log')
cmd = 'raxmlHPC-PTHREADS-SSE3 -s %s -m PROTGAMMA%s -p 12345 -w %s -n %s -T %d > %s' % (phylip_msa_file,
prot_model,
raxml_dir,
'mingle',
self.cpus,
tree_output_log)
os.system(cmd)
tree_unrooted_output = os.path.join(raxml_dir, 'RAxML_bestTree.mingle')
# root tree at midpoint
self.logger.info('Rooting tree at midpoint.')
tree = dendropy.Tree.get_from_path(tree_unrooted_output, schema='newick', rooting="force-rooted", preserve_underscores=True)
if len(trimmed_seqs) > 2:
tree.reroot_at_midpoint(update_bipartitions=False)
tree_output = os.path.join(output_dir, 'homologs.rooted.tree')
tree.write_to_path(tree_output, schema='newick', suppress_rooting=True, unquoted_underscores=True)
# create tax2tree consensus map and decorate tree
self.logger.info('Decorating internal tree nodes with tax2tree.')
output_taxonomy_file = os.path.join(output_dir, 'taxonomy.tsv')
fout = open(output_taxonomy_file, 'w')
for homolog_id in homologs.keys():
genome_id = homolog_id.split('~')[0]
t = taxonomy.get(genome_id, None)
if t:
fout.write(homolog_id + '\t' + ';'.join(t) + '\n')
fout.close()
t2t_tree = os.path.join(output_dir, 'homologs.tax2tree.tree')
cmd = 't2t decorate -m %s -t %s -o %s' % (output_taxonomy_file, tree_output, t2t_tree)
os.system(cmd)
# setup metadata for ARB file
src_dir = os.path.dirname(os.path.realpath(__file__))
version_file = open(os.path.join(src_dir, 'VERSION'))
metadata = {}
metadata['mingle_version'] = version_file.read().strip()
metadata['mingle_query_proteins'] = query_proteins
metadata['mingle_db_file'] = db_file
metadata['mingle_taxonomy_file'] = taxonomy_file
metadata['mingle_blast_evalue'] = str(evalue)
metadata['mingle_blast_per_identity'] = str(per_identity)
metadata['mingle_blast_per_aln_len'] = str(per_aln_len)
metadata['mingle_blast_max_matches'] = str(max_matches)
metadata['mingle_homology_search'] = homology_search
metadata['mingle_msa_min_per_taxa'] = str(min_per_taxa)
metadata['mingle_msa_consensus'] = str(consensus)
metadata['mingle_msa_min_per_bp'] = str(min_per_bp)
metadata['mingle_msa_program'] = msa_program
metadata['mingle_tree_program'] = tree_program
metadata['mingle_tree_prot_model'] = prot_model
# create ARB metadata file
self.logger.info('Creating ARB metadata file.')
arb_metadata_file = os.path.join(output_dir, 'arb.metadata.txt')
self.create_arb_metadata(homologs, trimmed_msa_output, taxonomy,
metadata,
gene_precontext, gene_postcontext,
arb_metadata_file)
| dparks1134/mingle | mingle/blast_workflow.py | Python | gpl-3.0 | 27,972 | ["BLAST"] | 9ea6c1ae2165930daefd7e005e6c66e71ffbae465c21648b2c9f74b702033c20 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinx.ext.imgmath',
'matplotlib.sphinxext.plot_directive'
]
source_suffix = '.rst'
master_doc = 'index'
project = 'Javelin'
copyright = '2017, Ross Whitfield'
author = 'Ross Whitfield'
version = '0.1.0'
release = '0.1.0'
exclude_patterns = ['_build']
pygments_style = 'friendly'
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Javelindoc'
latex_documents = [
(master_doc, 'Javelin.tex', 'Javelin Documentation',
'Ross Whitfield', 'manual'),
]
intersphinx_mapping = {'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'xarray': ('https://xarray.pydata.org/en/stable/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'ase': ('https://wiki.fysik.dtu.dk/ase/', None),
'diffpy.Structure': ('https://www.diffpy.org/diffpy.structure/', None)}
autodoc_default_flags = ['members', 'undoc-members']
# Use legacy numpy printing. This fix is made to keep doctests functional.
try:
np.set_printoptions(legacy='1.13')
except TypeError:
pass
| rosswhitfield/javelin | docs/conf.py | Python | mit | 1,331 | ["ASE"] | 83bd4bacb03ba6003eb6a39076035a03b4ae2988e8384ad8bd41777e127a4ae9 |
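The legacy printing switch in the conf.py above exists because NumPy 1.14 changed array repr spacing; a small illustration of the difference the doctests depend on (values are illustrative):

import numpy as np

np.set_printoptions(legacy='1.13')
print(np.array([0.1]))   # 1.13-style output: [ 0.1]
np.set_printoptions(legacy=False)
print(np.array([0.1]))   # modern output:     [0.1]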
#!/usr/bin/env python
#
# Prepares Amazon network to run either vagrant-development or
# vagrant-production. Configures subnets, virtual private clouds,
# security groups, etc
#
# This is currently a one-way operation; you have to delete these
# manually. Visit https://console.aws.amazon.com/vpc and remove the
# VPC tagged TFB_Network and that should undo all changes made by
# this script (for production mode). For development mode, the only
# things created are a security group and a subnet, so find those in
# your standard EC2 console https://console.aws.amazon.com/ec2 and
# delete them manually
import subprocess
import json
import logging
import sys
log = logging.getLogger('aws')
nwtags = "Key=Project,Value=FrameworkBenchmarks Key=TFB_Role,Value=network"
def setup_vpc():
'''Sets up a Virtual Private Cloud to allow hosts to communicate'''
# Setup VPC
log.info("Creating a new Virtual Private Cloud...")
log.info(" See details at http://console.aws.amazon.com/vpc")
vpc = run_aws("create-vpc --cidr-block 172.16.0.0/16 --instance-tenancy default")
vpcid = vpc["Vpc"]["VpcId"]
run_aws("modify-vpc-attribute --vpc-id %s --enable-dns-support" % vpcid)
run_aws("modify-vpc-attribute --vpc-id %s --no-enable-dns-hostnames" % vpcid)
run_aws("create-tags --resources %s --tags %s Key=Name,Value=TFB_Network" % (vpcid, nwtags))
log.debug(run_aws("describe-vpcs --vpc-id %s" % vpcid, load=False))
# Setup internet gateway
log.info("Creating InternetGateway for the VPC...")
igw = run_aws("create-internet-gateway")
igwid = igw["InternetGateway"]["InternetGatewayId"]
run_aws("create-tags --resources %s --tags %s Key=Name,Value=TFB_Gateway" % (igwid, nwtags))
run_aws("attach-internet-gateway --internet-gateway-id %s --vpc-id %s" % (igwid, vpcid))
log.debug(run_aws("describe-internet-gateways --internet-gateway-ids %s" % igwid, load=False))
# Setup public subnet
# NOTE: We considered using a public and private subnet, but
# this requires us to launch an extra EC2 instance for the duration of the
# benchmark to handle the NAT between the public subnet and the private subnet,
# so the cost is quite high. Also, Internet traffic is only generated during
# framework setup stages (e.g. during software installation), not during the
# running of the benchmark.
# We chose to use a single public subnet and filter inbound traffic to prevent
# interference during the test
log.info("Creating subnet inside the VPC...")
pubsub = run_aws("create-subnet --vpc-id %s --cidr-block 172.16.0.0/24" % vpcid)
pubid = pubsub["Subnet"]["SubnetId"]
log.debug("Found subnet id: %s", pubid)
#run_aws("modify-subnet-attribute --subnet-id %s --map-public-ip-on-launch" % pubid)
run_aws("create-tags --resources %s --tags %s Key=Name,Value=TFB_Public" % (pubid, nwtags))
log.debug(run_aws("describe-subnets --subnet-ids %s" % pubid, load=False))
# Setup routing
log.info("Creating routing table for VPC...")
route = run_aws("describe-route-tables --filters Name=vpc-id,Values=%s" % vpcid)
routeid = route["RouteTables"][0]["RouteTableId"]
run_aws("create-tags --resources %s --tags %s Key=Name,Value=TFB_Routing" % (routeid, nwtags))
log.info(" Creating route to internet...")
run_aws("create-route --route-table-id %s --destination-cidr-block 0.0.0.0/0 --gateway-id %s" % (routeid, igwid))
log.info(" Associating routing table and subnet...")
run_aws("associate-route-table --route-table-id %s --subnet-id %s" % (routeid, pubid))
# Setup default security group for instances launched in the VPC
log.info("Creating default security group for VPC")
group = run_aws("create-security-group --group-name TFB_Security --vpc-id %s --description 'FrameworkBenchmarks security group'" % vpcid)
groupid = group["GroupId"]
run_aws("create-tags --resources %s --tags %s Key=Name,Value=TFB_Security" % (groupid, nwtags))
run_aws("authorize-security-group-ingress --group-id %s --protocol tcp --port 22 --cidr 0.0.0.0/0" % groupid)
# run_aws("authorize-security-group-egress --group-id %s --protocol -1 --cidr 0.0.0.0/0 --port all" % groupid)
run_aws("authorize-security-group-ingress --group-id %s --source-group %s --protocol -1 --port -1" % (groupid, groupid))
log.info("Complete.")
log.info(" Here are the environment variables you should use:")
print "export TFB_AWS_SUBNET=%s" % pubid
print "export TFB_AWS_SEC_GROUP=%s" % groupid
return vpcid
def unset_vpc(vpcid):
'''Doesn't work at the moment, we need to delete all the other items first'''
run_aws("delete-vpc --vpc-id %s" % vpcid)
def run_aws(command, prefix=True, load=True):
'''Runs an AWS command and returns the JSON
prefix: Should we prefix "aws ec2 " to your command
load: Should we auto-load the response JSON into a python object?
'''
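# e.g. run_aws("describe-vpcs") shells out to "aws ec2 describe-vpcs" and returns the
# parsed JSON; pass load=False to get the raw response string back instead.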
if prefix:
command = "aws ec2 %s" % command
log.debug("Request : %s", command)
result = subprocess.check_output(command, shell=True)
log.debug("Response: %s", result)
if load and result != '':
try:
return json.loads(result)
except ValueError:
log.error("Could not parse result '%s' as JSON for command '%s'", result, command)
raise
else:
return result
if __name__ == "__main__":
args = sys.argv[1:]
logging.basicConfig(level=logging.INFO)
usage = '''Usage: setup_aws.py
Prepares Amazon network to run either vagrant-development
or vagrant-production. Configures subnets, virtual private
clouds, security groups, etc.
Outputs TFB_AWS_SEC_GROUP and TFB_AWS_SUBNET
Currently is a one-way operation, you have to delete these
manually. This script expects standard AWS environment
variables to exist e.g. AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY, AWS_DEFAULT_REGION
'''
if len(args) != 0:
print usage
sys.exit(1)
setup_vpc()
| PermeAgility/FrameworkBenchmarks | deployment/vagrant-aws/setup_aws.py | Python | bsd-3-clause | 5,828 | ["VisIt"] | e0eee4445962e2389d93c8e48eae82975b48304ae554ad5618c754b534bf4e5e |
#
# rml_send_gui
# graphical interface for sending jobs to the Roland Modela
#
# Brian Mayton <bmayton@media.mit.edu>
# MIT 2011-2014
#
# (c) Massachusetts Institute of Technology 2011-2014
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT.
# imports
from __future__ import with_statement
import serial
import sys
# import wx # Fiore Basile - Removed dependency
import threading
import time
import math
# global constants
RML_UNITS=40.0
SPEED_TRAVERSE = 15.0
# utility functions
def dist(x1, y1, z1, x2, y2, z2):
return math.sqrt(
pow(x1-x2, 2.0) +
pow(y1-y2, 2.0) +
pow(z1-z2, 2.0)
)
# Edited by Fiore on 10/06/2014
# to support callbacks so that info can be passed to the SockJS server
class RMLSender:
"""This class implements the parsing of RML files and sending to the
Modela."""
def __init__(self, port="/dev/ttyUSB0", target=None):
self.serialPort = port
self.serial = None
self.cmds = []
self.target = target
self.xr=[0,1]; self.yr=[0,1]; self.zr=[0,1]
self.paths = []
self.segments_done = []
self.traverses = []
self.traverses_done = []
self.speed_feed = 15.0
self.speed_plunge = 5.0
self.total_distance = 1.0
self.distance_milled = 0.0
self.total_time = 1.0
self.time_remaining = 1.0
self.time_start = None
self.current_cmd = ""
self.cur_cmd_start = time.time()
self.cur_cmd_duration = 0.0
self.running = False
self.thread = threading.Thread(target=self.thread_fn)
self.should_stop = threading.Event()
self.done = threading.Event()
self.aborted = threading.Event()
self.lock = threading.Lock()
def load_file(self, filename):
self.cmds = []
f = open(filename, "r")
data = f.read()
f.close()
self.cmds = data.split(";")
self.calculate_metrics()
## FB: Added callback
if self.target:
self.target.callback('millingInfo', self)
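# Illustrative RML fragment of the kind parsed by calculate_metrics() below (values
# are made up): "!PZ-40,400;VS15;!VZ5;PU0,0;Z400,400,-40;PU0,0;"
# i.e. pen down/up heights, XY speed, Z speed, then traverse (PU) and cutting (Z) moves.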
def calculate_metrics(self):
paths = []
traverses = []
cur_path = []
xmin, ymin, zmin = 99999999, 999999999, 999999999
xmax, ymax, zmax = 0, 0, 0
xpos, ypos, zpos = 0, 0, 0
zup, zdown = 0, 0
speeds, speedz = 0.0, 0.0
total_distance = 0.0
total_time = 0.0
in_path = False
for cmd in self.cmds:
cmd=cmd.strip()
try:
if cmd[:3] == "!PZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
zup = int(params[1])
zdown = int(params[0])
print "pen: %d up, %d down" % (zup, zdown)
elif cmd[:2] == "VS":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
speeds = float(params[0])
print "xy speed: %f mm/s" % (speeds)
elif cmd[:3] == "!VZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
speedz = float(params[0])
print "z speed: %f mm/s" % (speedz)
elif cmd[:2] == "PU":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
if len(params) < 2:
continue
x = int(params[0])
y = int(params[1])
z = zup
d = dist(xpos, ypos, zpos, x, y, z)
total_distance += d
total_time += d / RML_UNITS / SPEED_TRAVERSE
traverses.append([(xpos, ypos, zpos), (x, y, z)])
xpos = x; ypos = y; zpos = z;
xmax = max(x, xmax); ymax = max(y, ymax); zmax = max(z, zmax)
xmin = min(x, xmin); ymin = min(y, ymin); zmin = min(z, zmin)
if len(cur_path) > 0:
paths.append(cur_path)
cur_path = []
elif cmd[:1] == "Z":
params = cmd[1:].split(',')
if len(params) < 2:
params = cmd[1:].split(' ')
x = int(params[0])
y = int(params[1])
z = int(params[2])
dist_xy = math.hypot(xpos-x, ypos-y) / RML_UNITS
dist_z = float(zpos-z) / RML_UNITS
time_xy = dist_xy / speeds
time_z = dist_z / speedz
total_time += max(time_xy, time_z)
total_distance += dist(xpos, ypos, zpos, x, y, z)
xpos = x; ypos = y; zpos = z;
xmax = max(x, xmax); ymax = max(y, ymax); zmax = max(z, zmax)
xmin = min(x, xmin); ymin = min(y, ymin); zmin = min(z, zmin)
cur_path.append((x, y, z))
except:
print "ignoring: %s" % cmd
pass
self.paths = paths
self.traverses = traverses
self.speed_feed = speeds
self.speed_plunge = speedz
self.xr = (xmin, xmax)
self.yr = (ymin, ymax)
self.zr = (zmin, zmax)
self.total_distance = total_distance
if self.total_distance == 0: self.total_distance = 1.0
self.total_time = total_time
if self.total_time == 0: self.total_time = 1.0
self.time_remaining = total_time
def start(self):
## Edit FB: we open the serial only when we start the thread
self.serial = serial.Serial(self.serialPort, baudrate=9600, rtscts=True, timeout=0)
self.running = True
self.time_start = time.time()
self.thread.start()
def abort(self):
if self.running and not self.done.isSet():
self.should_stop.set()
def thread_fn(self):
xmax, ymax, zmax = 0, 0, 0
xpos, ypos, zpos = 0, 0, 0
zup, zdown = 0, 0
speeds, speedz = 0.0, 0.0
with self.lock:
cmds = self.cmds
for cmd in cmds:
if self.target:
self.target.callback('progress', self)
cmd = cmd.strip()
if self.should_stop.isSet():
cmd="PA;PA;!VZ10;!PZ0,100;PU0,0;PD0,0;!MC0;"
self.serial.write(cmd)
self.serial.close()
self.aborted.set()
if self.target:
self.target.callback('done', self)
return
cmd=cmd.strip()
with self.lock:
self.current_cmd = cmd
self.cur_cmd_start = time.time()
self.cur_cmd_duration = 0.0
while (self.serial.getDSR() != True):
time.sleep(0.001)
self.serial.write(cmd)
try:
if cmd[:3] == "!PZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
zup = int(params[1])
zdown = int(params[0])
elif cmd[:2] == "VS":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
speeds = float(params[0])
with self.lock:
self.speed_feed = speeds
elif cmd[:3] == "!VZ":
params = cmd[3:].split(',')
if len(params) < 2:
params = cmd[3:].split(' ')
speedz = float(params[0])
with self.lock:
self.speed_plunge = speedz
elif cmd[:2] == "PU":
params = cmd[2:].split(',')
if len(params) < 2:
params = cmd[2:].split(' ')
if len(params) < 2:
continue
x = int(params[0])
y = int(params[1])
z = zup
d = dist(xpos, ypos, zpos, x, y, z)
t = d / RML_UNITS / SPEED_TRAVERSE
with self.lock:
self.cur_cmd_duration = t
self.time_remaining -= t
self.distance_milled += d
self.traverses_done.append(((xpos, ypos, zpos), (x, y, z)))
xpos = x; ypos = y; zpos = z;
elif cmd[:1] == "Z":
params = cmd[1:].split(',')
if len(params) < 2:
params = cmd[1:].split(' ')
x = int(params[0])
y = int(params[1])
z = int(params[2])
dist_xy = math.hypot(xpos-x, ypos-y) / RML_UNITS
dist_z = float(zpos-z) / RML_UNITS
time_xy = dist_xy / speeds
time_z = dist_z / speedz
t = max(time_xy, time_z)
with self.lock:
self.cur_cmd_duration = t
self.time_remaining -= t
self.distance_milled += dist(xpos, ypos, zpos, x, y, z)
self.segments_done.append(((xpos, ypos, zpos), (x, y, z)))
xpos = x; ypos = y; zpos = z;
time.sleep(self.cur_cmd_duration)
except:
print "ignoring: %s" % cmd
self.done.set()
if self.target:
self.target.callback('done', self)
| fibasile/pi.mill | pimill/server/rml_send.py | Python | mit | 10,028 | ["Brian"] | eb5549c94a3bcb082ed9f1434fd5bb41741120ae1fad3dee007c7e7600c4ac5a |
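A minimal usage sketch of the sender class above; the port and file name are placeholders, and a real run requires a Modela on the serial port:

sender = RMLSender(port="/dev/ttyUSB0")
sender.load_file("job.rml")   # parse commands and compute time/distance estimates
sender.start()                # open the serial port and stream commands in a thread
sender.done.wait()            # block until the whole job has been sent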
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2010 Brian G. Matherly
# Copyright (C) 2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Class handling language-specific displaying of names.
Specific symbols for parts of a name are defined:
====== ===============================================================
Symbol Description
====== ===============================================================
't' title
'f' given (first names)
'l' full surname (lastname)
'c' callname
'x' nick name, call, or otherwise the first of the given names (common name)
'i' initials of the first names
'm' primary surname (main)
'0m' primary surname prefix
'1m' primary surname surname
'2m' primary surname connector
'y' pa/matronymic surname (father/mother) - assumed unique
'0y' pa/matronymic prefix
'1y' pa/matronymic surname
'2y' pa/matronymic connector
'o' surnames without pa/matronymic and primary
'r' non primary surnames (rest)
'p' list of all prefixes
'q' surnames without prefixes and connectors
's' suffix
'n' nick name
'g' family nick name
====== ===============================================================
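For example, with these symbols the format string "%l, %f %s" renders a name as
"Surname, Given Suffix" (the LNFN preset defined further down in this module).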
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
import logging
LOG = logging.getLogger(".gramps.gen")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..const import ARABIC_COMMA, ARABIC_SEMICOLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..lib.name import Name
from ..lib.nameorigintype import NameOriginType
try:
from ..config import config
WITH_GRAMPS_CONFIG=True
except ImportError:
WITH_GRAMPS_CONFIG=False
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_FIRSTNAME = 4
_SURNAME_LIST = 5
_SUFFIX = 6
_TITLE = 7
_TYPE = 8
_GROUP = 9
_SORT = 10
_DISPLAY = 11
_CALL = 12
_NICK = 13
_FAMNICK = 14
_SURNAME_IN_LIST = 0
_PREFIX_IN_LIST = 1
_PRIMARY_IN_LIST = 2
_TYPE_IN_LIST = 3
_CONNECTOR_IN_LIST = 4
_ORIGINPATRO = NameOriginType.PATRONYMIC
_ORIGINMATRO = NameOriginType.MATRONYMIC
_ACT = True
_INA = False
_F_NAME = 0 # name of the format
_F_FMT = 1 # the format string
_F_ACT = 2 # if the format is active
_F_FN = 3 # name format function
_F_RAWFN = 4 # name format raw function
PAT_AS_SURN = False
#-------------------------------------------------------------------------
#
# Local functions
#
#-------------------------------------------------------------------------
# Because of occurring in an exec(), this couldn't be in a lambda:
# we sort names first on longest first, then last letter first, this to
# avoid translations of shorter terms which appear in longer ones, eg
# namelast may not be mistaken with name, so namelast must first be
# converted to %k before name is converted.
##def _make_cmp(a, b): return -cmp((len(a[1]),a[1]), (len(b[1]), b[1]))
def _make_cmp_key(a): return (len(a[1]),a[1]) # set reverse to True!!
#-------------------------------------------------------------------------
#
# NameDisplayError class
#
#-------------------------------------------------------------------------
class NameDisplayError(Exception):
"""
Error used to report that the name display format string is invalid.
"""
def __init__(self, value):
Exception.__init__(self)
self.value = value
def __str__(self):
return self.value
#-------------------------------------------------------------------------
#
# Functions to extract data from raw lists (unserialized objects)
#
#-------------------------------------------------------------------------
def _raw_full_surname(raw_surn_data_list):
"""method for the 'l' symbol: full surnames"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_primary_surname(raw_surn_data_list):
"""method for the 'm' symbol: primary surname"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
#if there are multiple surnames, return the primary. If there
#is only one surname, then primary has little meaning, and we
#assume a pa/matronymic should not be given as primary as it
#normally is defined independently
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_primary_surname_only(raw_surn_data_list):
"""method to obtain the raw primary surname data, so this returns a string
"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_SURNAME_IN_LIST]
return ''
def _raw_primary_prefix_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_PREFIX_IN_LIST]
return ''
def _raw_primary_conn_only(raw_surn_data_list):
"""method to obtain the raw primary surname data"""
global PAT_AS_SURN
nrsur = len(raw_surn_data_list)
for raw_surn_data in raw_surn_data_list:
if raw_surn_data[_PRIMARY_IN_LIST]:
if not PAT_AS_SURN and nrsur == 1 and \
(raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO
or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
return ''
else:
return raw_surn_data[_CONNECTOR_IN_LIST]
return ''
def _raw_patro_surname(raw_surn_data_list):
"""method for the 'y' symbol: patronymic surname"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_surname_only(raw_surn_data_list):
"""method for the '1y' symbol: patronymic surname only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_prefix_only(raw_surn_data_list):
"""method for the '0y' symbol: patronymic prefix only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_patro_conn_only(raw_surn_data_list):
"""method for the '2y' symbol: patronymic conn only"""
for raw_surn_data in raw_surn_data_list:
if (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or
raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO):
result = "%s" % (raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
return ''
def _raw_nonpatro_surname(raw_surn_data_list):
"""method for the 'o' symbol: full surnames without pa/matronymic or
primary
"""
result = ""
for raw_surn_data in raw_surn_data_list:
if ((not raw_surn_data[_PRIMARY_IN_LIST]) and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINPATRO and
raw_surn_data[_TYPE_IN_LIST][0] != _ORIGINMATRO):
result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_nonprimary_surname(raw_surn_data_list):
"""method for the 'r' symbol: nonprimary surnames"""
result = ''
for raw_surn_data in raw_surn_data_list:
if not raw_surn_data[_PRIMARY_IN_LIST]:
result = "%s %s %s %s" % (result, raw_surn_data[_PREFIX_IN_LIST],
raw_surn_data[_SURNAME_IN_LIST],
raw_surn_data[_CONNECTOR_IN_LIST])
return ' '.join(result.split())
def _raw_prefix_surname(raw_surn_data_list):
"""method for the 'p' symbol: all prefixes"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_PREFIX_IN_LIST])
return ' '.join(result.split()).strip()
def _raw_single_surname(raw_surn_data_list):
"""method for the 'q' symbol: surnames without prefix and connectors"""
result = ""
for raw_surn_data in raw_surn_data_list:
result += "%s " % (raw_surn_data[_SURNAME_IN_LIST])
return ' '.join(result.split()).strip()
def cleanup_name(namestring):
"""Remove too long white space due to missing name parts,
so "a b" becomes "a b" and "a , b" becomes "a, b"
"""
parts = namestring.split()
if not parts:
return ""
result = parts[0]
for val in parts[1:]:
if len(val) == 1 and val in [',', ';', ':',
ARABIC_COMMA, ARABIC_SEMICOLON]:
result += val
else:
result += ' ' + val
return result
#-------------------------------------------------------------------------
#
# NameDisplay class
#
#-------------------------------------------------------------------------
class NameDisplay:
"""
Base class for displaying of Name instances.
Property:
*default_format*
the default name format to use
       *pat_as_surn*
if only one surname, see if pa/ma should be considered as 'the' surname.
"""
format_funcs = {}
raw_format_funcs = {}
def __init__(self, xlocale=glocale):
"""
Initialize the NameDisplay class.
If xlocale is passed in (a GrampsLocale), then
the translated script will be returned instead.
:param xlocale: allow selection of the displayer script
:type xlocale: a GrampsLocale instance
"""
global WITH_GRAMPS_CONFIG
global PAT_AS_SURN
# Translators: needed for Arabic, ignore otherwise
COMMAGLYPH = xlocale.translation.gettext(',')
self.STANDARD_FORMATS = [
(Name.DEF, _("Default format (defined by Gramps preferences)"),
'', _ACT),
(Name.LNFN, _("Surname, Given Suffix"),
'%l' + COMMAGLYPH + ' %f %s', _ACT),
(Name.FN, _("Given"),
'%f', _ACT),
(Name.FNLN, _("Given Surname Suffix"),
'%f %l %s', _ACT),
            # primary name primconnector other, given pa/matronymic suffix, primprefix
# Translators: long string, have a look at Preferences dialog
(Name.LNFNP, _("Main Surnames, Given Patronymic Suffix Prefix"),
'%1m %2m %o' + COMMAGLYPH + ' %f %1y %s %0m', _ACT),
# DEPRECATED FORMATS
(Name.PTFN, _("Patronymic, Given"),
'%y' + COMMAGLYPH + ' %s %f', _INA),
]
self.LNFN_STR = "%s" + COMMAGLYPH + " %s %s"
self.name_formats = {}
if WITH_GRAMPS_CONFIG:
self.default_format = config.get('preferences.name-format')
if self.default_format == 0:
self.default_format = Name.LNFN
config.set('preferences.name-format', self.default_format)
#if only one surname, see if pa/ma should be considered as
# 'the' surname.
PAT_AS_SURN = config.get('preferences.patronimic-surname')
config.connect('preferences.patronimic-surname', self.change_pa_sur)
else:
self.default_format = Name.LNFN
PAT_AS_SURN = False
#preinit the name formats, this should be updated with the data
#in the database once a database is loaded
self.set_name_format(self.STANDARD_FORMATS)
def change_pa_sur(self, *args):
""" How to handle single patronymic as surname is changed"""
global PAT_AS_SURN
PAT_AS_SURN = config.get('preferences.patronimic-surname')
def get_pat_as_surn(self):
global PAT_AS_SURN
return PAT_AS_SURN
def _format_fn(self, fmt_str):
return lambda x: self.format_str(x, fmt_str)
def _format_raw_fn(self, fmt_str):
return lambda x: self.format_str_raw(x, fmt_str)
def _raw_lnfn(self, raw_data):
result = self.LNFN_STR % (_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fnln(self, raw_data):
result = "%s %s %s" % (raw_data[_FIRSTNAME],
_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fn(self, raw_data):
result = raw_data[_FIRSTNAME]
return ' '.join(result.split())
def clear_custom_formats(self):
self.name_formats = {num: value
for num, value in self.name_formats.items()
if num >= 0}
def set_name_format(self, formats):
raw_func_dict = {
Name.LNFN : self._raw_lnfn,
Name.FNLN : self._raw_fnln,
Name.FN : self._raw_fn,
}
for (num, name, fmt_str, act) in formats:
func = self._format_fn(fmt_str)
func_raw = raw_func_dict.get(num, self._format_raw_fn(fmt_str))
self.name_formats[num] = (name, fmt_str, act, func, func_raw)
self.set_default_format(self.get_default_format())
def add_name_format(self, name, fmt_str):
for num in self.name_formats:
if fmt_str in self.name_formats.get(num):
return num
num = -1
while num in self.name_formats:
num -= 1
self.set_name_format([(num, name, fmt_str,_ACT)])
return num
def edit_name_format(self, num, name, fmt_str):
self.set_name_format([(num, name, fmt_str,_ACT)])
if self.default_format == num:
self.set_default_format(num)
def del_name_format(self, num):
try:
del self.name_formats[num]
except:
pass
def set_default_format(self, num):
if num not in self.name_formats:
num = Name.LNFN
# if user sets default format to the Gramps default format,
# then we select LNFN as format.
if num == Name.DEF:
num = Name.LNFN
self.default_format = num
self.name_formats[Name.DEF] = (self.name_formats[Name.DEF][_F_NAME],
self.name_formats[Name.DEF][_F_FMT],
self.name_formats[Name.DEF][_F_ACT],
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
def get_default_format(self):
return self.default_format
def set_format_inactive(self, num):
try:
self.name_formats[num] = (self.name_formats[num][_F_NAME],
self.name_formats[num][_F_FMT],
_INA,
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
except:
pass
def get_name_format(self, also_default=False,
only_custom=False,
only_active=True):
"""
        Returns a list of name formats as tuples of
        the form (index, name, fmt_str, act).
        The list contains the standard formats followed
        by the custom formats, both in ascending order of
        their indices.
"""
custom_formats = sorted([
(index, name, format_string, active)
for index, (name, format_string, active, *rest) in self.name_formats.items()
if index < 0 and (not only_active or active)
])
if only_custom:
return custom_formats
standard_formats = sorted([
(index, name, format_string, active)
for index, (name, format_string, active, *rest) in self.name_formats.items()
if index >= 0 and (also_default or index) and (not only_active or active)
])
return standard_formats + custom_formats
def _is_format_valid(self, num):
try:
if not self.name_formats[num][_F_ACT]:
num = 0
except:
num = 0
return num
#-------------------------------------------------------------------------
def _gen_raw_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(raw_data):
return "%s %s %s" % (raw_data[_TITLE],
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("raw_data[_TITLE]", "title",
_("title", "Person")),
"f": ("raw_data[_FIRSTNAME]", "given",
_("given")),
"l": ("_raw_full_surname(raw_data[_SURNAME_LIST])", "surname",
_("surname")),
"s": ("raw_data[_SUFFIX]", "suffix",
_("suffix")),
"c": ("raw_data[_CALL]", "call",
_("call", "Name")),
"x": ("(raw_data[_NICK] or raw_data[_CALL] or raw_data[_FIRSTNAME].split(' ')[0])",
"common",
_("common", "Name")),
"i": ("''.join([word[0] +'.' for word in ('. ' +" +
" raw_data[_FIRSTNAME]).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_data[_SURNAME_LIST])",
"primary",
_("primary", "Name")),
"0m": ("_raw_primary_prefix_only(raw_data[_SURNAME_LIST])",
"primary[pre]",
_("primary[pre]")),
"1m": ("_raw_primary_surname_only(raw_data[_SURNAME_LIST])",
"primary[sur]",
_("primary[sur]")),
"2m": ("_raw_primary_conn_only(raw_data[_SURNAME_LIST])",
"primary[con]",
_("primary[con]")),
"y": ("_raw_patro_surname(raw_data[_SURNAME_LIST])", "patronymic",
_("patronymic")),
"0y": ("_raw_patro_prefix_only(raw_data[_SURNAME_LIST])", "patronymic[pre]",
_("patronymic[pre]")),
"1y": ("_raw_patro_surname_only(raw_data[_SURNAME_LIST])", "patronymic[sur]",
_("patronymic[sur]")),
"2y": ("_raw_patro_conn_only(raw_data[_SURNAME_LIST])", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_data[_SURNAME_LIST])", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_data[_SURNAME_LIST])",
"rest",
_("rest", "Remaining names")),
"p": ("_raw_prefix_surname(raw_data[_SURNAME_LIST])",
"prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_data[_SURNAME_LIST])",
"rawsurnames",
_("rawsurnames")),
"n": ("raw_data[_NICK]", "nickname",
_("nickname")),
"g": ("raw_data[_FAMNICK]", "familynick",
_("familynick")),
}
args = "raw_data"
return self._make_fn(format_str, d, args)
def _gen_cooked_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(first, raw_surname_list, suffix, title, call,):
return "%s %s" % (first,suffix)
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, or otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("title", "title",
_("title", "Person")),
"f": ("first", "given",
_("given")),
"l": ("_raw_full_surname(raw_surname_list)", "surname",
_("surname")),
"s": ("suffix", "suffix",
_("suffix")),
"c": ("call", "call",
_("call", "Name")),
"x": ("(nick or call or first.split(' ')[0])", "common",
_("common", "Name")),
"i": ("''.join([word[0] +'.' for word in ('. ' + first).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_surname_list)", "primary",
_("primary", "Name")),
"0m":("_raw_primary_prefix_only(raw_surname_list)",
"primary[pre]", _("primary[pre]")),
"1m":("_raw_primary_surname_only(raw_surname_list)",
"primary[sur]",_("primary[sur]")),
"2m":("_raw_primary_conn_only(raw_surname_list)",
"primary[con]", _("primary[con]")),
"y": ("_raw_patro_surname(raw_surname_list)", "patronymic",
_("patronymic")),
"0y":("_raw_patro_prefix_only(raw_surname_list)", "patronymic[pre]",
_("patronymic[pre]")),
"1y":("_raw_patro_surname_only(raw_surname_list)", "patronymic[sur]",
_("patronymic[sur]")),
"2y":("_raw_patro_conn_only(raw_surname_list)", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_surname_list)", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_surname_list)", "rest",
_("rest", "Remaining names")),
"p": ("_raw_prefix_surname(raw_surname_list)", "prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_surname_list)", "rawsurnames",
_("rawsurnames")),
"n": ("nick", "nickname",
_("nickname")),
"g": ("famnick", "familynick",
_("familynick")),
}
args = "first,raw_surname_list,suffix,title,call,nick,famnick"
return self._make_fn(format_str, d, args)
def format_str(self, name, format_str):
return self._format_str_base(name.first_name, name.surname_list,
name.suffix, name.title,
name.call, name.nick, name.famnick,
format_str)
def format_str_raw(self, raw_data, format_str):
"""
Format a name from the raw name list. To make this as fast as possible
this uses :func:`_gen_raw_func` to generate a new method for each new
format_string.
        It does not call :meth:`_format_str_base` because it would introduce an
extra method call and we need all the speed we can squeeze out of this.
"""
func = self.__class__.raw_format_funcs.get(format_str)
if func is None:
func = self._gen_raw_func(format_str)
self.__class__.raw_format_funcs[format_str] = func
return func(raw_data)
def _format_str_base(self, first, surname_list, suffix, title, call,
nick, famnick, format_str):
"""
Generates name from a format string.
The following substitutions are made:
'%t' : title
'%f' : given (first names)
'%l' : full surname (lastname)
'%c' : callname
'%x' : nick name, call, or otherwise first first name (common name)
'%i' : initials of the first names
'%m' : primary surname (main)
'%0m': prefix primary surname (main)
'%1m': surname primary surname (main)
'%2m': connector primary surname (main)
'%y' : pa/matronymic surname (father/mother) - assumed unique
'%0y': prefix "
'%1y': surname "
'%2y': connector "
'%o' : surnames without patronymic
'%r' : non-primary surnames (rest)
'%p' : list of all prefixes
'%q' : surnames without prefixes and connectors
'%s' : suffix
'%n' : nick name
'%g' : family nick name
The capital letters are substituted for capitalized name components.
The %% is substituted with the single % character.
All the other characters in the fmt_str are unaffected.
"""
func = self.__class__.format_funcs.get(format_str)
if func is None:
func = self._gen_cooked_func(format_str)
self.__class__.format_funcs[format_str] = func
try:
s = func(first, [surn.serialize() for surn in surname_list],
suffix, title, call, nick, famnick)
except (ValueError, TypeError,):
raise NameDisplayError("Incomplete format string")
return s
#-------------------------------------------------------------------------
def primary_surname(self, name):
global PAT_AS_SURN
nrsur = len(name.surname_list)
sur = name.get_primary_surname()
if not PAT_AS_SURN and nrsur <= 1 and \
(sur.get_origintype().value == _ORIGINPATRO
or sur.get_origintype().value == _ORIGINMATRO):
return ''
return sur.get_surname()
def sort_string(self, name):
return "%-25s%-30s%s" % (self.primary_surname(name),
name.first_name, name.suffix)
def sorted(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
displaying a sortedname.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
name = person.get_primary_name()
return self.sorted_name(name)
def sorted_name(self, name):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for sorting the name in a list.
:param name: :class:`~.name.Name` instance that is to be displayed.
:type name: :class:`~.name.Name`
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(name.sort_as)
return self.name_formats[num][_F_FN](name)
def truncate(self, full_name, max_length=15, elipsis="..."):
name_out = ""
if len(full_name) <= max_length:
name_out = full_name
else:
last_space = full_name.rfind(" ", max_length)
if (last_space) > -1:
name_out = full_name[:last_space]
else:
name_out = full_name[:max_length]
name_out += " " + elipsis
return name_out
def raw_sorted_name(self, raw_data):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for sorting the name in a list.
:param name: raw unserialized data of name that is to be displayed.
:type name: tuple
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(raw_data[_SORT])
return self.name_formats[num][_F_RAWFN](raw_data)
def display(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
normal displaying.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
name = person.get_primary_name()
return self.display_name(name)
def display_format(self, person, num):
"""
Return a text string representing the L{gen.lib.Person} instance's
L{Name} using num format.
@param person: L{gen.lib.Person} instance that contains the
L{Name} that is to be displayed. The primary name is used for
the display.
@type person: L{gen.lib.Person}
@param num: num of the format to be used, as return by
name_displayer.add_name_format('name','format')
@type num: int
@returns: Returns the L{gen.lib.Person} instance's name
@rtype: str
"""
name = person.get_primary_name()
return self.name_formats[num][_F_FN](name)
def display_formal(self, person):
"""
Return a text string representing the :class:`~.person.Person`
instance's :class:`~.name.Name` in a manner that should be used for
formal displaying.
:param person: :class:`~.person.Person` instance that contains the
:class:`~.name.Name` that is to be displayed. The
primary name is used for the display.
:type person: :class:`~.person.Person`
:returns: Returns the :class:`~.person.Person` instance's name
:rtype: str
"""
# FIXME: At this time, this is just duplicating display() method
name = person.get_primary_name()
return self.display_name(name)
def display_name(self, name):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for normal displaying.
:param name: :class:`~.name.Name` instance that is to be displayed.
:type name: :class:`~.name.Name`
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
if name is None:
return ""
num = self._is_format_valid(name.display_as)
return self.name_formats[num][_F_FN](name)
def raw_display_name(self, raw_data):
"""
Return a text string representing the :class:`~.name.Name` instance
in a manner that should be used for normal displaying.
:param name: raw unserialized data of name that is to be displayed.
:type name: tuple
:returns: Returns the :class:`~.name.Name` string representation
:rtype: str
"""
num = self._is_format_valid(raw_data[_DISPLAY])
return self.name_formats[num][_F_RAWFN](raw_data)
def display_given(self, person):
return self.format_str(person.get_primary_name(),'%f')
def name_grouping(self, db, person):
"""
Return the name under which to group this person. This is defined as:
1. if group name is defined on primary name, use that
2. if group name is defined for the primary surname of the primary
name, use that
3. use primary surname of primary name otherwise
"""
return self.name_grouping_name(db, person.primary_name)
def name_grouping_name(self, db, pn):
"""
Return the name under which to group. This is defined as:
1. if group name is defined, use that
2. if group name is defined for the primary surname, use that
3. use primary surname itself otherwise
:param pn: :class:`~.name.Name` object
:type pn: :class:`~.name.Name` instance
:returns: Returns the groupname string representation
:rtype: str
"""
if pn.group_as:
return pn.group_as
return db.get_name_group_mapping(pn.get_primary_surname().get_surname())
def name_grouping_data(self, db, pn):
"""
Return the name under which to group. This is defined as:
1. if group name is defined, use that
2. if group name is defined for the primary surname, use that
3. use primary surname itself otherwise
4. if no primary surname, do we have a ma/patronymic surname ?
in this case, group name will be the ma/patronymic name.
:param pn: raw unserialized data of name
:type pn: tuple
:returns: Returns the groupname string representation
:rtype: str
"""
if pn[_GROUP]:
return pn[_GROUP]
name = pn[_GROUP]
if not name:
            # if we have no primary surname, perhaps we have a
            # patronymic/matronymic name ?
            srnme = pn[_SURNAME_LIST]
surname = []
for _surname in srnme:
if (_surname[_TYPE_IN_LIST][0] == _ORIGINPATRO
or _surname[_TYPE_IN_LIST][0] == _ORIGINMATRO):
# Yes, we have one.
surname = [_surname]
# name1 is the ma/patronymic name.
name1 = _raw_patro_surname_only(surname)
if name1 and len(srnme) == 1:
name = db.get_name_group_mapping(name1)
if not name:
name = db.get_name_group_mapping(_raw_primary_surname_only(
pn[_SURNAME_LIST]))
return name
def _make_fn(self, format_str, d, args):
"""
Create the name display function and handles dependent
punctuation.
"""
# d is a dict: dict[code] = (expr, word, translated word)
# First, go through and do internationalization-based
# key-word replacement. Just replace ikeywords with
# %codes (ie, replace "irstnamefay" with "%f", and
# "IRSTNAMEFAY" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[2]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse on length and by ikeyword
for (code, ikeyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(ikeyword, "%"+ code)
format_str = format_str.replace(ikeyword.title(), "%"+ code)
format_str = format_str.replace(ikeyword.upper(), "%"+ code.upper())
# Next, go through and do key-word replacement.
# Just replace keywords with
# %codes (ie, replace "firstname" with "%f", and
# "FIRSTNAME" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[1]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse sort on length and by keyword
# if in double quotes, just use % codes
for (code, keyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(keyword, "%"+ code)
format_str = format_str.replace(keyword.title(), "%"+ code)
format_str = format_str.replace(keyword.upper(), "%"+ code.upper())
# Get lower and upper versions of codes:
codes = list(d.keys()) + [c.upper() for c in d]
# Next, list out the matching patterns:
# If it starts with "!" however, treat the punctuation verbatim:
if len(format_str) > 0 and format_str[0] == "!":
patterns = ["%(" + ("|".join(codes)) + ")", # %s
]
format_str = format_str[1:]
else:
patterns = [
",\\W*\"%(" + ("|".join(codes)) + ")\"", # ,\W*"%s"
",\\W*\\(%(" + ("|".join(codes)) + ")\\)", # ,\W*(%s)
",\\W*%(" + ("|".join(codes)) + ")", # ,\W*%s
"\"%(" + ("|".join(codes)) + ")\"", # "%s"
"_%(" + ("|".join(codes)) + ")_", # _%s_
"\\(%(" + ("|".join(codes)) + ")\\)", # (%s)
"%(" + ("|".join(codes)) + ")", # %s
]
new_fmt = format_str
# replace the specific format string flags with a
# flag that works in standard python format strings.
new_fmt = re.sub("|".join(patterns), "%s", new_fmt)
# replace special meaning codes we need to have verbatim in output
if (len(new_fmt) > 2 and new_fmt[0] == new_fmt[-1] == '"'):
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt[1:-1].replace('"', r'\"')
else:
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt.replace('"', '\\\"')
# find each format flag in the original format string
# for each one we find the variable name that is needed to
# replace it and add this to a list. This list will be used to
# generate the replacement tuple.
# This compiled pattern should match all of the format codes.
pat = re.compile("|".join(patterns))
param = ()
mat = pat.search(format_str)
while mat:
match_pattern = mat.group(0) # the matching pattern
# prefix, code, suffix:
p, code, s = re.split("%(.)", match_pattern)
if code in '0123456789':
code = code + s[0]
s = s[1:]
field = d[code.lower()][0]
if code.isupper():
field += ".upper()"
if p == '' and s == '':
param = param + (field,)
else:
param = param + ("ifNotEmpty(%s,'%s','%s')" % (field, p, s), )
mat = pat.search(format_str, mat.end())
s = """
def fn(%s):
def ifNotEmpty(str,p,s):
if str == '':
return ''
else:
return p + str + s
return cleanup_name("%s" %% (%s))""" % (args, new_fmt, ",".join(param))
try:
exec(s) in globals(), locals()
return locals()['fn']
except:
LOG.error("\n" + 'Wrong name format string %s' % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
+"\n" + _('Wrong name format string %s') % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
)
def errfn(*arg):
return _("ERROR, Edit Name format in Preferences")
return errfn
displayer = NameDisplay()
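# Illustrative sketch (editor addition, not part of the original module API):
# it shows the format-string substitution documented in _format_str_base,
# applied through the module-level displayer. It assumes a gen.lib Name
# object exposing set_display_as(); the format name "UPPER SURNAME" and the
# "%L, %f" string are hypothetical examples.
def _example_custom_format(name_obj):
    """Register a custom format and render name_obj with it."""
    fmt_num = displayer.add_name_format("UPPER SURNAME", "%L, %f")
    name_obj.set_display_as(fmt_num)
    # For a given name "Ada" with surname "Lovelace" this yields "LOVELACE, Ada".
    return displayer.display_name(name_obj)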
|
gramps-project/gramps
|
gramps/gen/display/name.py
|
Python
|
gpl-2.0
| 45,879
|
[
"Brian"
] |
ded1903dc3a5cd5bf960a20c04e93170288878728f234e926f4bcbbd5a2936b7
|
import base64
import zlib
import json
from WebAppDIRAC.Lib.WebHandler import WebHandler, WErr, WOK, asyncGen
from DIRAC.Core.Utilities import DEncode
from DIRAC.Core.DISET.ThreadConfig import ThreadConfig
from DIRAC.FrameworkSystem.Client.UserProfileClient import UserProfileClient
class UPHandler( WebHandler ):
AUTH_PROPS = "authenticated"
__tc = ThreadConfig()
def prepare( self ):
if not self.isRegisteredUser():
raise WErr( 401, "Not a registered user" )
self.set_header( "Pragma", "no-cache" )
self.set_header( "Cache-Control", "max-age=0, no-store, no-cache, must-revalidate" )
    # Do not use the defined user setup. Use the web one to show the same profile independently of
# user setup
self.__tc.setSetup( False )
def __getUP( self ):
try:
obj = self.request.arguments[ 'obj' ][-1]
app = self.request.arguments[ 'app' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
return UserProfileClient( "Web/%s/%s" % ( obj, app ) )
@asyncGen
def web_saveAppState( self ):
up = self.__getUP()
try:
name = self.request.arguments[ 'name' ][-1]
state = self.request.arguments[ 'state' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
data = base64.b64encode( zlib.compress( DEncode.encode( state ), 9 ) )
    # before we save the state (modify the state) we have to remember the actual access: ReadAccess and PublishAccess
result = yield self.threadTask( up.getVarPermissions, name )
if result['OK']:
access = result['Value']
else:
      access = {'ReadAccess': 'USER', 'PublishAccess': 'USER'} # this is when the application/desktop does not exist.
result = yield self.threadTask( up.storeVar, name, data )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
# change the access to the application/desktop
result = yield self.threadTask( up.setVarPermissions, name, access )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.set_status( 200 )
self.finish()
@asyncGen
def web_makePublicAppState( self ):
up = self.__getUP()
try:
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
try:
access = self.request.arguments[ 'access' ][-1].upper()
except KeyError as excp:
access = 'ALL'
if access not in ( 'ALL', 'VO', 'GROUP', 'USER' ):
raise WErr( 400, "Invalid access" )
revokeAccess = { 'ReadAccess': access }
if access == 'USER': # if we make private a state,
# we have to revoke from the public as well
revokeAccess['PublishAccess'] = 'USER'
# TODO: Check access is in either 'ALL', 'VO' or 'GROUP'
result = yield self.threadTask( up.setVarPermissions, name, revokeAccess )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.set_status( 200 )
self.finish()
@asyncGen
def web_loadAppState( self ):
up = self.__getUP()
try:
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
result = yield self.threadTask( up.retrieveVar, name )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
data = result[ 'Value' ]
data, count = DEncode.decode( zlib.decompress( base64.b64decode( data ) ) )
self.finish( data )
@asyncGen
def web_loadUserAppState( self ):
up = self.__getUP()
try:
user = self.request.arguments[ 'user' ][-1]
group = self.request.arguments[ 'group' ][-1]
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
result = yield self.threadTask( up.retrieveVarFromUser, user, group, name )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
data = result[ 'Value' ]
data, count = DEncode.decode( zlib.decompress( base64.b64decode( data ) ) )
self.finish( data )
@asyncGen
def web_listAppState( self ):
up = self.__getUP()
result = yield self.threadTask( up.retrieveAllVars )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
data = result[ 'Value' ]
for k in data:
# Unpack data
data[ k ] = json.loads( DEncode.decode( zlib.decompress( base64.b64decode( data[ k ] ) ) )[0] )
self.finish( data )
@asyncGen
def web_delAppState( self ):
up = self.__getUP()
try:
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
result = yield self.threadTask( up.deleteVar, name )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.finish()
@asyncGen
def web_listPublicDesktopStates( self ):
up = self.__getUP()
result = yield self.threadTask( up.listAvailableVars )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
data = result['Value']
paramNames = ['UserName', 'Group', 'VO', 'desktop']
records = []
for i in data:
records += [dict( zip( paramNames, i ) )]
sharedDesktops = {}
for i in records:
result = yield self.threadTask( up.getVarPermissions, i['desktop'] )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
if result['Value']['ReadAccess'] == 'ALL':
print i['UserName'], i['Group'], i
result = yield self.threadTask( up.retrieveVarFromUser , i['UserName'], i['Group'], i['desktop'] )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
if i['UserName'] not in sharedDesktops:
sharedDesktops[i['UserName']] = {}
sharedDesktops[i['UserName']][i['desktop']] = json.loads( DEncode.decode( zlib.decompress( base64.b64decode( result['Value'] ) ) )[0] )
sharedDesktops[i['UserName']]['Metadata'] = i
else:
sharedDesktops[i['UserName']][i['desktop']] = json.loads( DEncode.decode( zlib.decompress( base64.b64decode( result['Value'] ) ) )[0] )
sharedDesktops[i['UserName']]['Metadata'] = i
self.finish( sharedDesktops )
@asyncGen
def web_makePublicDesktopState( self ):
up = UserProfileClient( "Web/application/desktop" )
try:
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
try:
access = self.request.arguments[ 'access' ][-1].upper()
except KeyError as excp:
access = 'ALL'
if access not in ( 'ALL', 'VO', 'GROUP', 'USER' ):
raise WErr( 400, "Invalid access" )
# TODO: Check access is in either 'ALL', 'VO' or 'GROUP'
result = yield self.threadTask( up.setVarPermissions, name, { 'ReadAccess': access } )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.set_status( 200 )
self.finish()
@asyncGen
def web_changeView( self ):
up = self.__getUP()
try:
desktopName = self.request.arguments[ 'desktop' ][-1]
view = self.request.arguments[ 'view' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
result = yield self.threadTask( up.retrieveVar, desktopName )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
data = result['Value']
oDesktop = json.loads( DEncode.decode( zlib.decompress( base64.b64decode( data ) ) )[0] )
oDesktop[unicode( 'view' )] = unicode( view )
oDesktop = json.dumps( oDesktop )
data = base64.b64encode( zlib.compress( DEncode.encode( oDesktop ), 9 ) )
result = yield self.threadTask( up.storeVar, desktopName, data )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.set_status( 200 )
self.finish()
@asyncGen
def web_listPublicStates( self ):
session = self.getSessionData()
user = session["user"]["username"]
up = self.__getUP()
retVal = yield self.threadTask( up.getUserProfileNames, {'PublishAccess':'ALL'} )
if not retVal[ 'OK' ]:
raise WErr.fromSERROR( retVal )
data = retVal['Value']
if data == None:
self.finish( {"success":"false", "result":[], "total":0, "error":"There are no public states!"} )
return
paramNames = ['user', 'group', 'vo', 'name']
mydesktops = {'name':'My Desktops',
'group':'',
'vo':'',
'user':'',
'iconCls' : 'my-desktop',
'children' :[]
}
shareddesktops = {'name':'Shared Desktops',
'group':'',
'vo':'',
'user':'',
'expanded': 'true',
'iconCls' : 'shared-desktop',
'children' :[]
}
myapplications = {'name':'My Applications',
'group':'',
'vo':'',
'user':'',
'children' :[]
}
sharedapplications = {'name':'Shared Applications',
'group':'',
'vo':'',
'user':'',
'expanded': 'true',
'iconCls' : 'shared-desktop',
'children' :[]
}
desktopsApplications = {
'text':'.', 'children': [{'name':'Desktops',
'group':'',
'vo':'',
'user':'',
'children' :[mydesktops,
shareddesktops]
}, {'name':'Applications',
'group':'',
'vo':'',
'user':'',
'children' :[myapplications,
sharedapplications]
}
]
}
type = ''
for i in data:
application = i.replace( 'Web/application/', '' )
up = UserProfileClient( i )
retVal = up.listAvailableVars()
if not retVal['OK']:
raise WErr.fromSERROR( retVal )
else:
states = retVal['Value']
for state in states:
record = dict( zip( paramNames, state ) )
record['app'] = application
retVal = yield self.threadTask( up.getVarPermissions, record['name'] )
if not retVal['OK']:
raise WErr.fromSERROR( retVal )
else:
permissions = retVal['Value']
if permissions['PublishAccess'] == 'ALL':
if application == 'desktop':
record['type'] = 'desktop'
record['leaf'] = 'true'
                record['iconCls'] = 'core-desktop-icon'
if record['user'] == user:
mydesktops['children'].append( record )
else:
shareddesktops['children'].append( record )
else:
record['type'] = 'application'
record['leaf'] = 'true'
record['iconCls'] = 'core-application-icon'
if record['user'] == user:
myapplications['children'].append( record )
else:
sharedapplications['children'].append( record )
self.finish( desktopsApplications )
@asyncGen
def web_publishAppState( self ):
up = self.__getUP()
try:
name = self.request.arguments[ 'name' ][-1]
except KeyError as excp:
raise WErr( 400, "Missing %s" % excp )
try:
access = self.request.arguments[ 'access' ][-1].upper()
except KeyError as excp:
access = 'ALL'
if access not in ( 'ALL', 'VO', 'GROUP', 'USER' ):
raise WErr( 400, "Invalid access" )
result = yield self.threadTask( up.setVarPermissions, name, { 'PublishAccess': access, 'ReadAccess': access } )
if not result[ 'OK' ]:
raise WErr.fromSERROR( result )
self.set_status( 200 )
self.finish()
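# Illustrative helpers (editor addition, not used by the handler above). They
# mirror the wrapping applied to the state payloads throughout this file:
# base64( zlib( DEncode( state ) ) ). The helper names are hypothetical.
def _packState( state ):
  return base64.b64encode( zlib.compress( DEncode.encode( state ), 9 ) )
def _unpackState( blob ):
  data, _length = DEncode.decode( zlib.decompress( base64.b64decode( blob ) ) )
  return data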
|
chaen/WebAppDIRAC
|
WebApp/handler/UPHandler.py
|
Python
|
gpl-3.0
| 12,236
|
[
"DIRAC"
] |
44f962640b154a859078e42af0841b97e3cfc0eef094f7171dcadfb703f25abb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from builtins import range, object
import sys
sys.path.append('../')
try:
import vtk
except ImportError:
pass
from mupif import Timer
import numpy as np
from mupif import Mesh
from mupif import Vertex
from mupif import Cell
from mupif import Field
from mupif import FieldID
from mupif import ValueType
#debug flag
debug = 0
class EnsightReader(object):
"""
Read ensight files with vertices, cells, fields. Why is there this version and version 2? Is this file obsolete?
"""
def __init__(self):
self.grid = None
return
def readVtkFile(self, fileName):
"""
Read VTK file.
:param str filename: Input filename
"""
self.reader = vtk.vtkUnstructuredGridReader()
self.reader.SetFileName(fileName)
self.reader.Update()
self.grid = self.reader.GetOutput()
self.points = self.grid.GetPoints()
def readEnsightFile(self, fileName):
"""
Read Ensight file. Writes a VTK file with fileName+'.vtk'.
:param str filename: Input filename
"""
#read the ensight file
reader = vtk.vtkGenericEnSightReader()
reader.SetCaseFileName(fileName)
reader.Update()
output = reader.GetOutput()
num_blocks = output.GetNumberOfBlocks()
#blocks_unstructured is a list of objects of vtkUnstructuredGrid
blocks_unstructured = []
for i in range(num_blocks):
blocks_unstructured.append(output.GetBlock(i))
appendFilter = vtk.vtkAppendFilter()
i = 0
while i < len(blocks_unstructured):
if(vtk.VTK_MAJOR_VERSION <= 5):
appendFilter.AddInput(blocks_unstructured[i])
else:
appendFilter.AddInputData(blocks_unstructured[i])
i=i+1
appendFilter.Update();
unstructuredGrid=vtk.vtkUnstructuredGrid()
unstructuredGrid.ShallowCopy(appendFilter.GetOutput());
w = vtk.vtkUnstructuredGridWriter()
if(vtk.VTK_MAJOR_VERSION <= 5):
w.SetInput(unstructuredGrid)
else:
w.SetInputData(unstructuredGrid)
w.SetFileName(fileName+'.vtk')
w.Write()
self.readVtkFile(fileName+'.vtk')
def getMesh (self, cellFilter):
"""
Reads a mesh from Ensight file.
:param tuple cellFilter: A tuple containing a list of eligible cell types (according to CellGeometryType)??
:return: mesh
:rtype: Mesh
"""
        mesh = Mesh.UnstructuredMesh()
vertices=[]
coords = np.zeros((3), dtype='f')
for i in range(0, self.getNumberOfVertices()):
            coords = self.getCoords(i, coords)
            vertices.append(Vertex.Vertex(i, i+1, coords))
cells = []
for i in range(0, self.getNumberOfCells()):
if (self.giveCellType(i) == 12 and self.giveCellType(i) in cellFilter):
                cells.append(Cell.Brick_3d_lin(mesh, i, i, (int(self.giveVertex(i,0)), int(self.giveVertex(i,1)), int(self.giveVertex(i,2)), int(self.giveVertex(i,3)), int(self.giveVertex(i,4)), int(self.giveVertex(i,5)), int(self.giveVertex(i,6)), int(self.giveVertex(i,7))) ))
            elif (self.giveCellType(i) == 9 and self.giveCellType(i) in cellFilter):
                cells.append(Cell.Quad_2d_lin(mesh, i, i,(int(self.giveVertex(i,0)),int(self.giveVertex(i,1)),int(self.giveVertex(i,2)),int(self.giveVertex(i,3))) ))
mesh.setup(vertices, cells)
return mesh
def getField(self, mesh, fileName, fieldName, vertexBasedFlag, cellFilter):
"""
Extract field from Ensight file.
:param Mesh mesh: A mesh
:param str fileName: ???
:param str fieldName: A name of a computed field
:param bool vertexBasedFlag: Field is assigned to vertices directly???
:param tuple cellFilter: A tuple containing a list of eligible cell types (according to CellGeometryType)??
"""
values=[]
if (vertexBasedFlag == True):
for i in range(0, self.getNumberOfVertices()):
values.append ((self.giveValueAtPoint(fieldName, i), ))
elif(vertexBasedFlag == False):
for i in range(0, self.getNumberOfCells()):
if (self.giveCellType(i) == 12 and self.giveCellType(i) in cellFilter):
                    values.append ((self.giveValueAtCell(fieldName, i), ))
elif (self.giveCellType(i) == 9 and self.giveCellType(i) in cellFilter):
values.append ((self.giveValueAtCell(fieldName, i), ))
        return Field.Field(mesh, FieldID.FID_Temperature, ValueType.Scalar, None, None, values, Field.FieldType.FT_cellBased)
def giveValueAtPoint(self, fieldName, componentID):
"""
Evaluate field value at a given Point??
:param str fieldName: Name of unknown field
:param int componentID: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetPointData().GetScalars(fieldName).GetValue( componentID )
def giveValueAtCell(self, fieldName, componentID):
"""
Evaluate field value at a given Point??
:param str fieldName: Name of unknown field
:param int componentID: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetCellData().GetScalars(fieldName).GetValue( componentID )
def giveVectorAtPoint(self, fieldName, i):
"""
Evaluate field value at a given Point??
:param str fieldName: Name of unknown field
:param int i: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetPointData().GetVectors(fieldName).GetTuple3(i)
def giveVectorAtCell(self, fieldName, i):
"""
Evaluate field value at a given cell??
:param str fieldName: Name of unknown field
:param int i: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetCellData().GetVectors(fieldName).GetTuple3(i)
def giveTensorAtPoint(self, fieldName, i):
"""
Evaluate field value at a given point
:param str fieldName: Name of unknown field
:param int i: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetPointData().GetTensors(fieldName).GetTuple9(i)
def giveTensorAtCell(self, fieldName, i):
"""
Evaluate field value at a given cell ??
:param str fieldName: Name of unknown field
:param int i: ???
:return: ??
:rtype: ??
"""
uGrid = vtk.vtkUnstructuredGrid()
uGrid.ShallowCopy(self.grid)
return uGrid.GetCellData().GetTensors(fieldName).GetTuple9(i)
def getNumberOfCells(self):
"""
Returns the number of Cells.
:return: Number of cells
:rtype: int
"""
geomData = self.reader.GetOutput();
numCells = geomData.GetNumberOfCells()
return numCells;
def getNumberOfVertices(self):
"""
Returns the number of Vertices.
:return: Number of vertices
:rtype: int
"""
geomData = self.reader.GetOutput();
numPts = geomData.GetNumberOfPoints()
return numPts;
def getCoords(self, i, coords):
"""
Get the xyz coordinate of the point
:param int i: Point number??
:param tuple coords: Coordinate of a point?? Why is here as input argument, should be return value instead??
:return: Coordinates
:rtype: tuple
"""
if debug:
            with Timer.Timer() as t:
coords=self.points.GetPoint(i)
print('Request getCoords took %.03f sec.' % t.interval)
else:
coords=self.points.GetPoint(i)
return coords
def getBounds(self, b):
"""
???
"""
geomData = self.reader.GetOutput();
geomData.ComputeBounds()
geomData.GetBounds(b)
return b
def giveCellDimension(self, i):
"""
:return: Dimensions of a cell, e.g. length, width, thickness??
:rtype: tuple??
"""
geomData = self.reader.GetOutput();
dim=geomData.GetCell(i).GetCellDimension()
return dim
def giveCellType(self, i):
"""
:param int i: Cell number
:return: CellType
:rtype: int
"""
geomData = self.reader.GetOutput();
type=geomData.GetCellType(i)
return type
def giveNumberOfVertices(self, cellid):
"""
:param int cellid: Cell number
:return: Number of vertices
:rtype: int
"""
geomData = self.reader.GetOutput();
PtIds = vtk.vtkIdList()
geomData.GetCellPoints(cellid, PtIds)
return PtIds.GetNumberOfIds()
def giveVertex(self, cellid, i):
"""
:param int cellid: ??
:param int i: ??
:return: ID??
:rtype: ??
"""
geomData = self.reader.GetOutput();
PtIds = vtk.vtkIdList()
geomData.GetCellPoints(cellid, PtIds)
ID = PtIds.GetId(i)
return ID
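# Illustrative usage sketch (editor addition, not part of the original class).
# The case file name and field name are hypothetical placeholders; 9 and 12
# are the VTK cell type ids (quad, linear hexahedron) handled by getMesh().
def _exampleUsage():
    reader = EnsightReader()
    reader.readEnsightFile('results.case')   # also writes 'results.case.vtk'
    mesh = reader.getMesh(cellFilter=(9, 12))
    field = reader.getField(mesh, 'results.case', 'Temperature',
                            vertexBasedFlag=True, cellFilter=(9, 12))
    return field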
|
mupif/mupif
|
obsolete/EnsightReader.py
|
Python
|
lgpl-3.0
| 9,660
|
[
"VTK"
] |
897aa0d795151f82a6f48794e2af3a6a43c9df3e79f69ef93f8e49b8ce172755
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 9 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011-2014, Tomasz Długosz <tomek3d@gmail.com>'
__docformat__ = 'restructuredtext en'
import urllib
from base64 import b64encode
from lxml import html
from PyQt5.Qt import QUrl
from calibre import url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
from calibre.ebooks.chardet import strip_encoding_declarations
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
js_browser = '''
from calibre.web.jsbrowser.browser import Browser, Timeout
import urllib
def get_results(url, timeout):
browser = Browser(default_timeout=timeout)
browser.visit(url)
browser.wait_for_element('#nw_content_main')
return browser.html
'''
class WoblinkStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
aff_root = 'https://www.a4b-tracking.com/pl/stat-click-text-link/16/58/'
url = 'http://woblink.com/publication'
aff_url = aff_root + str(b64encode(url))
detail_url = None
if detail_item:
detail_url = aff_root + str(b64encode('http://woblink.com' + detail_item))
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else aff_url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_url if detail_url else aff_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://woblink.com/ebooki-kategorie?query=' + urllib.quote_plus(query.encode('utf-8'))
if max_results > 10:
if max_results > 20:
url += '&limit=30'
else:
url += '&limit=20'
counter = max_results
try:
results = fork_job(js_browser,'get_results', (url, timeout,), module_is_source_code=True)
except WorkerError as e:
raise Exception('Could not get results: %s'%e.orig_tb)
doc = html.fromstring(strip_encoding_declarations(results['result']))
for data in doc.xpath('//div[@class="nw_katalog_lista_ksiazka"]'):
if counter <= 0:
break
id = ''.join(data.xpath('.//div[@class="nw_katalog_lista_ksiazka_okladka nw_okladka"]/a[1]/@href'))
if not id:
continue
cover_url = ''.join(data.xpath('.//div[@class="nw_katalog_lista_ksiazka_okladka nw_okladka"]/a[1]/img/@src'))
title = ''.join(data.xpath('.//h2[@class="nw_katalog_lista_ksiazka_detale_tytul"]/a[1]/text()'))
author = ', '.join(data.xpath('.//p[@class="nw_katalog_lista_ksiazka_detale_autor"]/a/text()'))
price = ''.join(data.xpath('.//div[@class="nw_opcjezakupu_cena"]/text()'))
formats = ', '.join(data.xpath('.//p[@class="nw_katalog_lista_ksiazka_detale_format"]/span/text()'))
s = SearchResult()
s.cover_url = 'http://woblink.com' + cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price + ' zł'
s.detail_item = id.strip()
s.formats = formats
if 'DRM' in formats:
s.drm = SearchResult.DRM_LOCKED
counter -= 1
yield s
else:
s.drm = SearchResult.DRM_UNLOCKED
counter -= 1
yield s
|
sharad/calibre
|
src/calibre/gui2/store/stores/woblink_plugin.py
|
Python
|
gpl-3.0
| 3,887
|
[
"VisIt"
] |
687063b1a07487dbc28e64d2f826a9664a1958e2d287aa38d8e4ae96cef4da2e
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import classproperty, overrides
from ._iupac_sequence import IUPACSequence, _motifs as parent_motifs
class Protein(IUPACSequence):
"""Store protein sequence data and optional associated metadata.
Only characters in the IUPAC protein character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the protein sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC protein character set. If ``False``,
validation will not be performed. Turning off validation will improve
runtime performance. If invalid characters are present, however, there
is **no guarantee that operations performed on the resulting object
will work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC protein characters.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import Protein
>>> Protein('PAW')
Protein
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
-----------------------------
0 PAW
Convert lowercase characters to uppercase:
>>> Protein('paW', lowercase=True)
Protein
-----------------------------
Stats:
length: 3
has gaps: False
has degenerates: False
has non-degenerates: True
-----------------------------
0 PAW
"""
@property
def _motifs(self):
return _motifs
@classproperty
@overrides(IUPACSequence)
def nondegenerate_chars(cls):
return set("ACDEFGHIKLMNPQRSTVWY*")
@classproperty
@overrides(IUPACSequence)
def degenerate_map(cls):
return {
"B": set("DN"), "Z": set("EQ"),
"X": set("ACDEFGHIKLMNPQRSTVWY")
}
_motifs = parent_motifs.copy()
@_motifs("N-glycosylation")
def _motif_nitro_glycosylation(sequence, min_length, ignore):
"""Identifies N-glycosylation runs"""
return sequence.find_with_regex("(N[^PX][ST][^PX])", ignore=ignore)
# Leave this at the bottom
_motifs.interpolate(Protein, "find_motifs")
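# Example (added for clarity, not part of the scikit-bio module):
#     Protein("ANCSGNLSW").find_motifs("N-glycosylation")
# yields the slices 1:5 and 5:9, the spans matching N[^PX][ST][^PX].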
|
Achuth17/scikit-bio
|
skbio/sequence/_protein.py
|
Python
|
bsd-3-clause
| 3,712
|
[
"scikit-bio"
] |
27638293c55f06c955e59ba50f373d8fe17bb60e2942c00aafbe513342288431
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from optparse import make_option
import os
import sys
import glob
import shutil
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
# Based on the collectmedia management command by Brian Beck (exogen)
# http://blog.brianbeck.com/post/50940622/collectmedia
class Command(BaseCommand):
media_dirs = ['media']
ignore_apps = ['django.contrib.admin']
exclude = ['CVS', '.*', '*~']
option_list = BaseCommand.option_list + (
make_option('--media-root', default=settings.MEDIA_ROOT, dest='media_root', metavar='DIR',
help="Specifies the root directory in which to collect media files."),
make_option('-n', '--dry-run', action='store_true', dest='dry_run',
help="Do everything except modify the filesystem."),
make_option('-d', '--dir', action='append', default=media_dirs, dest='media_dirs', metavar='NAME',
help="Specifies the name of the media directory to look for in each app."),
make_option('-e', '--exclude', action='append', default=exclude, dest='exclude', metavar='PATTERNS',
help="A space-delimited list of glob-style patterns to ignore. Use multiple times to add more."),
make_option('-l', '--link', action='store_true', dest='link',
help="Create a symbolic link to each file instead of copying."),
make_option('-i', '--interactive', action='store_true', dest='interactive',
help="Ask before modifying files and selecting from multiple sources."),
make_option('-t', '--theme', default=settings.PINAX_THEME, dest='theme', metavar='DIR',
help="Use this Pinax theme as a the basis."
)
)
help = 'Collect media files from installed apps, Pinax and project in a single media directory.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
if not app_labels:
app_labels = settings.INSTALLED_APPS
short_app_labels = [label.split('.')[-1] for label in app_labels]
interactive = options.get('interactive', False)
dry_run = options.get('dry_run', False)
exclude = options.get('exclude')
theme = options.get('theme', settings.PINAX_THEME)
media_root = options.get('media_root', settings.MEDIA_ROOT)
pinax_media_root = os.path.join(settings.PINAX_ROOT, 'media', theme)
project_media_root = os.path.join(settings.PROJECT_ROOT, 'media')
if dry_run:
print "\n DRY RUN! NO FILES WILL BE MODIFIED."
# This mapping collects files that may be copied. Keys are what the
# file's path relative to `media_root` will be when copied. Values
        # are a list of 2-tuples containing the name of the app providing
# the file and the file's absolute path. The list will have a length
# greater than 1 if multiple apps provide a media file with the same
# relative path.
media_files = {}
for app in app_labels:
if app not in self.ignore_apps:
for rel_path, abs_path in self.handle_app(app, **options):
media_files.setdefault(rel_path, []).append((app, abs_path))
for location in (pinax_media_root, project_media_root):
if not os.path.isdir(location):
continue
app_labels = []
app_labels[:] = self.filter_names(os.listdir(location), exclude=exclude)
for app in app_labels:
if app in short_app_labels and app not in self.ignore_apps:
for rel_path, abs_path in self.handle_override(app, location, **options):
media_files.setdefault(rel_path, []).append((app, abs_path))
# Forget the unused versions of a media file
for f in media_files:
media_files[f] = dict(media_files[f]).items()
# Stop if no media files were found
if not media_files:
print "\nNo media found."
return
# Try to copy in some predictable order.
destinations = list(media_files)
destinations.sort()
for destination in destinations:
sources = media_files[destination]
first_source, other_sources = sources[0], sources[1:]
if interactive and other_sources:
first_app = first_source[0]
app_sources = dict(sources)
print "\nThe file %r is provided by multiple apps:" % destination
print "\n".join([" %s" % app for (app, source) in sources])
message = "Enter the app that should provide this file [%s]: " % first_app
while True:
app = raw_input(message)
if not app:
app, source = first_source
break
elif app in app_sources:
source = app_sources[app]
break
else:
print "The app %r does not provide this file." % app
else:
app, source = first_source
print "\nSelected %r provided by %r." % (destination, app)
# Move in site_media/<app_label>/<file>
destination = os.path.join(app.split('.')[-1], destination)
self.process_file(source, destination, media_root, **options)
def handle_override(self, app, location, **options):
media_dirs = options.get('media_dirs')
exclude = options.get('exclude')
for media_dir in media_dirs:
app_media = os.path.join(location, app)
if os.path.isdir(app_media):
prefix_length = len(app_media) + len(os.sep)
for root, dirs, files in os.walk(app_media):
# Filter `dirs` and `files` based on the exclusion pattern.
dirs[:] = self.filter_names(dirs, exclude=exclude)
files[:] = self.filter_names(files, exclude=exclude)
for filename in files:
absolute_path = os.path.join(root, filename)
relative_path = absolute_path[prefix_length:]
yield (relative_path, absolute_path)
def handle_app(self, app, **options):
if isinstance(app, basestring):
app = __import__(app, {}, {}, [''])
media_dirs = options.get('media_dirs')
exclude = options.get('exclude')
app_root = os.path.dirname(app.__file__)
for media_dir in media_dirs:
app_media = os.path.join(app_root, media_dir)
if os.path.isdir(app_media):
prefix_length = len(app_media) + len(os.sep)
for root, dirs, files in os.walk(app_media):
# Filter `dirs` and `files` based on the exclusion pattern.
dirs[:] = self.filter_names(dirs, exclude=exclude)
files[:] = self.filter_names(files, exclude=exclude)
for filename in files:
absolute_path = os.path.join(root, filename)
relative_path = absolute_path[prefix_length:]
yield (relative_path, absolute_path)
def process_file(self, source, destination, root, link=False, **options):
dry_run = options.get('dry_run', False)
interactive = options.get('interactive', False)
destination = os.path.abspath(os.path.join(root, destination))
if not dry_run:
# Get permission bits and ownership of `root`.
try:
root_stat = os.stat(root)
except os.error, e:
mode = 0777 # Default for `os.makedirs` anyway.
uid = gid = None
else:
mode = root_stat.st_mode
uid, gid = root_stat.st_uid, root_stat.st_gid
destination_dir = os.path.dirname(destination)
try:
# Recursively create all the required directories, attempting
# to use the same mode as `root`.
os.makedirs(destination_dir, mode)
except os.error, e:
# This probably just means the leaf directory already exists,
# but if not, we'll find out when copying or linking anyway.
pass
else:
os.lchown(destination_dir, uid, gid)
if link:
success = self.link_file(source, destination, interactive, dry_run)
else:
success = self.copy_file(source, destination, interactive, dry_run)
if success and None not in (uid, gid):
# Try to use the same ownership as `root`.
os.lchown(destination, uid, gid)
def copy_file(self, source, destination, interactive=False, dry_run=False):
"Attempt to copy `source` to `destination` and return True if successful."
if interactive:
exists = os.path.exists(destination) or os.path.islink(destination)
if exists:
print "The file %r already exists." % destination
if not self.prompt_overwrite(destination):
return False
print "Copying %r to %r." % (source, destination)
if not dry_run:
try:
os.remove(destination)
except os.error, e:
pass
shutil.copy2(source, destination)
return True
return False
def link_file(self, source, destination, interactive=False, dry_run=False):
"Attempt to link to `source` from `destination` and return True if successful."
if sys.platform == 'win32':
message = "Linking is not supported by this platform (%s)."
raise os.error(message % sys.platform)
if interactive:
exists = os.path.exists(destination) or os.path.islink(destination)
if exists:
print "The file %r already exists." % destination
if not self.prompt_overwrite(destination):
return False
if not dry_run:
try:
os.remove(destination)
except os.error, e:
pass
print "Linking to %r from %r." % (source, destination)
if not dry_run:
os.symlink(source, destination)
return True
return False
def prompt_overwrite(self, filename, default=True):
"Prompt the user to overwrite and return their selection as True or False."
yes_values = ['Y']
no_values = ['N']
if default:
prompt = "Overwrite? [Y/n]: "
yes_values.append('')
else:
prompt = "Overwrite? [y/N]: "
no_values.append('')
while True:
overwrite = raw_input(prompt).strip().upper()
if overwrite in yes_values:
return True
elif overwrite in no_values:
return False
else:
print "Select 'Y' or 'N'."
def filter_names(self, names, exclude=None, func=glob.fnmatch.filter):
if exclude is None:
exclude = []
elif isinstance(exclude, basestring):
exclude = exclude.split()
else:
exclude = [pattern for patterns in exclude for pattern in patterns.split()]
excluded_names = set(
[name for pattern in exclude for name in func(names, pattern)]
)
return set(names) - excluded_names
|
ingenieroariel/pinax
|
apps/misc/management/commands/build_media.py
|
Python
|
mit
| 11,666
|
[
"Brian"
] |
8bf4db758ad5427a703de13c0bd358ae2194a9a0e071e962f3d24f22ca7061bf
|
import nibabel as nib
import numpy as np
from sklearn.mixture import GaussianMixture
def assign_gaussian(data, n_components, covariance_type,
init_params='kmeans',
n_init=50,
):
classifier = GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
init_params=init_params,
n_init=n_init,
)
classifier.fit(data)
assignment = classifier.predict(data)
return assignment, classifier
def sort_by_occurence(assignments):
"""Change unique values in array to ordinal integers based on the number of occurences.
Parameters
----------
assignments : array
1-D array of values.
"""
labels, counts = np.unique(assignments, return_counts=True)
mysort = np.argsort(counts)[::-1]
counts = counts[mysort]
labels_ = labels[mysort]
new_labels = [i for i in range(len(labels))]
convert = dict(zip(labels_, new_labels))
keys,inv = np.unique(assignments,return_inverse = True)
assignments = np.array([convert[x] for x in keys])[inv].reshape(assignments.shape)
return assignments
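# A quick illustration of sort_by_occurence (the input values are hypothetical):
# the most frequent label maps to 0, the next most frequent to 1, and so on.
#
#   sort_by_occurence(np.array([5, 5, 5, 2, 2, 9]))
#   # -> array([0, 0, 0, 1, 1, 2])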
def assignment_from_paths(path_list,
components=4,
covariance='spherical',
mask='/usr/share/mouse-brain-templates/dsurqec_200micron_mask.nii',
save_as='',
):
"""Segment list of paths into Gaussian mixtures
Parameters
----------
path_list : list
List of strings which are paths to existing NIfTI files.
components : int, optional
Number of components to segment into.
covariance : {'spherical', 'diag', 'tied', 'full'}, optional
Covariance model to use for the gaussian mixture model.
mask : str, optional
Path to a mask in which to segment data.
Returns
-------
assignment_img : nibabel.Nifti1Image
NIfTI image of assignment.
retest_accuracy : float
Accuracy of single retest (percentage of assignments which overlap)
"""
mask = nib.load(mask)
data = []
affine = mask.affine
header = mask.header
mask_data = mask.get_data()
shape = mask.shape
mask_data = mask_data.flatten()
mask_data = mask_data.astype(bool)
all_data = []
for i in path_list:
img = nib.load(i)
data = img.get_data()
data = data.flatten()
data = data[mask_data]
all_data.append(data)
all_data = np.array(all_data)
assignments, classifier = assign_gaussian(all_data.T,components,covariance)
assignments = sort_by_occurence(assignments)
	# Re-fit with the same settings to obtain a single retest of the assignment.
	assignments_, classifier = assign_gaussian(all_data.T, components, covariance)
assignments_ = sort_by_occurence(assignments_)
retest_accuracy = np.mean(assignments_.ravel() == assignments.ravel()) * 100
assignments += 1
new_data = mask_data.astype(int)
new_data[mask_data] = assignments
new_data[~mask_data] = 0
new_data = new_data.reshape(shape)
assignment_img = nib.Nifti1Image(new_data, affine, header)
if save_as:
nib.save(assignment_img, save_as)
return assignment_img, retest_accuracy
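# Minimal usage sketch for assignment_from_paths (the NIfTI file names below
# are hypothetical placeholders; `mask` falls back to its default path):
#
#   seg_img, retest_accuracy = assignment_from_paths(
#       ['/path/to/scan1.nii.gz', '/path/to/scan2.nii.gz'],
#       components=4,
#       covariance='spherical',
#       save_as='segmentation.nii.gz',
#       )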
|
IBT-FMI/SAMRI
|
samri/analysis/segmentation.py
|
Python
|
gpl-3.0
| 2,769
|
[
"Gaussian"
] |
1649673b66a4b5153b429ceb1a83490542e68f6a11b71b57280e36b51966ff2c
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _
import six
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
pattern._callback = decorator(pattern.callback, *args, **kwargs)
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# cache. Currently, this has been causing issues with the cookie backend,
# adding 1600+ bytes to the cookie size.
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = patterns('')
return urlpatterns
# FIXME(lhcheng): Removed the access_cached decorator for now until
# a better implementation has been figured out. This has been causing
    # issues with the cookie backend, adding 1600+ bytes to the cookie size.
# @access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        # This check is an OR check rather than the AND check that is the
        # default in the policy engine, so each rule is called individually.
if policy_check and self.policy_rules:
for rule in self.policy_rules:
if policy_check((rule,), request):
return True
return False
# default to allowed
return True
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
'unregistered.' % self._registerable_class)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
panel will be registered and available to user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
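# A minimal sketch of a concrete panel definition, using only the attributes
# documented on Panel above (the class name, display name, slug and permission
# string are hypothetical placeholders):
#
#   class Instances(Panel):
#       name = _("Instances")
#       slug = 'instances'
#       permissions = ('my_app.view_instances',)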
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", DEFAULT_PANEL_GROUP)
self.name = name or getattr(self, "name", None)
# Our panels must be mutable so it can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return collections.OrderedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns += patterns('',
url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns += patterns('',
url(r'',
include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
default_created = False
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
if panel_group.slug == DEFAULT_PANEL_GROUP:
default_created = True
# Plugin panels can be added to a default panel group. Make sure such a
# default group exists.
if not default_created:
default_group = PanelGroup(self)
panel_groups.insert(0, (default_group.slug, default_group))
self._panel_groups = collections.OrderedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
        Checks for access to any panels in the dashboard and to the
        dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns += patterns('',
url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
This method parses the panel customization from the ``HORIZON_CONFIG``
        and makes changes to the dashboard accordingly.
        It supports adding, removing and setting default panels on the
        dashboard. It also supports registering a panel group.
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and, if so, call
                # it to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
|
redhat-cip/horizon
|
horizon/base.py
|
Python
|
apache-2.0
| 39,033
|
[
"VisIt"
] |
f96c5b658cbe5fd93dc90afcaabf36983bd80b7eb60c871e0a35094126f5d5fe
|
import ops_piggybacker as oink
import openpathsampling as paths
from .tools import *
from . import common_test_data as common
from openpathsampling.tests.test_helpers import make_1d_traj
import os.path
import sys
try:
import mdtraj as md
except ImportError:
HAS_MDTRAJ = False
else:
HAS_MDTRAJ = True
class StupidOneWayTPSConverter(oink.OneWayTPSConverter):
"""Test-ready subclass"""
def __init__(self, storage, initial_file, mover, network, options=None,
options_rejected=None):
self.test_dir = os.path.join(
os.path.dirname(__file__),
"test_data", "one_way_tps_examples"
)
super(StupidOneWayTPSConverter, self).__init__(
storage=storage,
initial_file=initial_file,
mover=mover,
network=network,
options=options,
options_rejected=options_rejected
)
self.summary_root_dir = ""
def load_trajectory(self, file_name):
f = open(os.path.join(self.test_dir, file_name), "r")
traj_list = [float(line) for line in f]
return make_1d_traj(traj_list)
class TestOneWayTPSConverter(object):
def setup(self):
test_dir = "one_way_tps_examples"
self.data_filename = lambda f : \
data_filename(os.path.join(test_dir, f))
old_store = paths.Storage(data_filename("tps_setup.nc"), "r")
self.network = old_store.networks[0]
tps_ensemble=self.network.sampling_ensembles[0]
self.shoot = oink.ShootingStub(tps_ensemble, pre_joined=False)
self.converter = StupidOneWayTPSConverter(
storage=paths.Storage(self.data_filename("output.nc"), "w"),
initial_file="file0.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(includes_shooting_point=False,
trim=False)
)
old_store.close()
def teardown(self):
try:
self.converter.storage.close()
except RuntimeError:
pass # test_run closes this already
if os.path.isfile(self.data_filename("output.nc")):
os.remove(self.data_filename("output.nc"))
def test_initial_extra_frames_fw_bw(self):
converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0_extra.data",
mover=self.shoot,
network=self.network
)
assert_equal(converter.extra_bw_frames, 3)
assert_equal(converter.extra_fw_frames, 4)
def _standard_summary_line_check(self, summary_file, converter):
summary = open(self.data_filename(summary_file), "r")
lines = [l for l in summary]
moves = common.tps_shooting_moves
for line, move in zip(lines, moves):
parsed_line = converter.parse_summary_line(line)
assert_equal(parsed_line[0], move[0]) # replicas
assert_array_almost_equal(parsed_line[1].coordinates,
move[4].coordinates) # trajectories
assert_equal(parsed_line[2], move[2]) # shooting points
assert_equal(parsed_line[3], move[3]) # acceptance
assert_equal(parsed_line[4], move[5]) # directions
def test_parse_summary_line(self):
self._standard_summary_line_check(
summary_file="summary.txt",
converter=self.converter
)
def test_parse_summary_line_extras(self):
extras_converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0_extra.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=True,
auto_reverse=True,
includes_shooting_point=True)
)
self._standard_summary_line_check(
summary_file="summary_extra.txt",
converter=extras_converter
)
def test_parse_summary_line_full_trajectory(self):
full_converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=False,
auto_reverse=False,
full_trajectory=True)
)
self._standard_summary_line_check(
summary_file="summary_full.txt",
converter=full_converter
)
def test_parse_summary_line_options_rejected(self):
options_rejected_converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=False,
auto_reverse=False,
full_trajectory=True),
options_rejected=oink.TPSConverterOptions(trim=False,
auto_reverse=True,
full_trajectory=False)
)
self._standard_summary_line_check(
summary_file="summary_full_accepted.txt",
converter=options_rejected_converter
)
def test_parse_summary_line_retrim_shooting_partial_accepted(self):
retrim_shooting_converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0_extra.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=True,
retrim_shooting=True,
auto_reverse=True,
includes_shooting_point=True,
full_trajectory=False)
)
self._standard_summary_line_check(
summary_file="summary_extra_retrim.txt",
converter=retrim_shooting_converter
)
def test_parse_summary_line_retrim_shooting_full_accepted(self):
retrim_shooting_converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0_extra.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=True,
retrim_shooting=True,
auto_reverse=False,
full_trajectory=True)
)
self._standard_summary_line_check(
summary_file="summary_full_retrim.txt",
converter=retrim_shooting_converter
)
def test_default_options(self):
converter = StupidOneWayTPSConverter(
storage=None,
initial_file="file0.data",
mover=oink.ShootingStub(self.network.sampling_ensembles[0]),
network=self.network
)
assert_equal(converter.options.trim, True)
assert_equal(converter.options.retrim_shooting, False)
assert_equal(converter.options.auto_reverse, False)
assert_equal(converter.options.includes_shooting_point, True)
assert_equal(converter.options.full_trajectory, False)
def _standard_analysis_checks(self, analysis):
# next is same as test_simulation_stubs (move to a common test?)
assert_equal(len(analysis.steps), 5) # initial + 4 steps
scheme = analysis.schemes[0]
assert_equal(list(scheme.movers.keys()), ['shooting'])
assert_equal(len(scheme.movers['shooting']), 1)
mover = scheme.movers['shooting'][0]
# use several OPS tools to analyze this file
## scheme.move_summary
devnull = open(os.devnull, 'w')
scheme.move_summary(analysis.steps, output=devnull)
mover_keys = [k for k in scheme._mover_acceptance._trials.keys()
if k[0] == mover]
assert_equal(len(mover_keys), 1)
assert_equal(scheme._mover_acceptance._trials[mover_keys[0]], 4)
assert_equal(scheme._mover_acceptance._accepted[mover_keys[0]], 3)
# assert_equal(scheme._mover_acceptance[mover_keys[0]], [3,4])
## move history tree
import openpathsampling.visualize as ops_vis
history = ops_vis.PathTree(
analysis.steps,
ops_vis.ReplicaEvolution(replica=0)
)
assert_equal(len(history.generator.decorrelated_trajectories), 2)
## path length histogram
path_lengths = [len(step.active[0].trajectory)
for step in analysis.steps]
assert_equal(path_lengths, [11, 9, 7, 7, 7])
def test_run(self):
self.converter.run(self.data_filename("summary.txt"))
self.converter.storage.close()
analysis = paths.AnalysisStorage(self.data_filename("output.nc"))
self._standard_analysis_checks(analysis)
analysis.close()
def test_run_with_negative_shooting_point(self):
shoot = oink.ShootingStub(self.network.sampling_ensembles[0],
pre_joined=False)
converter = StupidOneWayTPSConverter(
storage=paths.Storage(self.data_filename("neg_sp.nc"), "w"),
initial_file="file0.data",
mover=shoot,
network=self.network,
options=oink.TPSConverterOptions(includes_shooting_point=False,
trim=False)
)
converter.run(self.data_filename("summary_neg_sp.txt"))
converter.storage.close()
analysis = paths.AnalysisStorage(self.data_filename("neg_sp.nc"))
self._standard_analysis_checks(analysis)
analysis.close()
if os.path.isfile(self.data_filename("neg_sp.nc")):
os.remove(self.data_filename("neg_sp.nc"))
def test_run_with_neg_sp_retrim(self):
storage_file = self.data_filename("retrim_negsp.nc")
storage = paths.Storage(storage_file, 'w')
converter = StupidOneWayTPSConverter(
storage=storage,
initial_file="file0_extra.data",
mover=self.shoot,
network=self.network,
options=oink.TPSConverterOptions(trim=True,
retrim_shooting=True,
auto_reverse=False,
full_trajectory=True)
)
converter.run(self.data_filename("summary_full_retrim_negsp.txt"))
storage.close()
analysis = paths.AnalysisStorage(storage_file)
step4 = analysis.steps[4]
self._standard_analysis_checks(analysis)
analysis.close()
if os.path.isfile(storage_file):
os.remove(storage_file)
class TestGromacsOneWayTPSConverter(object):
def setup(self):
from openpathsampling.engines.openmm.tools import ops_load_trajectory
if not HAS_MDTRAJ:
raise SkipTest("Missing MDTraj")
test_dir = "gromacs_1way"
self.data_filename = lambda f : \
data_filename(os.path.join(test_dir, f))
topology_file = self.data_filename("dna.gro")
initial_file = self.data_filename("initial.xtc")
init_traj = ops_load_trajectory(initial_file, top=topology_file)
self.network = self._wc_hg_TPS_network(init_traj.topology)
acc_options = oink.TPSConverterOptions(trim=True,
retrim_shooting=True,
auto_reverse=False,
includes_shooting_point=True,
full_trajectory=True)
rej_options = oink.TPSConverterOptions(trim=True,
retrim_shooting=True,
auto_reverse=True,
includes_shooting_point=True,
full_trajectory=False)
storage = paths.Storage(self.data_filename("gromacs.nc"), 'w')
# initialization includes smoke test of load_trajectory
self.converter = oink.GromacsOneWayTPSConverter(
storage=storage,
network=self.network,
initial_file=initial_file,
topology_file=topology_file,
options=acc_options,
options_rejected=rej_options
)
self.converter.report_progress = sys.stdout
self.converter.n_trajs_per_block = 1
def teardown(self):
self.converter.storage.close()
if os.path.exists(self.data_filename("gromacs.nc")):
os.remove(self.data_filename("gromacs.nc"))
def _wc_hg_TPS_network(self, topology):
# separated for readability, not re-usability
d_WC = paths.MDTrajFunctionCV("d_WC", md.compute_distances,
topology, atom_pairs=[[275, 494]])
d_HG = paths.MDTrajFunctionCV("d_HG", md.compute_distances,
topology, atom_pairs=[[275, 488]])
d_bp = paths.MDTrajFunctionCV("d_bp", md.compute_distances,
topology, atom_pairs=[[274, 491]])
state_WC = (paths.CVDefinedVolume(d_WC, 0.0, 0.35) &
paths.CVDefinedVolume(d_bp, 0.0, 0.35)).named("WC")
state_HG = (paths.CVDefinedVolume(d_HG, 0.0, 0.35) &
paths.CVDefinedVolume(d_bp, 0.0, 0.35)).named("HG")
network = paths.TPSNetwork(state_WC, state_HG)
return network
def test_options_setup(self):
assert_equal(self.converter.options.full_trajectory, True)
assert_equal(self.converter.options_rejected.full_trajectory, False)
def test_run(self):
self.converter.run(self.data_filename("summary.txt"))
|
dwhswenson/OPSPiggybacker
|
ops_piggybacker/tests/test_one_way_tps_converters.py
|
Python
|
lgpl-2.1
| 14,133
|
[
"Gromacs",
"MDTraj",
"OpenMM"
] |
a9691d0481fbbfa4946b030a087028c469c419e8fdb68bf5a448e9a5f7e2c26e
|
def main():
"""
    Takes in an existing bad pixel mask and additionally masks out the inner radius of the galaxy.
Version history
2013.10.17 - Eric Gentry - initial draft completed
"""
from astropy.io import fits
import numpy as np
###
main_dir = '/home/egentry/Data/HST/PictorA/bad_pixel_masks/'
mask_filename_in = 'f160w_drz_mask.fits'
mask_filename_out = 'f160w_drz_mask_OuterRadii.fits'
###
mask_in_hdulist = fits.open(main_dir + mask_filename_in)
mask_in_data = mask_in_hdulist[0].data
mask_size_x = mask_in_data.shape[1]
mask_size_y = mask_in_data.shape[0]
#Center region to crop out:
center_radius = 75 # pixels
center_x = 758 #pixel, zero-indexed
center_y = 265 #pixel, zero-indexed
mask_out_data = np.zeros( mask_in_data.shape )
for x_i in xrange(mask_size_x):
for y_i in xrange(mask_size_y):
if (center_x - x_i)**2 + (center_y - y_i)**2 < center_radius**2:
mask_out_data[y_i][x_i] = 1
else:
mask_out_data[y_i][x_i] = mask_in_data[y_i][x_i]
mask_out_hdulist = fits.PrimaryHDU(mask_out_data)
mask_out_hdulist.writeto(main_dir + mask_filename_out, clobber=True)
|
egentry/Galaxy_Subtraction
|
Create_OuterRadii_mask.py
|
Python
|
gpl-2.0
| 1,291
|
[
"Galaxy"
] |
d9b3abed6a7e39cc0ec0331f22641f197a0037e69b12bbd12ce92f115914037c
|
tests=[
("testExecs/test1.exe","",{}),
("testExecs/testMolSupplier.exe","",{}),
("testExecs/testMolWriter.exe","",{}),
("testExecs/testTplParser.exe","",{}),
("testExecs/testMol2ToMol.exe","",{}),
]
longTests=[]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
soerendip42/rdkit
|
Code/GraphMol/FileParsers/test_list.py
|
Python
|
bsd-3-clause
| 380
|
[
"RDKit"
] |
1205d9a3c9094f94767a09bacace7af341d62d3669c03842070ae103b4cc6578
|
#!/usr/bin/python
# from https://github.com/jameslz/benchmark
import sys
if len(sys.argv) != 4:
print "\nUsage: %s <alignment> <evalue> <bit_score>\n" % sys.argv[0]
sys.exit(1)
blast = sys.argv[1]
max_evalue = float(sys.argv[2])
min_bit_score = float(sys.argv[3])
with open(blast, 'r') as fp:
init = ""
for line in fp:
if not line.startswith("#"):
item = line.strip().split("\t")
evalue = float(item[10])
bit_score = float(item[11])
if init != item[0]:
if evalue <= max_evalue and bit_score >= min_bit_score:
print line.strip()
init = item[0]
|
DawnEve/bio_scripts
|
blast/blast_best_hit_outfmt6.py
|
Python
|
mit
| 703
|
[
"BLAST"
] |
0b0ae42fc6cdae4659aee9c88174b62345c0eb946c0f44cebd50f3245f7592c4
|
'''
* Author: Lukasz Jachym
* Date: 9/14/13
* Time: 3:43 PM
*
* This work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
* To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/.
'''
from collections import namedtuple
import csv
from dateutil.parser import *
from itertools import groupby
import operator
import sys
from classifier import parse_source
import exchange
import locale
class VisitRecord(object):
def __init__(self, visit_date, user_id, visit_id, source, order_id):
self.visit_date = visit_date
self.user_id = user_id
self.visit_id = visit_id
channel = parse_source(source)
self.source_type = channel.type
self.source_branded = channel.branded
self.order_id = order_id
OrderRecord = namedtuple('OrderRecord', ['order_date', 'country', 'order_id', 'value', 'currency', 'product_category', 'source_type', 'source_branded'])
def get_csv_data(path):
content = []
with open(path, 'rb') as csv_in:
reader = csv.DictReader(csv_in, delimiter=';', quotechar='"')
for item in reader:
content.append(item)
return content
def parse_visitors(path, for_date=None):
visitors_raw = get_csv_data(path)
all_visitors = []
visitors_with_orders = {}
# "VisitDate";"UserID";"VisitID";"Source";"OrderID"
for visitor in visitors_raw:
date_formatted = parse(visitor['VisitDate']).strftime("%Y-%m-%d")
order_id = visitor['OrderID']
visit_record = VisitRecord(date_formatted, visitor['UserID'], visitor['VisitID'], visitor['Source'], order_id)
visitors_with_orders[order_id] = visit_record
if not for_date:
all_visitors.append(visit_record)
else:
if for_date == date_formatted:
all_visitors.append(visit_record)
return all_visitors, visitors_with_orders
def parse_orders(path, visitors_with_orders_dict, for_date=None):
orders_raw = get_csv_data(path)
orders = []
locale_map = { 'UK': 'en_GB',
'FR': 'eu_FR',
'ES': 'eu_ES',
'IT': 'it_IT',
'US': 'en_US',
'EUR': 'eu_FR',
'GBP': 'en_GB',
'USD': 'en_US',
}
# "OrderDate";"Country";"OrderId";"Value";"Currency";"ProductCategory"
for order in orders_raw:
date_formatted = parse(order['OrderDate']).strftime("%Y-%m-%d")
country = order['Country']
order_id = order['OrderId']
currency = order['Currency']
prod_category = order['ProductCategory']
value = 0
try:
country_locale = '%s.UTF-8' % locale_map[country]
locale.setlocale( locale.LC_ALL, country_locale)
value = locale.atoi(order['Value'])
except locale.Error:
print('Install locale %s' % country_locale)
# fallback, hold tight!
except ValueError:
value = exchange.str_to_int(order['Value'])
assert value > 0, 'Could not convert transaction value of record: %s' % order
try:
source_type = visitors_with_orders_dict[order_id].source_type
source_branded = visitors_with_orders_dict[order_id].source_branded
except KeyError:
print('Cannot find order: %s, ignoring, expected inconsistencies.' % order_id)
source_type = None
source_branded = None
pass
order_record = OrderRecord(date_formatted, country, order_id, value, currency, prod_category, source_type, source_branded)
if not for_date:
orders.append(order_record)
else:
if for_date == date_formatted:
orders.append(order_record)
return orders
def group_collection_by_key(collection, key):
groups_by_channel_type = {}
sorted_x = sorted(collection, key=key)
for k, g in groupby(sorted_x, key):
groups_by_channel_type[k] = list(g)
return groups_by_channel_type
labels = {(3, False): 'PPC_NonBrand',
(2, False): 'SEO_NonBrand',
(1, False): 'Direct',
(3, True): 'PPC_Brand',
(2, True): 'SEO_Brand',
(None, None): 'Unknown'
}
def print_visitors_summary(groups_by_channel_type):
print('Visitors:')
for channel_type in groups_by_channel_type:
visitors_by_channel_type = groups_by_channel_type[channel_type]
by_user_id = lambda obj: obj.user_id
unique_user_ids = set(map(by_user_id, visitors_by_channel_type))
print('Channel: %s - %d Users, %d Visits' % (
labels[channel_type], len(unique_user_ids), len(visitors_by_channel_type)))
assert len(unique_user_ids) <= len(visitors_by_channel_type)
def print_order_summary(orders_by_key, total_orders_value, currency):
print('Orders:')
for order_type in orders_by_key:
print('%s - %s - %s = %d orders, %d %s' % (order_type[0], order_type[1], labels[order_type[2:4]], len(orders_by_key[order_type]), total_orders_value[order_type], currency))
print('')
date = sys.argv[1]
currency = sys.argv[2]
# print('Parsing data...')
visitors, visitors_with_orders = parse_visitors('../../data/task1/visitors.txt', date)
orders = parse_orders('../../data/task1/orders.txt', visitors_with_orders, date)
# print('Transforming views...')
by_src_type = lambda container: (container.source_type, container.source_branded)
visitors_by_channel_type = group_collection_by_key(visitors, by_src_type)
by_country_product_channel = lambda container: (container.country, container.product_category, container.source_type, container.source_branded)
orders_by_key = group_collection_by_key(orders, by_country_product_channel)
total_orders_values = {}
exchange_rates_api = exchange.OpenExchangeClient('1aecd98b5fd9489fac736c2345379bcf')
exchange_rates = exchange_rates_api.get_historical_rates(date)
for key in orders_by_key:
selected_orders = orders_by_key[key]
total_orders_value = 0
for order in selected_orders:
order_value = exchange.convert(order.value, order.currency, currency, exchange_rates)
total_orders_value += order_value
total_orders_values[key] = total_orders_value
print_order_summary(orders_by_key, total_orders_values, currency)
print_visitors_summary(visitors_by_channel_type)
|
b1r3k/recruitment-challanges
|
data-hacking/src/task1/task1_crunchstats.py
|
Python
|
mit
| 6,464
|
[
"VisIt"
] |
4ff492d176621d93b3f76a93dabd619d27e3be7150677a6b3976c783719a5cf6
|
import numpy as np
from scipy import interpolate
from progressbar import ProgressBar, Bar, Percentage
class ImpulseResponseFunction(object):
'''Internal bemio object to contain impulse response function (IRF) data
'''
pass
class WaveElevationTimeSeries(object):
'''Internal bemio object to contain wave elevation time series data
'''
pass
class WaveExcitationForce(object):
'''Internal bemio object to contain wave excitation force data
'''
pass
class WaveExcitationConvolution(object):
'''
Object for calculating wave excitation force time history using the
convolution method
Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
    Attributes:
self.irf : ImpulseResponseFunction
Object containing excitation force IRF information
self.wave_elevation : WaveElevationTimeSeries
Object containing wave elevation time series data
self.excitation_force : WaveExcitationForce
Object containing wave excitation force data
'''
def __init__(self, irf, irf_t, eta, eta_t):
self.irf = ImpulseResponseFunction()
self.wave_elevation = WaveElevationTimeSeries()
self.excitation_force = WaveExcitationForce()
self.irf.f = irf
self.irf.t = irf_t
self.wave_elevation.eta = eta
self.wave_elevation.t = eta_t
self.wave_elevation.dt = self.wave_elevation.t[1] - self.wave_elevation.t[0]
self._excitation_convolution()
def _excitation_convolution(self):
'''Internal function to perform the wave excitation convolution
'''
eta_interp = interpolate.interp1d(x=self.wave_elevation.t, y=self.wave_elevation.eta, bounds_error=False, fill_value=0.)
irf_interp = interpolate.interp1d(x=self.irf.t, y=self.irf.f, bounds_error=False, fill_value=0.)
# Interpolate the IRF to the dt as the wave elevation data
irf = irf_interp(np.linspace(self.irf.t.min(),self.irf.t.max(),(self.irf.t.max()-self.irf.t.min())/self.wave_elevation.dt+1))
# Assume that the IRF dt is used unless specified by the user
# if self.excitation_force.dt is None:
# self.excitation_force.dt = self.irf.t[1] - self.irf.t[0]
        # This code calculates the wave excitation force manually - the convolve-based method below is much more efficient
# self.excitation_force.t = np.linspace(self.wave_elevation.t.min(), self.wave_elevation.t.max(), (self.wave_elevation.t.max()-self.wave_elevation.t.min())/self.excitation_force.dt+1)
# pbar_max_val = self.excitation_force.t.max()
# pbar = ProgressBar(widgets=['Calculating the excitation force time history:', Percentage(), Bar()], maxval=pbar_max_val).start()
# f_ex = []
# for t in self.excitation_force.t:
# f_ex.append(np.trapz(y=irf_interp(self.irf.t)*eta_interp(t-self.irf.t),x=self.irf.t))
#
# pbar.update(t)
# pbar.finish()
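        # Descriptive note (added comment): np.convolve below evaluates the discrete
        # form of the convolution integral F_ex(t) = integral of K(tau)*eta(t - tau) d(tau),
        # i.e. sum_k K(t_k)*eta(t - t_k)*dt, which is why the convolution result is
        # scaled by the wave elevation time step dt.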
f_ex_conv = np.convolve(self.wave_elevation.eta, irf, mode='same')*self.wave_elevation.dt
self.excitation_force.f = np.array(f_ex_conv)
self.excitation_force.t = self.wave_elevation.t
def convolution(irf, irf_t, eta, eta_t, dt=None):
'''
Function to calculate wave excitation force using the convolution method
    Parameters:
irf : np.array
Wave excitation force impulse response function.
irf_t : np.array
Time series corresponding to `irf`
eta : np.array
Wave elevation time series
eta_t : np.array
Time series corresponding to `eta`
dt : float, optional
        Time step used when calculating the excitation force.
Returns:
excitation_force : WaveExcitationConvolution
This function returns a `WaveExcitationConvolution` object with
the wave exciting force and other information. See the
`WaveExcitationConvolution` for more information.
Example:
    The following example assumes that variables `irf`, `irf_t`, `eta`, and
    `eta_t` of type np.array exist in the workspace. The contents of
    these variables are described above.
Calculate excitation force using the convolution method
>>> ex = convolution(irf=irf, irf_t=irf_t, eta=eta, eta_t=eta_t)
    Plot the data (assuming matplotlib.pyplot is imported as plt)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.plot(ex.excitation_force.t, ex.excitation_force.f)
'''
excitation_force = WaveExcitationConvolution(irf, irf_t, eta, eta_t)
return excitation_force
|
NREL/OpenWARP
|
source/automated_test/bemio/data_structures/wave_excitation.py
|
Python
|
apache-2.0
| 4,833
|
[
"exciting"
] |
1ca4e4a98c7d2b1ede7358ab4873935ebb08cbeeb7594e7c5b93423602ab309f
|
'''
NOTE: This is adapted from
http://sociograph.blogspot.com/2011/11/scalable-mean-shift-clustering-in-few.html
'''
import numpy as np
from sklearn.neighbors import BallTree, NearestNeighbors
from sklearn.utils import extmath
from sklearn.metrics.pairwise import euclidean_distances
from collections import defaultdict
def mean_shift(X, bandwidth, n_seeds, kernel_function='gaussian', max_iterations=100, proximity_thresh=5):
    '''
    ---Parameters---
    X : data in form (samples, dims)
    bandwidth : radius of nearest neighbors
    n_seeds : number of seed points drawn at random from X
    kernel_function : can be "gaussian" or "flat" or your own kernel function
    max_iterations : maximum number of mean shift iterations per seed
    proximity_thresh : minimum distance (in pixels) a new cluster must be away from previous ones
    ---Returns---
    cluster_centers : positions of the converged cluster centers
    cluster_counts : how many pixels are within the neighborhood of each cluster
    '''
if kernel_function == 'gaussian':
kernel_update_function = gaussian_kernel
elif kernel_function == 'flat':
kernel_update_function = flat_kernel
else:
kernel_update_function = kernel_function
n_points, n_features = X.shape
stop_thresh = 1e-2 * bandwidth # when mean has converged
cluster_centers = []
cluster_counts = []
# ball_tree = BallTree(X)# to efficiently look up nearby points
neighbors = NearestNeighbors(radius=bandwidth).fit(X)
seeds = X[(np.random.uniform(0,X.shape[0], n_seeds)).astype(np.int)]
# For each seed, climb gradient until convergence or max_iterations
for weighted_mean in seeds:
completed_iterations = 0
while True:
points_within = X[neighbors.radius_neighbors([weighted_mean], bandwidth, return_distance=False)[0]]
old_mean = weighted_mean # save the old mean
weighted_mean = kernel_update_function(old_mean, points_within, bandwidth)
converged = extmath.norm(weighted_mean - old_mean) < stop_thresh
if converged or completed_iterations == max_iterations:
# Only add cluster if it's different enough from other centers
if len(cluster_centers) > 0:
diff_from_prev = [np.linalg.norm(weighted_mean-cluster_centers[i], 2) for i in range(len(cluster_centers))]
if np.min(diff_from_prev) > proximity_thresh:
cluster_centers.append(weighted_mean)
cluster_counts.append(points_within.shape[0])
else:
cluster_centers.append(weighted_mean)
cluster_counts.append(points_within.shape[0])
break
completed_iterations += 1
return cluster_centers, cluster_counts
def gaussian_kernel(x, points, bandwidth):
distances = euclidean_distances(points, x)
weights = np.exp(-1 * (distances ** 2 / bandwidth ** 2))
return np.sum(points * weights, axis=0) / np.sum(weights)
def flat_kernel(x, points, bandwidth):
return np.mean(points, axis=0)
def bin_points(X, bin_size, min_bin_freq):
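    # Descriptive note (added comment): quantize the points of X onto a grid with
    # cell width bin_size and return one representative point for every cell that
    # contains at least min_bin_freq points; these are intended as seed locations
    # for mean shift.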
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.cast[np.int32](point / bin_size)
bin_sizes[tuple(binned_point)] += 1
bin_seeds = np.array([point for point, freq in bin_sizes.iteritems() if freq >= min_bin_freq], dtype=np.float32)
bin_seeds = bin_seeds * bin_size
return bin_seeds
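# Illustrative usage sketch (not part of the original module). The variable
# name `features` is hypothetical and stands for any (n_samples, n_dims) array,
# e.g. pixel coordinates extracted from a depth image.
#
#   features = np.random.rand(1000, 2) * 100
#   centers, counts = mean_shift(features, bandwidth=10.0, n_seeds=25,
#                                kernel_function='gaussian',
#                                proximity_thresh=5)
#   # `centers` holds the converged cluster centers; `counts` gives the number
#   # of points within `bandwidth` of each center.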
|
colincsl/pyKinectTools
|
pyKinectTools/algs/MeanShift.py
|
Python
|
bsd-2-clause
| 3,990
|
[
"Gaussian"
] |
5a8f764fd76794e00edb7e8b032075888c0800d6d2dd04b9e8a4b6f3e0a02a8e
|
# -*- coding: utf-8 -*-
#
# synaptic_elements.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'naveau'
import nest
import unittest
class TestSynapticElements(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
def test_set_status(self):
synaptic_element_dict = {u'SE': {u'z': 15.0, u'growth_curve': u'linear'}}
neuron = nest.Create('iaf_neuron', 1)
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict})
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertIn('SE', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict[u'SE'], neuron_synaptic_elements[u'SE'])
def test_set_status_overwrite(self):
synaptic_element_dict1 = {u'SE1': {u'z': 15.0, u'growth_curve': u'linear'}}
synaptic_element_dict2 = {u'SE2': {u'z': 10.0, u'growth_curve': u'gaussian'}}
neuron = nest.Create('iaf_neuron', 1)
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict1})
nest.SetStatus(neuron, {'synaptic_elements': synaptic_element_dict2})
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertNotIn('SE1', neuron_synaptic_elements)
self.assertIn('SE2', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict2[u'SE2'], neuron_synaptic_elements[u'SE2'])
def test_set_defaults(self):
synaptic_element_dict = {u'SE': {u'z': 15.0, u'growth_curve': u'linear'}}
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict})
neuron = nest.Create('iaf_neuron', 1)
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertIn('SE', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict[u'SE'], neuron_synaptic_elements[u'SE'])
def test_set_defaults_overwrite(self):
synaptic_element_dict1 = {u'SE1': {u'z': 15.0, u'growth_curve': u'linear'}}
synaptic_element_dict2 = {u'SE2': {u'z': 10.0, u'growth_curve': u'gaussian'}}
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict1})
nest.SetDefaults('iaf_neuron', {'synaptic_elements': synaptic_element_dict2})
neuron = nest.Create('iaf_neuron', 1)
neuron_synaptic_elements = nest.GetStatus(neuron, 'synaptic_elements')[0]
self.assertNotIn('SE1', neuron_synaptic_elements)
self.assertIn('SE2', neuron_synaptic_elements)
self.assertDictContainsSubset(synaptic_element_dict2[u'SE2'], neuron_synaptic_elements[u'SE2'])
def suite():
test_suite = unittest.makeSuite(TestSynapticElements, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
|
zifeo/nest-simulator
|
pynest/nest/tests/test_msp/synaptic_elements.py
|
Python
|
gpl-2.0
| 3,443
|
[
"Gaussian",
"NEURON"
] |
930b22ef5c45bc255e25b717484e2b449b8562a632e5028c3012a4da3271e1a9
|
import os
import pprint
import sys
import textwrap
import py
import pytest
from _pytest.config import ExitCode
from _pytest.main import _in_venv
from _pytest.main import Session
from _pytest.pytester import Testdir
class TestCollector:
def test_collect_versus_item(self):
from pytest import Collector, Item
assert not issubclass(Collector, Item)
assert not issubclass(Item, Collector)
def test_check_equality(self, testdir: Testdir) -> None:
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert isinstance(fn, pytest.Function)
assert fn != 3 # type: ignore[comparison-overlap] # noqa: F821
assert fn != modcol
assert fn != [1, 2, 3] # type: ignore[comparison-overlap] # noqa: F821
assert [1, 2, 3] != fn # type: ignore[comparison-overlap] # noqa: F821
assert modcol != fn
assert testdir.collect_by_name(modcol, "doesnotexist") is None
def test_getparent(self, testdir):
modcol = testdir.getmodulecol(
"""
class TestClass(object):
def test_foo():
pass
"""
)
cls = testdir.collect_by_name(modcol, "TestClass")
fn = testdir.collect_by_name(testdir.collect_by_name(cls, "()"), "test_foo")
parent = fn.getparent(pytest.Module)
assert parent is modcol
parent = fn.getparent(pytest.Function)
assert parent is fn
parent = fn.getparent(pytest.Class)
assert parent is cls
def test_getcustomfile_roundtrip(self, testdir):
hello = testdir.makefile(".xxx", hello="world")
testdir.makepyfile(
conftest="""
import pytest
class CustomFile(pytest.File):
pass
def pytest_collect_file(path, parent):
if path.ext == ".xxx":
return CustomFile.from_parent(fspath=path, parent=parent)
"""
)
node = testdir.getpathnode(hello)
assert isinstance(node, pytest.File)
assert node.name == "hello.xxx"
nodes = node.session.perform_collect([node.nodeid], genitems=False)
assert len(nodes) == 1
assert isinstance(nodes[0], pytest.File)
def test_can_skip_class_with_test_attr(self, testdir):
"""Assure test class is skipped when using `__test__=False` (See #2007)."""
testdir.makepyfile(
"""
class TestFoo(object):
__test__ = False
def __init__(self):
pass
def test_foo():
assert True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["collected 0 items", "*no tests ran in*"])
class TestCollectFS:
def test_ignored_certain_directories(self, testdir):
tmpdir = testdir.tmpdir
tmpdir.ensure("build", "test_notfound.py")
tmpdir.ensure("dist", "test_notfound.py")
tmpdir.ensure("_darcs", "test_notfound.py")
tmpdir.ensure("CVS", "test_notfound.py")
tmpdir.ensure("{arch}", "test_notfound.py")
tmpdir.ensure(".whatever", "test_notfound.py")
tmpdir.ensure(".bzr", "test_notfound.py")
tmpdir.ensure("normal", "test_found.py")
for x in tmpdir.visit("test_*.py"):
x.write("def test_hello(): pass")
result = testdir.runpytest("--collect-only")
s = result.stdout.str()
assert "test_notfound" not in s
assert "test_found" in s
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
testdir.tmpdir.ensure("virtual", bindir, fname)
testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
result.stdout.no_fnmatch_line("*test_invenv*")
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
# allow test collection if user directly passes in the directory
result = testdir.runpytest("virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# norecursedirs takes priority
testdir.tmpdir.ensure(".virtual", bindir, fname)
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
result.stdout.no_fnmatch_line("*test_invenv*")
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@pytest.mark.parametrize(
"fname",
(
"activate",
"activate.csh",
"activate.fish",
"Activate",
"Activate.bat",
"Activate.ps1",
),
)
def test__in_venv(self, testdir, fname):
"""Directly test the virtual env detection function"""
bindir = "Scripts" if sys.platform.startswith("win") else "bin"
# no bin/activate, not a virtualenv
base_path = testdir.tmpdir.mkdir("venv")
assert _in_venv(base_path) is False
# with bin/activate, totally a virtualenv
base_path.ensure(bindir, fname)
assert _in_venv(base_path) is True
def test_custom_norecursedirs(self, testdir):
testdir.makeini(
"""
[pytest]
norecursedirs = mydir xyz*
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass")
tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0")
tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass")
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
rec = testdir.inline_run("xyz123/test_2.py")
rec.assertoutcome(failed=1)
def test_testpaths_ini(self, testdir, monkeypatch):
testdir.makeini(
"""
[pytest]
testpaths = gui uts
"""
)
tmpdir = testdir.tmpdir
tmpdir.ensure("env", "test_1.py").write("def test_env(): pass")
tmpdir.ensure("gui", "test_2.py").write("def test_gui(): pass")
tmpdir.ensure("uts", "test_3.py").write("def test_uts(): pass")
# executing from rootdir only tests from `testpaths` directories
# are collected
items, reprec = testdir.inline_genitems("-v")
assert [x.name for x in items] == ["test_gui", "test_uts"]
# check that explicitly passing directories in the command-line
# collects the tests
for dirname in ("env", "gui", "uts"):
items, reprec = testdir.inline_genitems(tmpdir.join(dirname))
assert [x.name for x in items] == ["test_%s" % dirname]
# changing cwd to each subdirectory and running pytest without
# arguments collects the tests in that directory normally
for dirname in ("env", "gui", "uts"):
monkeypatch.chdir(testdir.tmpdir.join(dirname))
items, reprec = testdir.inline_genitems()
assert [x.name for x in items] == ["test_%s" % dirname]
class TestCollectPluginHookRelay:
def test_pytest_collect_file(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_file(self, path):
if not path.basename.startswith("."):
# Ignore hidden files, e.g. .testmondata.
wascalled.append(path)
testdir.makefile(".abc", "xyz")
pytest.main([testdir.tmpdir], plugins=[Plugin()])
assert len(wascalled) == 1
assert wascalled[0].ext == ".abc"
@pytest.mark.filterwarnings("ignore:.*pytest_collect_directory.*")
def test_pytest_collect_directory(self, testdir):
wascalled = []
class Plugin:
def pytest_collect_directory(self, path):
wascalled.append(path.basename)
testdir.mkdir("hello")
testdir.mkdir("world")
pytest.main(testdir.tmpdir, plugins=[Plugin()])
assert "hello" in wascalled
assert "world" in wascalled
class TestPrunetraceback:
def test_custom_repr_failure(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyError(Exception):
pass
class MyFile(pytest.File):
def collect(self):
raise MyError()
def repr_failure(self, excinfo):
if excinfo.errisinstance(MyError):
return "hello world"
return pytest.File.repr_failure(self, excinfo)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*hello world*"])
@pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile(
"""
import not_exists
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report():
outcome = yield
rep = outcome.get_result()
rep.headerlines += ["header1"]
outcome.force_result(rep)
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*ERROR collecting*", "*header1*"])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return path.basename.startswith("x") or \
path.basename == "test_one.py"
"""
)
sub = testdir.mkdir("xy123")
sub.ensure("test_hello.py").write("syntax error")
sub.join("conftest.py").write("syntax error")
testdir.makepyfile("def test_hello(): pass")
testdir.makepyfile(test_one="syntax error")
result = testdir.runpytest("--fulltrace")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_ignore_collect_not_called_on_argument(self, testdir):
testdir.makeconftest(
"""
def pytest_ignore_collect(path, config):
return True
"""
)
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_collectignore_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore = ['hello', 'test_world.py']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore[:] = []
"""
)
testdir.mkdir("hello")
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*passed*")
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
def test_collectignoreglob_exclude_on_option(self, testdir):
testdir.makeconftest(
"""
collect_ignore_glob = ['*w*l[dt]*']
def pytest_addoption(parser):
parser.addoption("--XX", action="store_true", default=False)
def pytest_configure(config):
if config.getvalue("XX"):
collect_ignore_glob[:] = []
"""
)
testdir.makepyfile(test_world="def test_hello(): pass")
testdir.makepyfile(test_welt="def test_hallo(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*collected 0 items*"])
result = testdir.runpytest("--XX")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
def test_pytest_fs_collect_hooks_are_seen(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule(path, parent)
"""
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
result = testdir.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
conf1 = testdir.makeconftest(
"""
import pytest
class MyModule1(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule1(path, parent)
"""
)
conf1.move(sub1.join(conf1.basename))
conf2 = testdir.makeconftest(
"""
import pytest
class MyModule2(pytest.Module):
pass
def pytest_collect_file(path, parent):
if path.ext == ".py":
return MyModule2(path, parent)
"""
)
conf2.move(sub2.join(conf2.basename))
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
result = testdir.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
class TestSession:
def test_parsearg(self, testdir) -> None:
p = testdir.makepyfile("def test_func(): pass")
subdir = testdir.mkdir("sub")
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session.from_config(config)
assert rcol.fspath == subdir
fspath, parts = rcol._parsearg(p.basename)
assert fspath == target
assert len(parts) == 0
fspath, parts = rcol._parsearg(p.basename + "::test_func")
assert fspath == target
assert parts[0] == "test_func"
assert len(parts) == 1
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
# XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session.from_config(config)
assert topdir == rcol.fspath
# rootid = rcol.nodeid
# root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]
# assert root2 == rcol, rootid
colitems = rcol.perform_collect([rcol.nodeid], genitems=False)
assert len(colitems) == 1
assert colitems[0].fspath == p
def get_reported_items(self, hookrec):
"""Return pytest.Item instances reported by the pytest_collectreport hook"""
calls = hookrec.getcalls("pytest_collectreport")
return [
x
for call in calls
for x in call.report.result
if isinstance(x, pytest.Item)
]
def test_collect_protocol_single_function(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
(item,) = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
pprint.pprint(hookrec.calls)
topdir = testdir.tmpdir # noqa
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == topdir"),
("pytest_make_collect_report", "collector.fspath == topdir"),
("pytest_collectstart", "collector.fspath == p"),
("pytest_make_collect_report", "collector.fspath == p"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.result[0].name == 'test_func'"),
]
)
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_func"]
def test_collect_protocol_method(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
normid = p.basename + "::TestClass::test_method"
for id in [p.basename, p.basename + "::TestClass", normid]:
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 1
assert items[0].name == "test_method"
newid = items[0].nodeid
assert newid == normid
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
def test_collect_custom_nodes_multi_id(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
testdir.makeconftest(
"""
import pytest
class SpecialItem(pytest.Item):
def runtest(self):
return # ok
class SpecialFile(pytest.File):
def collect(self):
return [SpecialItem(name="check", parent=self)]
def pytest_collect_file(path, parent):
if path.basename == %r:
return SpecialFile(fspath=path, parent=parent)
"""
% p.basename
)
id = p.basename
items, hookrec = testdir.inline_genitems(id)
pprint.pprint(hookrec.calls)
assert len(items) == 2
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == collector.session.fspath"),
(
"pytest_collectstart",
"collector.__class__.__name__ == 'SpecialFile'",
),
("pytest_collectstart", "collector.__class__.__name__ == 'Module'"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith(p.basename)"),
]
)
assert len(self.get_reported_items(hookrec)) == 2
def test_collect_subdir_event_ordering(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
test_aaa = aaa.join("test_aaa.py")
p.move(test_aaa)
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"),
]
)
def test_collect_two_commandline_args(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
aaa = testdir.mkpydir("aaa")
bbb = testdir.mkpydir("bbb")
test_aaa = aaa.join("test_aaa.py")
p.copy(test_aaa)
test_bbb = bbb.join("test_bbb.py")
p.move(test_bbb)
id = "."
items, hookrec = testdir.inline_genitems(id)
assert len(items) == 2
pprint.pprint(hookrec.calls)
hookrec.assert_contains(
[
("pytest_collectstart", "collector.fspath == test_aaa"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"),
("pytest_collectstart", "collector.fspath == test_bbb"),
("pytest_pycollect_makeitem", "name == 'test_func'"),
("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"),
]
)
def test_serialization_byid(self, testdir):
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
(item,) = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
(item2,) = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
def test_find_byid_without_instance_parents(self, testdir):
p = testdir.makepyfile(
"""
class TestClass(object):
def test_method(self):
pass
"""
)
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
(item,) = items
assert item.nodeid.endswith("TestClass::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
class Test_getinitialnodes:
def test_global_file(self, testdir, tmpdir):
x = tmpdir.ensure("x.py")
with tmpdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert isinstance(col, pytest.Module)
assert col.name == "x.py"
assert col.parent.parent is None
for col in col.listchain():
assert col.config is config
def test_pkgfile(self, testdir):
"""Verify nesting when a module is within a package.
The parent chain should match: Module<x.py> -> Package<subdir> -> Session.
Session's parent should always be None.
"""
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
subdir.ensure("__init__.py")
with subdir.as_cwd():
config = testdir.parseconfigure(x)
col = testdir.getnode(config, x)
assert col.name == "x.py"
assert isinstance(col, pytest.Module)
assert isinstance(col.parent, pytest.Package)
assert isinstance(col.parent.parent, pytest.Session)
# session is batman (has no parents)
assert col.parent.parent.parent is None
for col in col.listchain():
assert col.config is config
class Test_genitems:
def test_check_collect_hashes(self, testdir):
p = testdir.makepyfile(
"""
def test_1():
pass
def test_2():
pass
"""
)
p.copy(p.dirpath(p.purebasename + "2" + ".py"))
items, reprec = testdir.inline_genitems(p.dirpath())
assert len(items) == 4
for numi, i in enumerate(items):
for numj, j in enumerate(items):
if numj != numi:
assert hash(i) != hash(j)
assert i != j
def test_example_items1(self, testdir):
p = testdir.makepyfile(
"""
import pytest
def testone():
pass
class TestX(object):
def testmethod_one(self):
pass
class TestY(TestX):
@pytest.mark.parametrize("arg0", [".["])
def testmethod_two(self, arg0):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
assert len(items) == 4
assert items[0].name == "testone"
assert items[1].name == "testmethod_one"
assert items[2].name == "testmethod_one"
assert items[3].name == "testmethod_two[.[]"
# let's also test getmodpath here
assert items[0].getmodpath() == "testone"
assert items[1].getmodpath() == "TestX.testmethod_one"
assert items[2].getmodpath() == "TestY.testmethod_one"
# PR #6202: Fix incorrect result of getmodpath method. (Resolves issue #6189)
assert items[3].getmodpath() == "TestY.testmethod_two[.[]"
s = items[0].getmodpath(stopatmodule=False)
assert s.endswith("test_example_items1.testone")
print(s)
def test_class_and_functions_discovery_using_glob(self, testdir):
"""
tests that python_classes and python_functions config options work
as prefixes and glob-like patterns (issue #600).
"""
testdir.makeini(
"""
[pytest]
python_classes = *Suite Test
python_functions = *_test test
"""
)
p = testdir.makepyfile(
"""
class MyTestSuite(object):
def x_test(self):
pass
class TestCase(object):
def test_y(self):
pass
"""
)
items, reprec = testdir.inline_genitems(p)
ids = [x.getmodpath() for x in items]
assert ids == ["MyTestSuite.x_test", "TestCase.test_y"]
def test_matchnodes_two_collections_same_file(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_configure(config):
config.pluginmanager.register(Plugin2())
class Plugin2(object):
def pytest_collect_file(self, path, parent):
if path.ext == ".abc":
return MyFile2(path, parent)
def pytest_collect_file(path, parent):
if path.ext == ".abc":
return MyFile1(path, parent)
class MyFile1(pytest.Item, pytest.File):
def runtest(self):
pass
class MyFile2(pytest.File):
def collect(self):
return [Item2("hello", parent=self)]
class Item2(pytest.Item):
def runtest(self):
pass
"""
)
p = testdir.makefile(".abc", "")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
res = testdir.runpytest("%s::hello" % p.basename)
res.stdout.fnmatch_lines(["*1 passed*"])
class TestNodekeywords:
def test_no_under(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
values = list(modcol.keywords)
assert modcol.name in values
for x in values:
assert not x.startswith("_")
assert modcol.name in repr(modcol.keywords)
def test_issue345(self, testdir):
testdir.makepyfile(
"""
def test_should_not_be_selected():
assert False, 'I should not have been selected to run'
def test___repr__():
pass
"""
)
reprec = testdir.inline_run("-k repr")
reprec.assertoutcome(passed=1, failed=0)
def test_keyword_matching_is_case_insensitive_by_default(self, testdir):
"""Check that selection via -k EXPRESSION is case-insensitive.
Since markers are also added to the node keywords, they too can
be matched without having to think about case sensitivity.
"""
testdir.makepyfile(
"""
import pytest
def test_sPeCiFiCToPiC_1():
assert True
class TestSpecificTopic_2:
def test(self):
assert True
@pytest.mark.sPeCiFiCToPic_3
def test():
assert True
@pytest.mark.sPeCiFiCToPic_4
class Test:
def test(self):
assert True
def test_failing_5():
assert False, "This should not match"
"""
)
num_matching_tests = 4
for expression in ("specifictopic", "SPECIFICTOPIC", "SpecificTopic"):
reprec = testdir.inline_run("-k " + expression)
reprec.assertoutcome(passed=num_matching_tests, failed=0)
COLLECTION_ERROR_PY_FILES = dict(
test_01_failure="""
def test_1():
assert False
""",
test_02_import_error="""
import asdfasdfasdf
def test_2():
assert True
""",
test_03_import_error="""
import asdfasdfasdf
def test_3():
assert True
""",
test_04_success="""
def test_4():
assert True
""",
)
def test_exit_on_collection_error(testdir):
"""Verify that all collection errors are collected and no tests executed"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest()
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
]
)
def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
"""
    Verify collection is aborted once maxfail errors are encountered, ignoring
further modules which would cause more collection errors.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"collected 1 item / 1 error",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*! stopping after 1 failures !*",
"*= 1 error in *",
]
)
res.stdout.no_fnmatch_line("*test_03*")
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"""
Verify the test run aborts due to collection errors even if maxfail count of
errors was not reached.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
"*ERROR collecting test_02_import_error.py*",
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
"*! Interrupted: 2 errors during collection !*",
"*= 2 errors in *",
]
)
def test_continue_on_collection_errors(testdir):
"""
Verify tests are executed even when collection errors occur when the
--continue-on-collection-errors flag is set
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors")
assert res.ret == 1
res.stdout.fnmatch_lines(
["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
)
def test_continue_on_collection_errors_maxfail(testdir):
"""
Verify tests are executed even when collection errors occur and that maxfail
is honoured (including the collection error count).
4 tests: 2 collection errors + 1 failure + 1 success
test_4 is never executed because the test run is with --maxfail=3 which
means it is interrupted after the 2 collection errors + 1 failure.
"""
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])
def test_fixture_scope_sibling_conftests(testdir):
"""Regression test case for https://github.com/pytest-dev/pytest/issues/2836"""
foo_path = testdir.mkdir("foo")
foo_path.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix():
return 1
"""
)
)
foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1")
# Tests in `food/` should not see the conftest fixture from `foo/`
food_path = testdir.mkpydir("food")
food_path.join("test_food.py").write("def test_food(fix): assert fix == 1")
res = testdir.runpytest()
assert res.ret == 1
res.stdout.fnmatch_lines(
[
"*ERROR at setup of test_food*",
"E*fixture 'fix' not found",
"*1 passed, 1 error*",
]
)
def test_collect_init_tests(testdir):
"""Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)"""
p = testdir.copy_example("collect/collect_init_tests")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package *",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
result = testdir.runpytest("./tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package *",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
# Ignores duplicates with "." and pkginit (#4310).
result = testdir.runpytest("./tests", ".", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package */tests>",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
# Same as before, but different order.
result = testdir.runpytest(".", "tests", "--collect-only")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"<Package */tests>",
" <Module __init__.py>",
" <Function test_init>",
" <Module test_foo.py>",
" <Function test_foo>",
]
)
result = testdir.runpytest("./tests/test_foo.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module test_foo.py>", " <Function test_foo>"]
)
result.stdout.no_fnmatch_line("*test_init*")
result = testdir.runpytest("./tests/__init__.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module __init__.py>", " <Function test_init>"]
)
result.stdout.no_fnmatch_line("*test_foo*")
def test_collect_invalid_signature_message(testdir):
"""Check that we issue a proper message when we can't determine the signature of a test
function (#4026).
"""
testdir.makepyfile(
"""
import pytest
class TestCase:
@pytest.fixture
def fix():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["Could not determine arguments of *.fix *: invalid method signature"]
)
def test_collect_handles_raising_on_dunder_class(testdir):
"""Handle proxy classes like Django's LazySettings that might raise on
``isinstance`` (#4266).
"""
testdir.makepyfile(
"""
class ImproperlyConfigured(Exception):
pass
class RaisesOnGetAttr(object):
def raises(self):
raise ImproperlyConfigured
__class__ = property(raises)
raises = RaisesOnGetAttr()
def test_1():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
def test_collect_with_chdir_during_import(testdir):
subdir = testdir.tmpdir.mkdir("sub")
testdir.tmpdir.join("conftest.py").write(
textwrap.dedent(
"""
import os
os.chdir(%r)
"""
% (str(subdir),)
)
)
testdir.makepyfile(
"""
def test_1():
import os
assert os.getcwd() == %r
"""
% (str(subdir),)
)
with testdir.tmpdir.as_cwd():
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed in*"])
assert result.ret == 0
# Handles relative testpaths.
testdir.makeini(
"""
[pytest]
testpaths = .
"""
)
with testdir.tmpdir.as_cwd():
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["collected 1 item"])
def test_collect_pyargs_with_testpaths(testdir, monkeypatch):
testmod = testdir.mkdir("testmod")
# NOTE: __init__.py is not collected since it does not match python_files.
testmod.ensure("__init__.py").write("def test_func(): pass")
testmod.ensure("test_file.py").write("def test_func(): pass")
root = testdir.mkdir("root")
root.ensure("pytest.ini").write(
textwrap.dedent(
"""
[pytest]
addopts = --pyargs
testpaths = testmod
"""
)
)
monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir), prepend=os.pathsep)
with root.as_cwd():
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed in*"])
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_collect_symlink_file_arg(testdir):
"""Test that collecting a direct symlink, where the target does not match python_files works (#4325)."""
real = testdir.makepyfile(
real="""
def test_nodeid(request):
assert request.node.nodeid == "real.py::test_nodeid"
"""
)
symlink = testdir.tmpdir.join("symlink.py")
symlink.mksymlinkto(real)
result = testdir.runpytest("-v", symlink)
result.stdout.fnmatch_lines(["real.py::test_nodeid PASSED*", "*1 passed in*"])
assert result.ret == 0
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
def test_collect_symlink_out_of_tree(testdir):
"""Test collection of symlink via out-of-tree rootdir."""
sub = testdir.tmpdir.join("sub")
real = sub.join("test_real.py")
real.write(
textwrap.dedent(
"""
def test_nodeid(request):
# Should not contain sub/ prefix.
assert request.node.nodeid == "test_real.py::test_nodeid"
"""
),
ensure=True,
)
out_of_tree = testdir.tmpdir.join("out_of_tree").ensure(dir=True)
symlink_to_sub = out_of_tree.join("symlink_to_sub")
symlink_to_sub.mksymlinkto(sub)
sub.chdir()
result = testdir.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub)
result.stdout.fnmatch_lines(
[
# Should not contain "sub/"!
"test_real.py::test_nodeid PASSED"
]
)
assert result.ret == 0
def test_collectignore_via_conftest(testdir):
"""collect_ignore in parent conftest skips importing child (issue #4592)."""
tests = testdir.mkpydir("tests")
tests.ensure("conftest.py").write("collect_ignore = ['ignore_me']")
ignore_me = tests.mkdir("ignore_me")
ignore_me.ensure("__init__.py")
ignore_me.ensure("conftest.py").write("assert 0, 'should_not_be_called'")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_collect_pkg_init_and_file_in_args(testdir):
subdir = testdir.mkdir("sub")
init = subdir.ensure("__init__.py")
init.write("def test_init(): pass")
p = subdir.ensure("test_file.py")
p.write("def test_file(): pass")
# NOTE: without "-o python_files=*.py" this collects test_file.py twice.
# This changed/broke with "Add package scoped fixtures #2283" (2b1410895)
# initially (causing a RecursionError).
result = testdir.runpytest("-v", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
"sub/test_file.py::test_file PASSED*",
"*2 passed in*",
]
)
result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init), str(p))
result.stdout.fnmatch_lines(
[
"sub/__init__.py::test_init PASSED*",
"sub/test_file.py::test_file PASSED*",
"*2 passed in*",
]
)
def test_collect_pkg_init_only(testdir):
subdir = testdir.mkdir("sub")
init = subdir.ensure("__init__.py")
init.write("def test_init(): pass")
result = testdir.runpytest(str(init))
result.stdout.fnmatch_lines(["*no tests ran in*"])
result = testdir.runpytest("-v", "-o", "python_files=*.py", str(init))
result.stdout.fnmatch_lines(["sub/__init__.py::test_init PASSED*", "*1 passed in*"])
@pytest.mark.skipif(
not hasattr(py.path.local, "mksymlinkto"),
reason="symlink not available on this platform",
)
@pytest.mark.parametrize("use_pkg", (True, False))
def test_collect_sub_with_symlinks(use_pkg, testdir):
sub = testdir.mkdir("sub")
if use_pkg:
sub.ensure("__init__.py")
sub.ensure("test_file.py").write("def test_file(): pass")
# Create a broken symlink.
sub.join("test_broken.py").mksymlinkto("test_doesnotexist.py")
# Symlink that gets collected.
sub.join("test_symlink.py").mksymlinkto("test_file.py")
result = testdir.runpytest("-v", str(sub))
result.stdout.fnmatch_lines(
[
"sub/test_file.py::test_file PASSED*",
"sub/test_symlink.py::test_file PASSED*",
"*2 passed in*",
]
)
def test_collector_respects_tbstyle(testdir):
p1 = testdir.makepyfile("assert 0")
result = testdir.runpytest(p1, "--tb=native")
assert result.ret == ExitCode.INTERRUPTED
result.stdout.fnmatch_lines(
[
"*_ ERROR collecting test_collector_respects_tbstyle.py _*",
"Traceback (most recent call last):",
' File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
" assert 0",
"AssertionError: assert 0",
"*! Interrupted: 1 error during collection !*",
"*= 1 error in *",
]
)
def test_does_not_eagerly_collect_packages(testdir):
testdir.makepyfile("def test(): pass")
pydir = testdir.mkpydir("foopkg")
pydir.join("__init__.py").write("assert False")
result = testdir.runpytest()
assert result.ret == ExitCode.OK
def test_does_not_put_src_on_path(testdir):
# `src` is not on sys.path so it should not be importable
testdir.tmpdir.join("src/nope/__init__.py").ensure()
testdir.makepyfile(
"import pytest\n"
"def test():\n"
" with pytest.raises(ImportError):\n"
" import nope\n"
)
result = testdir.runpytest()
assert result.ret == ExitCode.OK
|
alfredodeza/pytest
|
testing/test_collection.py
|
Python
|
mit
| 44,974
|
[
"VisIt"
] |
72dcff1ef27338a2b29bc60893f9efd83050078a273dec31f185cf42d5b3d671
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val=0, left=None, right=None, next=None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
# Core idea
# To connect the siblings of the nodes on level N+1, we have to operate on level N (because we need access to the left and right children).
# Use start to move down level by level, and use p to traverse every node within a level; the siblings of the current level were already connected while traversing the previous level,
# so while traversing this level we connect the siblings of the next level.
class Solution(object):
def connect(self, root):
"""
:type root: Node
:rtype: Node
"""
if not root:
return root
start = root
# When populating layer N+1, we are actually visiting layer N.
        # We use start to go down into each layer, and use p to visit that layer.
while start.left:
p = start
while p:
p.left.next = p.right
if p.next:
p.right.next = p.next.left
p = p.next
start = start.left
return root
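# Illustrative usage sketch (not part of the original solution). It assumes the
# Node class from the commented-out definition above is available:
#
#   root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))
#   Solution().connect(root)
#   assert root.left.next is root.right              # level 2 linked
#   assert root.left.right.next is root.right.left   # level 3 linked across subtrees
#   assert root.right.right.next is None             # rightmost node keeps next = None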
|
kingsamchen/Eureka
|
crack-data-structures-and-algorithms/leetcode/populating_next_right_pointers_in_each_node_q116.py
|
Python
|
mit
| 1,150
|
[
"VisIt"
] |
500bcefdb47ae5596c5eabe313aec4d47de54cd66039323070c7cd95551c7c66
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
import collections
import logging
import threading
import itertools
import time
import types
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
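# Illustrative usage sketch (not part of the original module). `executor` and
# `task` below are hypothetical; any Executor implementation and callable work.
#
#   futures = [executor.submit(task, i) for i in range(4)]
#   done, not_done = wait(futures, timeout=10, return_when=FIRST_COMPLETED)
#   for f in as_completed(futures):
#       print(f.result())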
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._traceback = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
except BaseException:
# Explicitly let all other new-style exceptions through so
# that we can catch all old-style exceptions with a simple
# "except:" clause below.
#
# All old-style exception objects are instances of
# types.InstanceType, but "except types.InstanceType:" does
# not catch old-style exceptions for some reason. Thus, the
# only way to catch all old-style exceptions without catching
# any new-style exceptions is to filter out the new-style
# exceptions, which all derive from BaseException.
raise
except:
# Because of the BaseException clause above, this handler only
# executes for old-style exception objects.
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
if isinstance(self._exception, types.InstanceType):
# The exception is an instance of an old-style class, which
# means type(self._exception) returns types.ClassType instead
# of the exception's actual class type.
exception_type = self._exception.__class__
else:
exception_type = type(self._exception)
raise exception_type, self._exception, self._traceback
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception_info(self, timeout=None):
"""Return a tuple of (exception, traceback) raised by the call that the
future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception, self._traceback
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
return self.exception_info(timeout)[0]
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception_info(self, exception, traceback):
"""Sets the result of the future as being the given exception
and traceback.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._traceback = traceback
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
self.set_exception_info(exception, None)
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
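# A minimal sketch of an Executor implementation driving the Future protocol
# defined above (set_running_or_notify_cancel / set_result / set_exception);
# SynchronousExecutor and the sample callables are illustrative only and are
# not part of this package.
if __name__ == '__main__':
    class SynchronousExecutor(Executor):
        """Runs each submitted callable immediately, in the calling thread."""
        def submit(self, fn, *args, **kwargs):
            future = Future()
            # Executors must call this before running the work; it returns
            # False when the future was cancelled in the meantime.
            if future.set_running_or_notify_cancel():
                try:
                    result = fn(*args, **kwargs)
                except BaseException as exc:
                    future.set_exception(exc)
                else:
                    future.set_result(result)
            return future

    executor = SynchronousExecutor()
    assert executor.submit(lambda x: x * 2, 21).result() == 42
    assert list(executor.map(lambda x: x + 1, [1, 2, 3])) == [2, 3, 4]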
|
nateprewitt/pipenv
|
pipenv/vendor/concurrent27/futures/_base.py
|
Python
|
mit
| 22,424
|
[
"Brian"
] |
96f12df22f52336066489cae99977c99fc5d0882afc1530a796fa5e50c9fa602
|
# $Id$
#
# Copyright (C) 2007-2008 Greg Landrum
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski,Descriptors,Crippen
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
import re
#set up the logger:
import rdkit.RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
def ProcessMol(mol,typeConversions,globalProps,nDone,nameProp='_Name',nameCol='compound_id',
redraw=False,keepHs=False,
skipProps=False,addComputedProps=False,
skipSmiles=False,
uniqNames=None,namesSeen=None):
if not mol:
raise ValueError('no molecule')
if keepHs:
Chem.SanitizeMol(mol)
try:
nm = mol.GetProp(nameProp)
except KeyError:
nm = None
if not nm:
nm = 'Mol_%d'%nDone
if uniqNames and nm in namesSeen:
logger.error('duplicate compound id (%s) encountered. second instance skipped.'%nm)
return None
namesSeen.add(nm)
row = [nm]
if not skipProps:
if addComputedProps:
nHD=Lipinski.NumHDonors(mol)
mol.SetProp('DonorCount',str(nHD))
nHA=Lipinski.NumHAcceptors(mol)
mol.SetProp('AcceptorCount',str(nHA))
nRot=Lipinski.NumRotatableBonds(mol)
mol.SetProp('RotatableBondCount',str(nRot))
MW=Descriptors.MolWt(mol)
mol.SetProp('AMW',str(MW))
logp=Crippen.MolLogP(mol)
mol.SetProp('MolLogP',str(logp))
pns = list(mol.GetPropNames())
pD={}
for pi,pn in enumerate(pns):
if pn.lower()==nameCol.lower(): continue
pv = mol.GetProp(pn).strip()
if pv.find('>')<0 and pv.find('<')<0:
colTyp = globalProps.get(pn,2)
while colTyp>0:
try:
tpi = typeConversions[colTyp][1](pv)
except Exception:
colTyp-=1
else:
break
globalProps[pn]=colTyp
pD[pn]=typeConversions[colTyp][1](pv)
else:
pD[pn]=pv
else:
pD={}
if redraw:
    AllChem.Compute2DCoords(mol)
if not skipSmiles:
row.append(Chem.MolToSmiles(mol,True))
row.append(DbModule.binaryHolder(mol.ToBinary()))
row.append(pD)
return row
def ConvertRows(rows,globalProps,defaultVal,skipSmiles):
for i,row in enumerate(rows):
newRow = [row[0],row[1]]
pD=row[-1]
for pn in globalProps:
pv = pD.get(pn,defaultVal)
newRow.append(pv)
newRow.append(row[2])
if not skipSmiles:
newRow.append(row[3])
rows[i] = newRow
def LoadDb(suppl,dbName,nameProp='_Name',nameCol='compound_id',silent=False,
redraw=False,errorsTo=None,keepHs=False,defaultVal='N/A',skipProps=False,
regName='molecules',skipSmiles=False,maxRowsCached=-1,
uniqNames=False,addComputedProps=False,lazySupplier=False,
startAnew=True):
if not lazySupplier:
nMols = len(suppl)
else:
nMols=-1
if not silent:
logger.info("Generating molecular database in file %s"%dbName)
if not lazySupplier:
logger.info(" Processing %d molecules"%nMols)
rows = []
globalProps = {}
namesSeen = set()
nDone = 0
typeConversions={0:('varchar',str),1:('float',float),2:('int',int)}
for m in suppl:
nDone +=1
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if row is None: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
break
nameDef='%s varchar not null'%nameCol
if uniqNames:
nameDef += ' unique'
typs = ['guid integer not null primary key',nameDef]
pns = []
for pn,v in globalProps.items():
addNm = re.sub(r'[\W]','_',pn)
typs.append('%s %s'%(addNm,typeConversions[v][0]))
pns.append(pn.lower())
if not skipSmiles:
if 'smiles' not in pns:
typs.append('smiles varchar')
else:
typs.append('cansmiles varchar')
typs.append('molpkl %s'%(DbModule.binaryTypeName))
conn = DbConnect(dbName)
curs = conn.GetCursor()
if startAnew:
try:
curs.execute('drop table %s'%regName)
except Exception:
pass
curs.execute('create table %s (%s)'%(regName,','.join(typs)))
else:
curs.execute('select * from %s limit 1'%(regName,))
ocolns = set([x[0] for x in curs.description])
ncolns = set([x.split()[0] for x in typs])
if ncolns != ocolns:
raise ValueError('Column names do not match: %s != %s'%(ocolns,ncolns))
curs.execute('select max(guid) from %s'%(regName,))
offset = curs.fetchone()[0]
for row in rows:
row[0] += offset
qs = ','.join([DbModule.placeHolder for x in typs])
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
while 1:
nDone +=1
try:
m = next(suppl)
except StopIteration:
break
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
tmpProps={}
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if not row: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
if len(rows):
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
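# A minimal sketch of feeding LoadDb() a molecule supplier; 'mols.sdf' and
# 'mols.db' are hypothetical file names used only for illustration.
if __name__ == '__main__':
    example_suppl = Chem.SDMolSupplier('mols.sdf')
    LoadDb(example_suppl, 'mols.db', addComputedProps=True, uniqNames=True)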
|
adalke/rdkit
|
rdkit/Chem/MolDb/Loader_orig.py
|
Python
|
bsd-3-clause
| 6,608
|
[
"RDKit"
] |
9ea8f9d6e082dd1715ec032b747c156e254c3ea2b90e71757d161fca940c07ca
|
"""
Utilities for traversing `CWNode` trees.
"""
from collections import deque
def preorder_traversal(node, start=None, end=None) -> "iterator(CWNode)":
"""
Yields every node in the tree. Each node is yielded before its descendants.
Mutation is disallowed.
- *start*: If specified, only yield nodes following (not including) this node.
- *end*: If specified, do not yield this node or nodes following it.
"""
stack = deque([node])
has_started = start is None
while len(stack) > 0:
node = stack.pop()
if node is end:
return
has_started = has_started or start is node
if has_started:
yield node
for child in reversed(node.children):
stack.append(child)
def postorder_traversal(node) -> "iterator(CWNode)":
"""
Yields every node in the tree. Each node is yielded after its descendants.
Mutation is disallowed.
"""
root = node
stack = deque([(root, 'yield'), (root, 'add_children')])
while len(stack) > 0:
(node, action) = stack.pop()
if action == 'yield':
yield node
elif action == 'add_children':
for i in reversed(range(len(node.children))):
stack.append((node.children[i], 'yield'))
stack.append((node.children[i], 'add_children'))
def iterate_ancestors(node):
"""
Yields every ancestor of a node, starting with its immediate parent.
```python
from computerwords.cwdom.nodes import CWNode
from computerwords.cwdom.traversal import iterate_ancestors
node_c = CWNode('c', [])
tree = CWNode('a', [
CWNode('b', [node_c]),
CWNode('d', []),
])
assert ([node.name for node in iterate_ancestors(node_c)] ==
['b', 'a'])
```
"""
node = node.get_parent()
while node:
yield node
node = node.get_parent()
def find_ancestor(node, predicate):
"""
Returns the closest ancestor of a node matching the given predicate.
```python
from computerwords.cwdom.traversal import find_ancestor
document_node = find_ancestor(node, lambda n: n.name == 'Document')
```
"""
for ancestor in iterate_ancestors(node):
if predicate(ancestor):
return ancestor
def visit_tree(tree, node_name_to_visitor, node=None, handle_error=None):
"""
Recursively call the `CWTreeVisitor` for each node. If a node
is encountered that has no corresponding visitor, `MissingVisitorError` is
thrown.
```python
from computerwords.cwdom.CWTree import CWTree
from computerwords.cwdom.traversal import (
visit_tree,
CWTreeVisitor
)
visits = []
class SimpleVisitor(CWTreeVisitor):
def before_children(self, tree, node):
visits.append('pre-{}'.format(node.name))
def after_children(self, tree, node):
visits.append('post-{}'.format(node.name))
tree = CWTree(CWNode('x', [CWNode('y', [])]))
visit_tree(tree, {
'x': SimpleVisitor(),
'y': SimpleVisitor(),
})
assert visits == ['pre-x', 'pre-y', 'post-y', 'post-x']
```
"""
node = node or tree.root
visitor = None
try:
visitor = node_name_to_visitor[node.name]
except KeyError:
if handle_error is None:
raise MissingVisitorError(node)
else:
handle_error(node)
if visitor is not None:
visitor.before_children(tree, node)
for child in node.children:
visit_tree(
tree, node_name_to_visitor, node=child, handle_error=handle_error)
if visitor is not None:
visitor.after_children(tree, node)
class MissingVisitorError(Exception):
"""
Error thrown when trying to visit a node for which no visitor is available.
"""
def __init__(self, node):
super().__init__()
self.node = node
def __repr__(self):
return "MissingVisitorError(document_id={!r}, node={!r})".format(
self.node.document_id, self.node)
def __str__(self):
return repr(self)
class CWTreeVisitor:
def before_children(self, tree, node):
"""Called before the node's children are visited."""
def after_children(self, tree, node):
"""Called after the node's children are visited."""
class PostorderTraverser:
"""
A class that lets you iterate over a tree while mutating it.
Keeps track of a *cursor* representing the last visited node. Each time
the next node is requested, the iterator looks at the cursor and walks
up the tree to find the cursor's next sibling or parent.
You may replace the cursor if you want to replace the node currently being
visited.
You may safely mutate the cursor's ancestors, since they haven't been
visited yet.
"""
def __init__(self, node):
super().__init__()
self.cursor = node
self._is_first_result = True
def replace_cursor(self, new_cursor):
"""Only use this if you really know what you are doing."""
self.cursor = new_cursor
def __iter__(self):
return self
def __next__(self) -> "CWNode":
if self._is_first_result:
self._descend()
self._is_first_result = False
else:
parent = self.cursor.get_parent()
if not parent:
raise StopIteration()
child_i = parent.children.index(self.cursor)
next_child_i = child_i + 1
if next_child_i >= len(parent.children):
self.cursor = parent
else:
self.cursor = parent.children[next_child_i]
self._descend()
return self.cursor
def _descend(self):
while self.cursor.children:
self.cursor = self.cursor.children[0]
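# A small sketch tying the traversals together, reusing the CWNode shape from
# the docstring examples above (a node name plus a list of children); the
# letter names are made-up example nodes.
if __name__ == '__main__':
    from computerwords.cwdom.nodes import CWNode

    tree = CWNode('a', [CWNode('b', [CWNode('c', [])]), CWNode('d', [])])
    assert [n.name for n in preorder_traversal(tree)] == ['a', 'b', 'c', 'd']
    assert [n.name for n in postorder_traversal(tree)] == ['c', 'b', 'd', 'a']
    # PostorderTraverser visits the same order but also tolerates mutation of
    # not-yet-visited ancestors while iterating.
    assert [n.name for n in PostorderTraverser(tree)] == ['c', 'b', 'd', 'a']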
|
irskep/computerwords
|
computerwords/cwdom/traversal.py
|
Python
|
bsd-3-clause
| 5,871
|
[
"VisIt"
] |
5bf1475198f0596e6605e8bac65d1a8ff565fff31334f316f0c1d870fb6dc13b
|
import numpy as np
from . import atomic_mass
class Atom:
# Static variables #
one_letter_elements = {"H","B","C","N","O","F","P","K","S","V","I","U","Y","W","I"} # one letter elements
two_letter_elements = {"He","Li","Be","Ne",
"Na","Mg","Al","Si","Cl","Ar","Ca", # the 2nd letter is in the lower-case.
"Sc","Ti","Cr","Mn","Fe","Co","Ni","Cu","Zn",
"Ga","Ge","As","Se","Br","Kr","Rb","Sr","Zr",
"Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn",
"Sb","Te","Xe","Cs","Ba","La","Ce","Pr","Nd",
"Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb",
"Lu","Hf","Ta","Re","Os","Ir","Pt","Au","Hg",
"Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th",
"Pa","Np","Pu","Am","Cm","Bk","Cf"} #
def __init__(self,line_ext):
self.pos = np.array([0,0,0],dtype='f')
line_ext=line_ext.replace("\n","") # getting rid of \n and \r in the line
line_ext=line_ext.replace("\r","") # getting rid of \n and \r in the line
self.line = "%-80s\n" % line_ext
self.pos[0] = float(self.line[30:38])
self.pos[1] = float(self.line[38:46])
self.pos[2] = float(self.line[46:54])
self.name = self.line[12:16]
self.resname = self.line[17:21]
self.index = 1
self.element = self.ExtractElement()
self.bonded_atoms=[]
self.num_hydrogen=False
@classmethod
def FromCoordinates(cls, pos = [0.0,0.0,0.0] , name="C" ):
line = "ATOM 1 1 0.000 0.000 0.000 1.00 0.00 "
line = line[:30] + "%8.3f" % pos[0] + line[38:]
line = line[:38] + "%8.3f" % pos[1] + line[46:]
line = line[:46] + "%8.3f" % pos[2] + line[54:]
line = line[:12] + "%4.4s" % name + line[16:]
atom = cls(line)
return atom
def ApplyTransform(self,trans): # trans is a vtk.vtkTransfom()
trans.TransformPoint(self.pos,self.pos)
self.UpdateCrd()
def SetPosition(self,ex_pos):
self.pos = ex_pos
self.UpdateCrd()
def UpdateCrd(self):
self.line = self.line[:30] + "%8.3f" % self.pos[0] + self.line[38:]
self.line = self.line[:38] + "%8.3f" % self.pos[1] + self.line[46:]
self.line = self.line[:46] + "%8.3f" % self.pos[2] + self.line[54:]
## fix element ##
if not self.element:
pass
def GetMolNameGMX(self):
"""Returns the molecule's name with Gromacs standard.
"""
return self.line[17:21].strip()
def GetStr(self,atom_sq_number=1,res_sq_number=1,atom_index=1):
self.UpdateCrd()
self.line = self.line[:6] + '%5d' % (atom_sq_number % 100000) + self.line[11:]
self.line = self.line[:22] + '%4d' % (res_sq_number % 10000) + self.line[26:]
self.line = self.line[:76] + '%2.2s' % self.element.strip() + self.line[78:]
self.index = atom_index
return self.line
def TakeToOrigin(self):
self.pos[0] = 0.0
self.pos[1] = 0.0
self.pos[2] = 0.0
def ExtractElement(self):
""" Try to extract the atom element from the atom's name.
"""
element_str = self.line[76:78].strip()
name = self.name
if element_str !='' :
return element_str
for i in range(3): # move from left to right to find a match.
if name[i:i+2].capitalize() in Atom.two_letter_elements: # two letter elements
out = name[i:i+2].capitalize()
return out.strip()
else:
if name[i] in Atom.one_letter_elements: # one letter elements
return name[i].upper() # return element in upper case.
return ""
|
alinar/Molar
|
molar/atom.py
|
Python
|
gpl-2.0
| 4,127
|
[
"Gromacs",
"VTK"
] |
f71ce69bb80084c711e2fd97a429832504eef61e6c54bdc5e73f6e5089da007c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Ahnentafel Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import math
#------------------------------------------------------------------------
#
# gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.errors import ReportError
from gramps.gen.lib import ChildRefType
from gramps.gen.plug.menu import (BooleanOption, NumberOption, PersonOption)
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.plugins.lib.libnarrate import Narrator
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
#------------------------------------------------------------------------
#
# log2val
#
#------------------------------------------------------------------------
def log2(val):
"""
Calculate the log base 2 of a number
"""
return int(math.log(val, 2))
#------------------------------------------------------------------------
#
# AncestorReport
#
#------------------------------------------------------------------------
class AncestorReport(Report):
"""
Ancestor Report class
"""
def __init__(self, database, options, user):
"""
Create the AncestorReport object that produces the Ahnentafel report.
The arguments are:
database - the Gramps database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
gen - Maximum number of generations to include.
pagebbg - Whether to include page breaks between generations.
name_format - Preferred format to display names
incl_private - Whether to include private data
namebrk - Whether a line break should follow the name
inc_id - Whether to include Gramps IDs
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
self.map = {}
menu = options.menu
self.set_locale(menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, menu)
stdoptions.run_private_data_option(self, menu)
stdoptions.run_living_people_option(self, menu, self._locale)
self.database = CacheProxyDb(self.database)
self.max_generations = menu.get_option_by_name('maxgen').get_value()
self.pgbrk = menu.get_option_by_name('pagebbg').get_value()
self.opt_namebrk = menu.get_option_by_name('namebrk').get_value()
self.want_ids = menu.get_option_by_name('inc_id').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.center_person = self.database.get_person_from_gramps_id(pid)
if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid)
stdoptions.run_name_format_option(self, menu)
self.__narrator = Narrator(self.database, use_fulldate=True,
nlocale=self._locale)
def apply_filter(self, person_handle, index, generation=1):
"""
Recursive function to walk back all parents of the current person.
When max_generations are hit, we stop the traversal.
"""
# check for end of the current recursion level. This happens
# if the person handle is None, or if the max_generations is hit
if not person_handle or generation > self.max_generations:
return
# store the person in the map based off their index number
# which is passed to the routine.
self.map[index] = person_handle
# retrieve the Person instance from the database from the
# passed person_handle and find the parents from the list.
# Since this report is for natural parents (birth parents),
        # we have to handle that the recorded parents may not be the birth parents.
person = self.database.get_person_from_handle(person_handle)
if person is None:
return
father_handle = None
mother_handle = None
for family_handle in person.get_parent_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
# filter the child_ref_list to find the reference that matches
# the passed person. There should be exactly one, but there is
# nothing that prevents the same child in the list multiple times.
ref = [ c for c in family.get_child_ref_list()
if c.get_reference_handle() == person_handle]
if ref:
# If the father_handle is not defined and the relationship is
# BIRTH, then we have found the birth father. Same applies to
                # the birth mother. If, for some reason, we have multiple
# people defined as the birth parents, we will select based on
# priority in the list
if not father_handle and \
ref[0].get_father_relation() == ChildRefType.BIRTH:
father_handle = family.get_father_handle()
if not mother_handle and \
ref[0].get_mother_relation() == ChildRefType.BIRTH:
mother_handle = family.get_mother_handle()
# Recursively call the function. It is okay if the handle is None,
# since routine handles a handle of None
self.apply_filter(father_handle, index*2, generation+1)
self.apply_filter(mother_handle, (index*2)+1, generation+1)
def write_report(self):
"""
        The routine that actually creates the report. At this point, the document
is opened and ready for writing.
"""
# Call apply_filter to build the self.map array of people in the
# database that match the ancestry.
self.apply_filter(self.center_person.get_handle(), 1)
# Write the title line. Set an INDEX mark so that this section will be
# identified as a major category if this is included in a Book report.
name = self._name_display.display_formal(self.center_person)
# feature request 2356: avoid genitive form
title = self._("Ahnentafel Report for %s") % name
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.start_paragraph("AHN-Title")
self.doc.write_text(title, mark)
self.doc.end_paragraph()
# get the entries out of the map, and sort them.
generation = 0
for key in sorted(self.map):
# check the index number to see if we need to start a new generation
if generation == log2(key):
# generate a page break if requested
if self.pgbrk and generation > 0:
self.doc.page_break()
generation += 1
# Create the Generation title, set an index marker
gen_text = self._("Generation %d") % generation
mark = None # don't need any with no page breaks
if self.pgbrk:
mark = IndexMark(gen_text, INDEX_TYPE_TOC, 2)
self.doc.start_paragraph("AHN-Generation")
self.doc.write_text(gen_text, mark)
self.doc.end_paragraph()
# Build the entry
self.doc.start_paragraph("AHN-Entry","%d." % key)
person = self.database.get_person_from_handle(self.map[key])
if person is None:
continue
name = self._name_display.display(person)
mark = utils.get_person_mark(self.database, person)
# write the name in bold
self.doc.start_bold()
self.doc.write_text(name.strip(), mark)
self.doc.end_bold()
if self.want_ids:
self.doc.write_text(' (%s)' % person.get_gramps_id())
# terminate with a period if it is not already terminated.
# This can happen if the person's name ends with something 'Jr.'
if name[-1:] == '.' and not self.want_ids:
self.doc.write_text(" ")
else:
self.doc.write_text(". ")
# Add a line break if requested
if self.opt_namebrk:
self.doc.write_text('\n')
self.__narrator.set_subject(person)
self.doc.write_text(self.__narrator.get_born_string())
self.doc.write_text(self.__narrator.get_baptised_string())
self.doc.write_text(self.__narrator.get_christened_string())
self.doc.write_text(self.__narrator.get_died_string())
self.doc.write_text(self.__narrator.get_buried_string())
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# AncestorOptions
#
#------------------------------------------------------------------------
class AncestorOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
return _nd.display(person)
def add_menu_options(self, menu):
"""
Add options to the menu for the ancestor report.
"""
category_name = _("Report Options")
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the report"))
menu.add_option(category_name, "pid", self.__pid)
maxgen = NumberOption(_("Generations"), 10, 1, 100)
maxgen.set_help(
_("The number of generations to include in the report"))
menu.add_option(category_name, "maxgen", maxgen)
stdoptions.add_gramps_id_option(menu, category_name)
pagebbg = BooleanOption(_("Page break between generations"), False)
pagebbg.set_help(
_("Whether to start a new page after each generation."))
menu.add_option(category_name, "pagebbg", pagebbg)
namebrk = BooleanOption(_("Add linebreak after each name"), False)
namebrk.set_help(_("Whether a line break should follow the name."))
menu.add_option(category_name, "namebrk", namebrk)
category_name = _("Report Options (2)")
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
def make_default_style(self, default_style):
"""
Make the default output style for the Ahnentafel report.
There are 3 paragraph styles for this report.
AHN_Title - The title for the report. The options are:
Font : Sans Serif
Bold
16pt
Paragraph : First level header
0.25cm top and bottom margin
Centered
AHN-Generation - Used for the generation header
Font : Sans Serif
Italic
14pt
Paragraph : Second level header
0.125cm top and bottom margins
AHN - Normal text display for each entry
Font : default
Paragraph : 1cm margin, with first indent of -1cm
0.125cm top and bottom margins
"""
#
# AHN-Title
#
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=16, bold=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(1)
para.set_top_margin(0.25)
para.set_bottom_margin(0.25)
para.set_alignment(PARA_ALIGN_CENTER)
para.set_description(_('The style used for the title.'))
default_style.add_paragraph_style("AHN-Title", para)
#
# AHN-Generation
#
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=14, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set_top_margin(0.125)
para.set_bottom_margin(0.125)
para.set_description(_('The style used for the generation header.'))
default_style.add_paragraph_style("AHN-Generation", para)
#
# AHN-Entry
#
para = ParagraphStyle()
para.set(first_indent=-1.0, lmargin=1.0)
para.set_top_margin(0.125)
para.set_bottom_margin(0.125)
para.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("AHN-Entry", para)
|
sam-m888/gramps
|
gramps/plugins/textreport/ancestorreport.py
|
Python
|
gpl-2.0
| 14,703
|
[
"Brian"
] |
42e9c9327ebac2a3196c342e52fbf937e5490ad49a6a077e30f8898d7f15b6af
|
#!/usr/bin/python
## Adapted from code sample in
## http://stackoverflow.com/questions/11685716/how-to-extract-chains-from-a-pdb-file
##
## Change includes reading domain names from PDB40D file
from os.path import expanduser
from Bio import PDB
USER_HOME = expanduser("~")
INFILE = "%s/Downloads/pdb40d_j.fa" %USER_HOME
#INFILE = "%s/Downloads/pdb40d_test" %USER_HOME
OUTDIR = "%s/Downloads/pdb40d/" %USER_HOME
import os
class ChainSplitter:
def __init__(self, out_dir=None):
""" Create parsing and writing objects, specify output directory. """
self.parser = PDB.PDBParser()
self.writer = PDB.PDBIO()
if out_dir is None:
out_dir = os.path.join(os.getcwd(), "chain_PDBs")
self.out_dir = out_dir
def make_pdb(self, pdb_path, chain_letters, overwrite=False, struct=None):
""" Create a new PDB file containing only the specified chains.
Returns the path to the created file.
:param pdb_path: full path to the crystal structure
:param chain_letters: iterable of chain characters (case insensitive)
:param overwrite: write over the output file if it exists
"""
chain_letters = [chain.upper() for chain in chain_letters]
# Input/output files
(pdb_dir, pdb_fn) = os.path.split(pdb_path)
pdb_id = pdb_fn[3:7]
out_name = "pdb%s_%s.ent" % (pdb_id, "".join(chain_letters))
out_path = os.path.join(self.out_dir, out_name)
print "OUT PATH:",out_path
plural = "s" if (len(chain_letters) > 1) else "" # for printing
# Skip PDB generation if the file already exists
if (not overwrite) and (os.path.isfile(out_path)):
print("Chain%s %s of '%s' already extracted to '%s'." %
(plural, ", ".join(chain_letters), pdb_id, out_name))
return out_path
print("Extracting chain%s %s from %s..." % (plural,
", ".join(chain_letters), pdb_fn))
# Get structure, write new file with only given chains
if struct is None:
struct = self.parser.get_structure(pdb_id, pdb_path)
if len(chain_letters) == 1 and chain_letters[0] == '_':
chain_letters = ['A']
self.writer.set_structure(struct)
self.writer.save(out_path, select=SelectChains(chain_letters))
return out_path
class SelectChains(PDB.Select):
""" Only accept the specified chains when saving. """
def __init__(self, chain_letters):
self.chain_letters = chain_letters
def accept_chain(self, chain):
return (chain.get_id() in self.chain_letters)
if __name__ == "__main__":
""" Parses PDB id's desired chains, and creates new PDB structures. """
pdbList = PDB.PDBList()
splitter = ChainSplitter("%s/Downloads/pdb40d" %USER_HOME)
for line in open(INFILE):
if not line.startswith('>'):
continue
scop_dom_name = line.replace('>','').replace('\n','').split(' ')[0]
##
pdb_dom_name = scop_dom_name[1:5]
pdb_chain = scop_dom_name[5]
##
try:
pdb_fn = pdbList.retrieve_pdb_file(pdb_dom_name)
splitter.make_pdb(pdb_fn, pdb_chain)
        except Exception:
            print 'ERROR: %s not extracted' %pdb_dom_name
|
xulesc/general
|
download_pdb40d.py
|
Python
|
gpl-3.0
| 3,243
|
[
"CRYSTAL"
] |
bca1b478dc480dae1f9ba2bc8eb4606039964f34cc457ec9eb4fa295fcca3051
|
# -*- test-case-name: pyflakes -*-
# (c) 2005-2010 Divmod, Inc.
# See LICENSE file for details
import __builtin__
import os.path
import _ast
from . import messages
# utility function to iterate over an AST node's children, adapted
# from Python 2.6's standard ast module
try:
import ast
iter_child_nodes = ast.iter_child_nodes
except (ImportError, AttributeError):
def iter_child_nodes(node, astcls=_ast.AST):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name in node._fields:
field = getattr(node, name, None)
if isinstance(field, astcls):
yield field
elif isinstance(field, list):
for item in field:
yield item
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, line-number) indicating the scope and
line number that this binding was last used
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
class UnBinding(Binding):
'''Created by the 'del' operator.'''
class Importation(Binding):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source):
self.fullName = name
name = name.split('.')[0]
super(Importation, self).__init__(name, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Binding):
_property_decorator = False
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only C{__all__} assignment that can be recognized is one which takes
the value of a literal list containing literal strings. For example::
__all__ = ["foo", "bar"]
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def names(self):
"""
Return a list of the names referenced by this binding.
"""
names = []
if isinstance(self.source, _ast.List):
for node in self.source.elts:
if isinstance(node, _ast.Str):
names.append(node.s)
return names
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self))
def __init__(self):
super(Scope, self).__init__()
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
def __init__(self):
super(FunctionScope, self).__init__()
self.globals = {}
class ModuleScope(Scope):
pass
# Globally defined names which are not attributes of the __builtin__ module.
_MAGIC_GLOBALS = ['__file__', '__builtins__']
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
traceTree = False
def __init__(self, tree, filename=None):
if filename is None:
filename = '(none)'
self._deferredFunctions = []
self._deferredAssignments = []
self.dead_scopes = []
self.messages = []
self.filename = filename
self.scopeStack = [ModuleScope()]
self.futuresAllowed = True
self.handleChildren(tree)
self._runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self._runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
        # noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.check_dead_scopes()
def deferFunction(self, callable):
'''
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
'''
self._deferredFunctions.append((callable, self.scopeStack[:]))
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:]))
def _runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope in deferred:
self.scopeStack = scope
handler()
def scope(self):
return self.scopeStack[-1]
scope = property(scope)
def popScope(self):
self.dead_scopes.append(self.scopeStack.pop())
def check_dead_scopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.dead_scopes:
export = isinstance(scope.get('__all__'), ExportBinding)
if export:
all = scope['__all__'].names()
if os.path.split(self.filename)[1] != '__init__.py':
# Look for possible mistakes in the export list
undefined = set(all) - set(scope)
for name in undefined:
self.report(
messages.UndefinedExport,
scope['__all__'].source,
name)
else:
all = []
# Look for imported names that aren't used.
for importation in scope.itervalues():
if isinstance(importation, Importation):
if not importation.used and importation.name not in all:
self.report(
messages.UnusedImport,
importation.source,
importation.name)
def pushFunctionScope(self):
self.scopeStack.append(FunctionScope())
def pushClassScope(self):
self.scopeStack.append(ClassScope())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def handleChildren(self, tree):
for node in iter_child_nodes(tree):
self.handleNode(node, tree)
def isDocstring(self, node):
"""
Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.
"""
return isinstance(node, _ast.Str) or \
(isinstance(node, _ast.Expr) and
isinstance(node.value, _ast.Str))
def handleNode(self, node, parent):
node.parent = parent
if self.traceTree:
print ' ' * self.nodeDepth + node.__class__.__name__
self.nodeDepth += 1
if self.futuresAllowed and not \
(isinstance(node, _ast.ImportFrom) or self.isDocstring(node)):
self.futuresAllowed = False
nodeType = node.__class__.__name__.upper()
try:
handler = getattr(self, nodeType)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print ' ' * self.nodeDepth + 'end ' + node.__class__.__name__
def ignore(self, node):
pass
# "stmt" type nodes
RETURN = DELETE = PRINT = WHILE = IF = WITH = RAISE = TRYEXCEPT = \
TRYFINALLY = ASSERT = EXEC = EXPR = handleChildren
CONTINUE = BREAK = PASS = ignore
# "expr" type nodes
BOOLOP = BINOP = UNARYOP = IFEXP = DICT = SET = YIELD = COMPARE = \
CALL = REPR = ATTRIBUTE = SUBSCRIPT = LIST = TUPLE = handleChildren
NUM = STR = ELLIPSIS = ignore
# "slice" type nodes
SLICE = EXTSLICE = INDEX = handleChildren
# expression contexts are node instances too, though being constants
LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore
# same for operators
AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore
# additional node types
COMPREHENSION = EXCEPTHANDLER = KEYWORD = handleChildren
def addBinding(self, loc, value, reportRedef=True):
'''Called when a binding is altered.
- `loc` is the location (an object with lineno and optionally
col_offset attributes) of the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `reportRedef` is True (default), rebinding while unused will be
reported.
'''
if (isinstance(self.scope.get(value.name), FunctionDefinition)
and isinstance(value, FunctionDefinition)):
if not value._property_decorator:
self.report(messages.RedefinedFunction,
loc, value.name, self.scope[value.name].source)
if not isinstance(self.scope, ClassScope):
for scope in self.scopeStack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or value.fullName == existing.fullName)
and reportRedef):
self.report(messages.RedefinedWhileUnused,
loc, value.name, scope[value.name].source)
if isinstance(value, UnBinding):
try:
del self.scope[value.name]
except KeyError:
self.report(messages.UndefinedName, loc, value.name)
else:
self.scope[value.name] = value
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
if isinstance(self.scope, FunctionScope):
self.scope.globals.update(dict.fromkeys(node.names))
def LISTCOMP(self, node):
# handle generators before element
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.elt, node)
GENERATOREXP = SETCOMP = LISTCOMP
# dictionary comprehensions; introduced in Python 2.7
def DICTCOMP(self, node):
for gen in node.generators:
self.handleNode(gen, node)
self.handleNode(node.key, node)
self.handleNode(node.value, node)
def FOR(self, node):
"""
Process bindings for loop variables.
"""
vars = []
def collectLoopVars(n):
if isinstance(n, _ast.Name):
vars.append(n.id)
elif isinstance(n, _ast.expr_context):
return
else:
for c in iter_child_nodes(n):
collectLoopVars(c)
collectLoopVars(node.target)
for varn in vars:
if (isinstance(self.scope.get(varn), Importation)
# unused ones will get an unused import warning
and self.scope[varn].used):
self.report(messages.ImportShadowedByLoopVar,
node, varn, self.scope[varn].source)
self.handleChildren(node)
def NAME(self, node):
"""
Handle occurrence of Name (which can be a load/store/delete access.)
"""
# Locate the name in locals / function / globals scopes.
if isinstance(node.ctx, (_ast.Load, _ast.AugLoad)):
# try local scope
importStarred = self.scope.importStarred
try:
self.scope[node.id].used = (self.scope, node)
except KeyError:
pass
else:
return
# try enclosing function scopes
for scope in self.scopeStack[-2:0:-1]:
importStarred = importStarred or scope.importStarred
if not isinstance(scope, FunctionScope):
continue
try:
scope[node.id].used = (self.scope, node)
except KeyError:
pass
else:
return
# try global scope
importStarred = importStarred or self.scopeStack[0].importStarred
try:
self.scopeStack[0][node.id].used = (self.scope, node)
except KeyError:
if ((not hasattr(__builtin__, node.id))
and node.id not in _MAGIC_GLOBALS
and not importStarred):
if (os.path.basename(self.filename) == '__init__.py' and
node.id == '__path__'):
# the special name __path__ is valid only in packages
pass
else:
self.report(messages.UndefinedName, node, node.id)
elif isinstance(node.ctx, (_ast.Store, _ast.AugStore)):
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and node.id not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
if (node.id in scope
and scope[node.id].used
and scope[node.id].used[0] is self.scope
and node.id not in self.scope.globals):
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[node.id].used[1],
node.id,
scope[node.id].source)
break
if isinstance(node.parent,
(_ast.For, _ast.comprehension, _ast.Tuple, _ast.List)):
binding = Binding(node.id, node)
elif (node.id == '__all__' and
isinstance(self.scope, ModuleScope)):
binding = ExportBinding(node.id, node.parent.value)
else:
binding = Assignment(node.id, node)
if node.id in self.scope:
binding.used = self.scope[node.id].used
self.addBinding(node, binding)
elif isinstance(node.ctx, _ast.Del):
if isinstance(self.scope, FunctionScope) and \
node.id in self.scope.globals:
del self.scope.globals[node.id]
else:
self.addBinding(node, UnBinding(node.id, node))
else:
# must be a Param context -- this only happens for names in function
# arguments, but these aren't dispatched through here
raise RuntimeError(
"Got impossible expression context: %r" % (node.ctx,))
def FUNCTIONDEF(self, node):
# the decorators attribute is called decorator_list as of Python 2.6
if hasattr(node, 'decorators'):
for deco in node.decorators:
self.handleNode(deco, node)
else:
for deco in node.decorator_list:
self.handleNode(deco, node)
# Check for property decorator
func_def = FunctionDefinition(node.name, node)
for decorator in node.decorator_list:
if getattr(decorator, 'attr', None) in ('setter', 'deleter'):
func_def._property_decorator = True
self.addBinding(node, func_def)
self.LAMBDA(node)
def LAMBDA(self, node):
for default in node.args.defaults:
self.handleNode(default, node)
def runFunction():
args = []
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, _ast.Tuple):
addArgs(arg.elts)
else:
if arg.id in args:
self.report(messages.DuplicateArgument,
node, arg.id)
args.append(arg.id)
self.pushFunctionScope()
addArgs(node.args.args)
# vararg/kwarg identifiers are not Name nodes
if node.args.vararg:
args.append(node.args.vararg)
if node.args.kwarg:
args.append(node.args.kwarg)
for name in args:
self.addBinding(node, Argument(name, node), reportRedef=False)
if isinstance(node.body, list):
# case for FunctionDefs
for stmt in node.body:
self.handleNode(stmt, node)
else:
# case for Lambdas
self.handleNode(node.body, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.iteritems():
if (not binding.used and not name in self.scope.globals
and isinstance(binding, Assignment)):
self.report(messages.UnusedVariable,
binding.source, name)
self.deferAssignment(checkUnusedAssignments)
self.popScope()
self.deferFunction(runFunction)
def CLASSDEF(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
# decorator_list is present as of Python 2.6
for deco in getattr(node, 'decorator_list', []):
self.handleNode(deco, node)
for baseNode in node.bases:
self.handleNode(baseNode, node)
self.pushClassScope()
for stmt in node.body:
self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, Binding(node.name, node))
def ASSIGN(self, node):
self.handleNode(node.value, node)
for target in node.targets:
self.handleNode(target, node)
def AUGASSIGN(self, node):
# AugAssign is awkward: must set the context explicitly and visit twice,
# once with AugLoad context, once with AugStore context
node.target.ctx = _ast.AugLoad()
self.handleNode(node.target, node)
self.handleNode(node.value, node)
node.target.ctx = _ast.AugStore()
self.handleNode(node.target, node)
def IMPORT(self, node):
for alias in node.names:
name = alias.asname or alias.name
importation = Importation(name, node)
self.addBinding(node, importation)
def IMPORTFROM(self, node):
if node.module == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport, node,
[n.name for n in node.names])
else:
self.futuresAllowed = False
for alias in node.names:
if alias.name == '*':
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node, node.module)
continue
name = alias.asname or alias.name
importation = Importation(name, node)
if node.module == '__future__':
importation.used = (self.scope, node)
self.addBinding(node, importation)
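# A brief sketch of driving Checker directly: compile source text to an AST
# with _ast.PyCF_ONLY_AST (the module imported above) and read the collected
# messages; the source string is a made-up example that exercises the
# unused-import and unused-variable checks.
def _check_example_source():
    source = "import os\n\ndef f():\n    x = 1\n"
    tree = compile(source, '<example>', 'exec', _ast.PyCF_ONLY_AST)
    return Checker(tree, '<example>').messages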
|
pronto/dotfiles
|
.vim/pylibs/pyflakes/checker.py
|
Python
|
bsd-2-clause
| 22,259
|
[
"VisIt"
] |
bdb745b9ae6a0922d67223cb6f7cfdbb47ad284d3301eeee82be8326933c0def
|
import os
import pathlib
from .. import cgns, h5m, vtu, xdmf
from .._common import error
from .._helpers import _filetypes_from_path, read, reader_map
def add_args(parser):
parser.add_argument("infile", type=str, help="mesh file to decompress")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
def decompress(args):
if args.input_format:
fmts = [args.input_format]
else:
fmts = _filetypes_from_path(pathlib.Path(args.infile))
# pick the first
fmt = fmts[0]
size = os.stat(args.infile).st_size
print(f"File size before: {size / 1024 ** 2:.2f} MB")
mesh = read(args.infile, file_format=args.input_format)
# # Some converters (like VTK) require `points` to be contiguous.
# mesh.points = np.ascontiguousarray(mesh.points)
# write it out
if fmt == "cgns":
cgns.write(args.infile, mesh, compression=None)
elif fmt == "h5m":
h5m.write(args.infile, mesh, compression=None)
elif fmt == "vtu":
vtu.write(args.infile, mesh, binary=True, compression=None)
elif fmt == "xdmf":
xdmf.write(args.infile, mesh, data_format="HDF", compression=None)
else:
error(f"Don't know how to decompress {args.infile}.")
exit(1)
size = os.stat(args.infile).st_size
print(f"File size after: {size / 1024 ** 2:.2f} MB")
|
nschloe/meshio
|
src/meshio/_cli/_decompress.py
|
Python
|
mit
| 1,480
|
[
"VTK"
] |
1d2d471a519d75c32e7150bf1aca44c6d3a4e8978bd58ae51959f21d5fc19509
|
"""
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
from spglib import spglib
def Findspg(atoms,prec=None):
if prec == None:
from muse.Readwrite.ReadInput import indict
prec = float(indict['SymPrec'][0])
spg0 = spglib.get_spacegroup(atoms,symprec=prec)
if spg0:
spg1 = spg0.split()
spg = [str(spg1[0]), int(spg1[1][1:-1])]
else:
spg = []
# print spg0,spg
return spg
if __name__ == '__main__':
from muse.Readwrite import Read_Write
pc = Read_Write.read_vasp('194.vasp')
dataset = spglib.get_symmetry_dataset(pc, symprec=0.1)
spg = Findspg(pc,prec=0.1)
print spg,dataset
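# A hedged sketch of calling Findspg() with a plain (lattice, positions,
# numbers) cell tuple instead of a structure read from a VASP file; whether
# get_spacegroup accepts this tuple or an ASE Atoms object depends on the
# installed spglib version, and the simple-cubic cell below is a made-up
# example.
def _findspg_example():
    lattice = [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]
    positions = [[0.0, 0.0, 0.0]]
    numbers = [1]
    return Findspg((lattice, positions, numbers), prec=1e-5)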
|
zhongliliu/muse
|
muse/Symmetry/Findspg.py
|
Python
|
gpl-2.0
| 1,216
|
[
"VASP"
] |
6f8967530cc3c3b8c6bfa69de843dbc0482ed6e22e3288bc8226662718167d38
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the bencode parser plugin for Transmission BitTorrent files."""
from __future__ import unicode_literals
import unittest
from plaso.lib import definitions
from plaso.parsers import bencode_parser
from tests.parsers.bencode_plugins import test_lib
class BencodeTest(test_lib.BencodePluginTestCase):
"""Tests for bencode parser plugin for Transmission BitTorrent files."""
def testProcess(self):
"""Tests the Process function."""
parser = bencode_parser.BencodeParser()
storage_writer = self._ParseFile(['bencode', 'transmission'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 3)
# The order in which BencodeParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
event = events[0]
self.CheckTimestamp(event.timestamp, '2013-11-08 15:31:20.000000')
self.assertEqual(event.timestamp_desc, definitions.TIME_DESCRIPTION_ADDED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.destination, '/Users/brian/Downloads')
self.assertEqual(event_data.seedtime, 4)
# Test on second event of first torrent.
event = events[1]
self.CheckTimestamp(event.timestamp, '2013-11-08 18:24:24.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.destination, '/Users/brian/Downloads')
self.assertEqual(event_data.seedtime, 4)
expected_message = (
'Saved to /Users/brian/Downloads; '
'Minutes seeded: 4')
self._TestGetMessageStrings(event_data, expected_message, expected_message)
if __name__ == '__main__':
unittest.main()
|
rgayon/plaso
|
tests/parsers/bencode_plugins/transmission.py
|
Python
|
apache-2.0
| 1,970
|
[
"Brian"
] |
0d24317362a161808602ce70b262ee0462bfbf23c904ce8c0aeda327643a89dd
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_text
from ansible.playbook.role.requirement import RoleRequirement
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
    '''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False,
help='Initialize the skeleton role with default contents for a Container Enabled role.')
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
                                   help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg '
'file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
super(GalaxyCLI, self).parse()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.options.ignore_errors:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = self.options.init_path
force = self.options.force
role_skeleton = self.options.role_skeleton
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='1.2',
container_enabled=self.options.container_enabled
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(os.path.join(rel_root, d)) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.options.roles_path
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
        uses the args list of roles to be installed, unless the -r/--role-file option was specified. The list of roles
        can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
"""
role_file = self.options.role_file
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
display.deprecated("going forward only the yaml format will be supported", version="2.6")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.options.roles_path
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
github_user = to_text(self.args.pop(), errors='surrogate_or_strict')
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
return 0
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
fernandezcuesta/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 31,758
|
[
"Galaxy"
] |
0e7bdd2f65b312a75b59fb88f6f14ce645f72fd30baa3b5b7f3f05212dedc518
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import _validate, osd
def chao1(counts, bias_corrected=True):
"""Calculate chao1 richness estimator.
Uses the bias-corrected version unless `bias_corrected` is ``False`` *and*
there are both singletons and doubletons.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
bias_corrected : bool, optional
Indicates whether or not to use the bias-corrected version of the
equation. If ``False`` *and* there are both singletons and doubletons,
        the uncorrected version will be used. The bias-corrected version will
be used otherwise.
Returns
-------
double
Computed chao1 richness estimator.
See Also
--------
chao1_ci
Notes
-----
The uncorrected version is based on Equation 6 in [1]_:
.. math::
chao1=S_{obs}+\\frac{F_1^2}{2F_2}
where :math:`F_1` and :math:`F_2` are the count of singletons and
doubletons, respectively.
The bias-corrected version is defined as
.. math::
chao1=S_{obs}+\\frac{F_1(F_1-1)}{2(F_2+1)}
References
----------
.. [1] Chao, A. 1984. Non-parametric estimation of the number of classes in
a population. Scandinavian Journal of Statistics 11, 265-270.
"""
counts = _validate(counts)
o, s, d = osd(counts)
if not bias_corrected and s and d:
return o + s ** 2 / (d * 2)
else:
return o + s * (s - 1) / (2 * (d + 1))
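# Illustrative example (not part of the original module), assuming a count
# vector with 5 observed OTUs, 1 singleton (F1=1) and 1 doubleton (F2=1):
# the bias-corrected estimate is 5 + 1*(1-1)/(2*(1+1)) = 5.0, while
# chao1(counts, bias_corrected=False) would return 5 + 1**2/(2*1) = 5.5.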
def chao1_ci(counts, bias_corrected=True, zscore=1.96):
"""Calculate chao1 confidence interval.
Parameters
----------
counts : 1-D array_like, int
Vector of counts.
bias_corrected : bool, optional
Indicates whether or not to use the bias-corrected version of the
equation. If ``False`` *and* there are both singletons and doubletons,
        the uncorrected version will be used. The bias-corrected version will
be used otherwise.
zscore : scalar, optional
Score to use for confidence. Default of 1.96 is for a 95% confidence
interval.
Returns
-------
tuple
chao1 confidence interval as ``(lower_bound, upper_bound)``.
See Also
--------
chao1
Notes
-----
The implementation here is based on the equations in the EstimateS manual
[1]_. Different equations are employed to calculate the chao1 variance and
confidence interval depending on `bias_corrected` and the presence/absence
of singletons and/or doubletons.
Specifically, the following EstimateS equations are used:
1. No singletons, Equation 14.
2. Singletons but no doubletons, Equations 7, 13.
3. Singletons and doubletons, ``bias_corrected=True``, Equations 6, 13.
4. Singletons and doubletons, ``bias_corrected=False``, Equations 5, 13.
References
----------
.. [1] http://viceroy.eeb.uconn.edu/estimates/
"""
counts = _validate(counts)
o, s, d = osd(counts)
if s:
chao = chao1(counts, bias_corrected)
chaovar = _chao1_var(counts, bias_corrected)
return _chao_confidence_with_singletons(chao, o, chaovar, zscore)
else:
n = counts.sum()
return _chao_confidence_no_singletons(n, o, zscore)
def _chao1_var(counts, bias_corrected=True):
"""Calculates chao1 variance using decision rules in EstimateS."""
o, s, d = osd(counts)
if not d:
c = chao1(counts, bias_corrected)
return _chao1_var_no_doubletons(s, c)
if not s:
n = counts.sum()
return _chao1_var_no_singletons(n, o)
if bias_corrected:
return _chao1_var_bias_corrected(s, d)
else:
return _chao1_var_uncorrected(s, d)
def _chao1_var_uncorrected(singles, doubles):
"""Calculates chao1, uncorrected.
From EstimateS manual, equation 5.
"""
r = singles / doubles
return doubles * (.5 * r ** 2 + r ** 3 + .24 * r ** 4)
def _chao1_var_bias_corrected(s, d):
"""Calculates chao1 variance, bias-corrected.
`s` is the number of singletons and `d` is the number of doubletons.
From EstimateS manual, equation 6.
"""
return (s * (s - 1) / (2 * (d + 1)) + (s * (2 * s - 1) ** 2) /
(4 * (d + 1) ** 2) + (s ** 2 * d * (s - 1) ** 2) /
(4 * (d + 1) ** 4))
def _chao1_var_no_doubletons(s, chao1):
"""Calculates chao1 variance in absence of doubletons.
From EstimateS manual, equation 7.
`s` is the number of singletons, and `chao1` is the estimate of the mean of
Chao1 from the same dataset.
"""
return s * (s - 1) / 2 + s * (2 * s - 1) ** 2 / 4 - s ** 4 / (4 * chao1)
def _chao1_var_no_singletons(n, o):
"""Calculates chao1 variance in absence of singletons.
`n` is the number of individuals and `o` is the number of observed OTUs.
From EstimateS manual, equation 8.
"""
return o * np.exp(-n / o) * (1 - np.exp(-n / o))
def _chao_confidence_with_singletons(chao, observed, var_chao, zscore=1.96):
"""Calculates confidence bounds for chao1 or chao2.
Uses Eq. 13 of EstimateS manual.
`zscore` is the score to use for confidence. The default of 1.96 is for 95%
confidence.
"""
T = chao - observed
    # if no diff between chao and observed, CI is just point estimate of
# observed
if T == 0:
return observed, observed
K = np.exp(abs(zscore) * np.sqrt(np.log(1 + (var_chao / T ** 2))))
return observed + T / K, observed + T * K
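# Worked illustration (not part of the original module) of the equation
# above: with chao = 15, observed = 10 and var_chao = 25, T = 5 and
# K = exp(1.96 * sqrt(ln(1 + 25/25))) ~= 5.11, giving an interval of
# roughly (10 + 5/5.11, 10 + 5*5.11) ~= (10.98, 35.6).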
def _chao_confidence_no_singletons(n, s, zscore=1.96):
"""Calculates confidence bounds for chao1/chao2 in absence of singletons.
Uses Eq. 14 of EstimateS manual.
`n` is the number of individuals and `s` is the number of OTUs.
"""
P = np.exp(-n / s)
return (max(s, s / (1 - P) - zscore * np.sqrt((s * P / (1 - P)))),
s / (1 - P) + zscore * np.sqrt(s * P / (1 - P)))
|
jensreeder/scikit-bio
|
skbio/diversity/alpha/_chao1.py
|
Python
|
bsd-3-clause
| 6,329
|
[
"scikit-bio"
] |
947fa6461dd3e7cdfbdb71d7f6befd6df70fd95b97d4519101ace42131595669
|
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2015, PAL Team.
# All rights reserved. See LICENSE for details.
CHAINS = set([
"A&W Restaurants",
"America's Incredible Pizza Company",
"Applebee's",
"Arby's",
"Arctic Circle Restaurants",
"Arthur Treacher's Fish & Chips",
"Atlanta Bread Company",
"Auntie Anne's",
"Bahama Breeze",
"Baja Fresh",
"Bakers Square",
"Baskin-Robbins",
"Beef O'Brady's",
"Ben & Jerry's",
"Benihana",
"Bennigan's",
"Bertucci's",
"Big Boy",
"Bikinis Sports Bar & Grill",
"BJ's Restaurant & Brewery",
"Black Angus Steakhouse",
"Black-eyed Pea",
"Blimpie",
"Bob Evans Restaurants",
"Bojangles' Famous Chicken 'n Biscuits",
"Bonefish Grill",
"Boston Market",
"Braum's",
"Bravo!, Cucina Italiana",
"Brio",
"Bubba Gump Shrimp Company",
"Buca di Beppo",
"Buffalo Wild Wings",
"Burger King",
"Burger Street",
"Burgerville",
"Cafe Rio",
"California Pizza Kitchen",
"California Tortilla",
"Camille's Sidewalk Cafe",
"Captain D's",
"The Capital Grille",
"Carino's Italian Grill",
"Carl's Jr.",
"Carrabba's Italian Grill",
"Carrows",
"Carvel Ice Cream",
"Champps Americana",
"Charley's Grilled Subs",
"Charlie Brown's Fresh Grill",
"Checkers",
"Cheeburger Cheeburger",
"Cheeseburger in Paradise",
"Cheesecake Factory",
"Cheddar's Casual Café",
"Chevys Fresh Mex",
"Chicken Express",
"Chick-fil-A",
"Chili's",
"Chipotle Mexican Grill",
"Chronic Tacos",
"Chuck-A-Rama",
"Chuck E. Cheese's",
"Church's",
"CiCi's Pizza",
"Cinnabon",
"Claim Jumper",
"Coco's Bakery",
"Cold Stone Creamery",
"Copeland's",
"Country Buffet",
"Cracker Barrel Old Country Store",
"Culver's",
"Dairy Queen",
"Damon's Grill",
"Dave & Buster's",
"Denny's",
"Del Taco",
"Dickey's Barbecue Pit",
"Dixie Chili and Deli",
"Domino's Pizza",
"Don Pablo's",
"Donatos Pizza",
"Dunkin' Donuts",
"East of Chicago Pizza",
"EatZi's",
"Eat'n Park",
"Eegee's",
"El Chico",
"El Pollo Loco",
"Elephant Bar",
"El Taco Tote",
"Famous Dave's",
"Farmer Boys",
"Fatburger",
"FATZ",
"Fazoli's",
"Five Guys Famous Burgers and Fries",
"Fleming's Prime Steakhouse & Wine Bar",
"Freddy's Frozen Custard & Steakburgers",
"Freebirds World Burrito",
"Fresh Choice",
"Friendly's",
"Fuddruckers",
"Gatti's Pizza",
"GameWorks",
"Gino's Pizza and Spaghetti",
"Godfather's Pizza",
"Golden Chick",
"Golden Corral",
"Green Burrito",
"Green Mill[disambiguation needed]",
"Ground Round",
"Hard Rock Cafe",
"Hardee's",
"Hobee's Restaurant",
"Hooters",
"Houlihan's",
"Houston's Restaurant",
"Howard Johnson's",
"Huddle House",
"HuHot Mongolian Grill",
"Hungry Howie's Pizza",
"IHOP",
"In-N-Out Burger",
"Jack in the Box",
"Jack's",
"Jamba Juice",
"Jason's Deli",
"Jerry's Subs & Pizza",
"Jersey Mike's Subs",
"Jimmy John's",
"Jim's Restaurants",
"Joe's Crab Shack",
"John's Incredible Pizza",
"Johnny Rockets",
"Ker's WingHouse",
"KFC",
"Krispy Kreme",
"Krystal",
"L&L Hawaiian Barbecue",
"Landry's Restaurants",
"Ledo Pizza",
"Lee Roy Selmon's",
"Lee's Famous Recipe Chicken",
"Little Caesars",
"Logan's Roadhouse",
"Lone Star Steakhouse & Saloon",
"LongHorn Steakhouse",
"Long John Silver's",
"Luby's",
"Lyon's",
"Maggiano's Little Italy",
"Marie Callender's",
"Max & Erma's",
"McAlister's Deli",
"McDonald's",
"The Melting Pot",
"Mazzio's Italian Eatery",
"Miller's Ale House",
"Milo's Hamburgers",
"Mitchell's Fish Market",
"Moe's Southwest Grill",
"Montana Mike's",
"Mr. Hero",
"Mrs. Fields",
"National Coney Island",
"Naugles",
"Noodles & Company",
"O'Charley's",
"Old Country Buffet",
"Olive Garden",
"On the Border Mexican Grill & Cantina",
"Outback Steakhouse",
"The Old Spaghetti Factory",
"The Original Italian Pie",
"The Original Pancake House",
"Panera Bread",
"Panda Express",
"Papa Gino's",
"Papa John's Pizza",
"Papa Murphy's Take 'N' Bake pizza",
"Pei Wei Asian Diner",
"Penn Station (restaurant)",
"Perkins Restaurant and Bakery",
"P. F. Chang's China Bistro",
"Pita Pit",
"Pizza Hut",
"Pizza Inn",
"Pizza Ranch",
"Planet Hollywood",
"Ponderosa Steakhouse and Bonanza Steakhouse",
"Popeyes Chicken & Biscuits",
"Portillo's Restaurants",
"Port of Subs",
"Potbelly Sandwich Works",
"Qdoba Mexican Grill",
"Quaker Steak & Lube",
"Quiznos",
"RA Sushi",
"Rainforest Cafe",
"Rally's",
"Raising Cane's Chicken Fingers",
"Rax",
"Red Hot & Blue",
"Red Lobster",
"Red Robin",
"Redstone American Grill",
"Robeks",
"Rock Bottom",
"Romano's Macaroni Grill",
"Round Table Pizza",
"Roy Rogers Restaurants",
"Roy's",
"Ruby Tuesday",
"Rubio's Fresh Mexican Grill",
"Ruth's Chris Steak House",
"Runza",
"Saladworks",
"Sbarro",
"Schlotzsky's",
"Seasons 52",
"Seattle's Best Coffee",
"Shake Shack",
"Shane's Rib Shack",
"Showmars",
"Shoney's",
"Sizzler",
"Skyline Chili",
"Smashburger",
"Smokey Bones",
"Sneaky Pete's",
"Sonic Drive-In",
"Souplantation and Sweet Tomatoes",
"Spaghetti Warehouse",
"Spangles",
"Starbucks",
"Steak 'n Shake",
"Sticky Fingers",
"Stir Crazy",
"Sub Station II",
"Subway",
"Sweet Tomatoes",
"Swensen's",
"Swensons",
"T.G.I. Friday's",
"Taco Bell",
"Taco Bueno",
"Taco Cabana",
"Taco John's",
"Taco Mayo",
"Taco Tico",
"Taco Time",
"Texas Roadhouse",
"Tijuana Flats",
"Tilted Kilt",
"Tony Roma's",
"Trader Vic's",
"Twin Peaks",
"Umami Burger",
"Uncle Maddio's Pizza Joint",
"Uno Chicago Grill",
"Valentino's",
"Village Inn",
"Waffle House",
"Wendy's",
"Wetzel's Pretzels",
"Whataburger",
"Which Wich?",
"White Castle",
"Wienerschnitzel",
"Wild Wing Cafe",
"York Steak House",
"Zaxby's"
])
|
Machyne/pal
|
pal/name_entities/chains.py
|
Python
|
bsd-3-clause
| 6,461
|
[
"MOE"
] |
cfafe871e374e4b7ccf544f25bbf447f3dd59b36f65bf022b39a9a066f70940e
|
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import renpy.display
import renpy.pyanalysis
import random
def compiling(loc):
file, number = loc # @ReservedAssignment
renpy.game.exception_info = "Compiling ATL code at %s:%d" % (file, number)
def executing(loc):
file, number = loc # @ReservedAssignment
renpy.game.exception_info = "Executing ATL code at %s:%d" % (file, number)
# A map from the name of a time warp function to the function itself.
warpers = { }
def atl_warper(f):
name = f.func_name
warpers[name] = f
return f
# The pause warper is used internally when no other warper is
# specified.
@atl_warper
def pause(t):
if t < 1.0:
return 0.0
else:
return 1.0
@atl_warper
def instant(t):
return 1.0
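# Illustrative sketch (not part of the original file): additional time warp
# functions can be registered with the decorator above. The name below is
# hypothetical; it simply returns the completion fraction clamped to [0, 1].
@atl_warper
def example_linear(t):
    return min(max(t, 0.0), 1.0)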
position = renpy.object.Sentinel("position")
def any_object(x):
return x
def bool_or_none(x):
if x is None:
return x
return bool(x)
def float_or_none(x):
if x is None:
return x
return float(x)
# A dictionary giving property names and the corresponding default
# values.
PROPERTIES = {
"pos" : (position, position),
"xpos" : position,
"ypos" : position,
"anchor" : (position, position),
"xanchor" : position,
"yanchor" : position,
"xaround" : position,
"yaround" : position,
"xanchoraround" : float,
"yanchoraround" : float,
"align" : (float, float),
"xalign" : float,
"yalign" : float,
"rotate" : float,
"rotate_pad" : bool,
"transform_anchor" : bool,
"xzoom" : float,
"yzoom" : float,
"zoom" : float,
"nearest" : bool_or_none,
"alpha" : float,
"additive" : float,
"around" : (position, position),
"alignaround" : (float, float),
"angle" : float,
"radius" : float,
"crop" : (float, float, float, float),
"crop_relative" : bool,
"size" : (int, int),
"corner1" : (float, float),
"corner2" : (float, float),
"subpixel" : bool,
"delay" : float,
"xoffset" : float,
"yoffset" : float,
"offset" : (int, int),
"xcenter" : position,
"ycenter" : position,
"debug" : any_object,
"events" : bool,
"xpan" : float_or_none,
"ypan" : float_or_none,
"xtile" : int,
"ytile" : int,
}
def correct_type(v, b, ty):
"""
Corrects the type of v to match ty. b is used to inform the match.
"""
if ty is position:
if v is None:
return None
else:
return type(b)(v)
else:
return ty(v)
def interpolate(t, a, b, type): # @ReservedAssignment
"""
Linearly interpolate the arguments.
"""
# Recurse into tuples.
if isinstance(b, tuple):
if a is None:
a = [ None ] * len(b)
return tuple(interpolate(t, i, j, ty) for i, j, ty in zip(a, b, type))
# Deal with booleans, nones, etc.
elif b is None or isinstance(b, (bool, basestring)):
if t >= 1.0:
return b
else:
return a
# Interpolate everything else.
else:
if a is None:
a = 0
return correct_type(a + t * (b - a), b, type)
# Interpolate the value of a spline. This code is based on Aenakume's code,
# from 00splines.rpy.
def interpolate_spline(t, spline):
if isinstance(spline[-1], tuple):
return tuple(interpolate_spline(t, i) for i in zip(*spline))
if spline[0] is None:
return spline[-1]
if len(spline) == 2:
t_p = 1.0 - t
rv = t_p * spline[0] + t * spline[-1]
elif len(spline) == 3:
t_pp = (1.0 - t)**2
t_p = 2 * t * (1.0 - t)
t2 = t**2
rv = t_pp * spline[0] + t_p * spline[1] + t2 * spline[2]
elif len(spline) == 4:
t_ppp = (1.0 - t)**3
t_pp = 3 * t * (1.0 - t)**2
t_p = 3 * t**2 * (1.0 - t)
t3 = t**3
rv = t_ppp * spline[0] + t_pp * spline[1] + t_p * spline[2] + t3 * spline[3]
else:
raise Exception("ATL can't interpolate splines of length %d." % len(spline))
return correct_type(rv, spline[-1], position)
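# Worked illustration (not part of the original file): for a quadratic
# spline (0.0, 10.0, 20.0) at t = 0.5 the weights are 0.25, 0.5 and 0.25,
# so interpolate_spline(0.5, (0.0, 10.0, 20.0)) evaluates to 10.0.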
# A list of atl transforms that may need to be compile.
compile_queue = [ ]
def compile_all():
"""
Called after the init phase is finished and transforms are compiled,
to compile all transforms.
"""
global compile_queue
for i in compile_queue:
if i.atl.constant == GLOBAL_CONST:
i.compile()
compile_queue = [ ]
# This is the context used when compiling an ATL statement. It stores the
# scopes that are used to evaluate the various expressions in the statement,
# and has a method to do the evaluation and return a result.
class Context(object):
def __init__(self, context):
self.context = context
def eval(self, expr): # @ReservedAssignment
expr = renpy.python.escape_unicode(expr)
return eval(expr, renpy.store.__dict__, self.context) # @UndefinedVariable
def __eq__(self, other):
if not isinstance(other, Context):
return False
return self.context == other.context
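# Illustrative usage (not part of the original file): expressions are
# evaluated against the store plus the supplied scope, so, hypothetically,
# Context({"offset": 10}).eval("offset * 2") would return 20.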
# This is intended to be subclassed by ATLTransform. It takes care of
# managing ATL execution, which allows ATLTransform itself to not care
# much about the contents of this file.
class ATLTransformBase(renpy.object.Object):
# Compatibility with older saves.
parameters = renpy.ast.ParameterInfo([ ], [ ], None, None)
parent_transform = None
atl_st_offset = 0
# The block, as first compiled for prediction.
predict_block = None
nosave = [ 'parent_transform' ]
def __init__(self, atl, context, parameters):
# The constructor will be called by atltransform.
if parameters is None:
parameters = ATLTransformBase.parameters
# The parameters that we take.
self.parameters = parameters
# The raw code that makes up this ATL statement.
self.atl = atl
# The context in which execution occurs.
self.context = Context(context)
# The code after it has been compiled into a block.
self.block = None
# The same thing, but only if the code was compiled into a block
# for prediction purposes only.
self.predict_block = None
# The properties of the block, if it contains only an
# Interpolation.
self.properties = None
# The state of the statement we are executing. As this can be
# shared between more than one object (in the case of a hide),
# the data must not be altered.
self.atl_state = None
# Are we done?
self.done = False
# The transform event we are going to process.
self.transform_event = None
# The transform event we last processed.
self.last_transform_event = None
# The child transform event we last processed.
self.last_child_transform_event = None
# The child, without any transformations.
self.raw_child = None
# The parent transform that was called to create this transform.
self.parent_transform = None
# The offset between st and when this ATL block first executed.
self.atl_st_offset = 0
if renpy.game.context().init_phase:
compile_queue.append(self)
def get_block(self):
"""
Returns the compiled block to use.
"""
if self.block:
return self.block
elif self.predict_block and renpy.display.predict.predicting:
return self.predict_block
else:
return None
def take_execution_state(self, t):
"""
Updates self to begin executing from the same point as t. This
requires that t.atl is self.atl.
"""
super(ATLTransformBase, self).take_execution_state(t)
self.atl_st_offset = None
if self is t:
return
elif not isinstance(t, ATLTransformBase):
return
elif t.atl is not self.atl:
return
# Important to do it this way, so we use __eq__. The exception handling
# optimistically assumes that uncomparable objects are the same.
try:
if not (t.context == self.context):
return
except:
pass
self.done = t.done
self.block = t.block
self.atl_state = t.atl_state
self.transform_event = t.transform_event
self.last_transform_event = t.last_transform_event
self.last_child_transform_event = t.last_child_transform_event
self.st = t.st
self.at = t.at
self.st_offset = t.st_offset
self.at_offset = t.at_offset
self.atl_st_offset = t.atl_st_offset
if self.child is renpy.display.motion.null:
self.child = t.child
self.raw_child = t.raw_child
def __call__(self, *args, **kwargs):
_args = kwargs.pop("_args", None)
context = self.context.context.copy()
for k, v in self.parameters.parameters:
if v is not None:
context[k] = renpy.python.py_eval(v)
positional = list(self.parameters.positional)
args = list(args)
child = None
if not positional and args:
child = args.pop(0)
# Handle positional arguments.
while positional and args:
name = positional.pop(0)
value = args.pop(0)
if name in kwargs:
raise Exception('Parameter %r is used as both a positional and keyword argument to a transition.' % name)
context[name] = value
if args:
raise Exception("Too many arguments passed to ATL transform.")
# Handle keyword arguments.
for k, v in kwargs.iteritems():
if k in positional:
positional.remove(k)
context[k] = v
elif k in context:
context[k] = v
elif k == 'child':
child = v
else:
raise Exception('Parameter %r is not known by ATL Transform.' % k)
if child is None:
child = self.child
# Create a new ATL Transform.
parameters = renpy.ast.ParameterInfo({ }, positional, None, None)
rv = renpy.display.motion.ATLTransform(
atl=self.atl,
child=child,
style=self.style_arg,
context=context,
parameters=parameters,
_args=_args,
)
rv.parent_transform = self
rv.take_state(self)
return rv
def compile(self): # @ReservedAssignment
"""
Compiles the ATL code into a block. As necessary, updates the
properties.
"""
constant = (self.atl.constant == GLOBAL_CONST)
if not constant:
for p in self.parameters.positional:
if p not in self.context.context:
raise Exception("Cannot compile ATL Transform at %s:%d, as it's missing positional parameter %s." % (
self.atl.loc[0],
self.atl.loc[1],
self.parameters.positional[0],
))
if constant and self.parent_transform:
if self.parent_transform.block:
self.block = self.parent_transform.block
self.properties = self.parent_transform.properties
self.parent_transform = None
return self.block
old_exception_info = renpy.game.exception_info
block = self.atl.compile(self.context)
if len(block.statements) == 1 and isinstance(block.statements[0], Interpolation):
interp = block.statements[0]
if interp.duration == 0 and interp.properties:
self.properties = interp.properties[:]
if not constant and renpy.display.predict.predicting:
self.predict_block = block
else:
self.block = block
self.predict_block = None
renpy.game.exception_info = old_exception_info
if constant and self.parent_transform:
self.parent_transform.block = self.block
self.parent_transform.properties = self.properties
self.parent_transform = None
return block
def execute(self, trans, st, at):
if self.done:
return None
block = self.get_block()
if block is None:
block = self.compile()
events = [ ]
# Hide request.
if trans.hide_request:
self.transform_event = "hide"
if trans.replaced_request:
self.transform_event = "replaced"
# Notice transform events.
if renpy.config.atl_multiple_events:
if self.transform_event != self.last_transform_event:
events.append(self.transform_event)
self.last_transform_event = self.transform_event
# Propagate transform_events from children.
if (self.child is not None) and self.child.transform_event != self.last_child_transform_event:
self.last_child_transform_event = self.child.transform_event
if self.child.transform_event is not None:
self.transform_event = self.child.transform_event
# Notice transform events, again.
if self.transform_event != self.last_transform_event:
events.append(self.transform_event)
self.last_transform_event = self.transform_event
old_exception_info = renpy.game.exception_info
if (self.atl_st_offset is None) or (st - self.atl_st_offset) < 0:
self.atl_st_offset = st
if self.atl.animation:
timebase = at
else:
timebase = st - self.atl_st_offset
action, arg, pause = block.execute(trans, timebase, self.atl_state, events)
renpy.game.exception_info = old_exception_info
if action == "continue" and not renpy.display.predict.predicting:
self.atl_state = arg
else:
self.done = True
return pause
def predict_one(self):
self.atl.predict(self.context)
def visit(self):
block = self.get_block()
if block is None:
block = self.compile()
return self.children + block.visit()
# This is used in mark_constant to analyze expressions for constness.
is_constant_expr = renpy.pyanalysis.Analysis().is_constant_expr
GLOBAL_CONST = renpy.pyanalysis.GLOBAL_CONST
# The base class for raw ATL statements.
class RawStatement(object):
constant = None
def __init__(self, loc):
super(RawStatement, self).__init__()
self.loc = loc
# Compiles this RawStatement into a Statement, by using ctx to
# evaluate expressions as necessary.
def compile(self, ctx): # @ReservedAssignment
raise Exception("Compile not implemented.")
# Predicts the images used by this statement.
def predict(self, ctx):
return
def mark_constant(self):
"""
Sets self.constant to true if all expressions used in this statement
and its children are constant.
"""
self.constant = 0
# The base class for compiled ATL Statements.
class Statement(renpy.object.Object):
def __init__(self, loc):
super(Statement, self).__init__()
self.loc = loc
# trans is the transform we're working on.
# st is the time since this statement started executing.
# state is the state stored by this statement, or None if
# we've just started executing this statement.
# event is an event we're triggering.
#
# "continue", state, pause - Causes this statement to execute
# again, with the given state passed in the second time around.
#
#
# "next", timeleft, pause - Causes the next statement to execute,
# with timeleft being the amount of time left after this statement
# finished.
#
# "event", (name, timeleft), pause - Causes an event to be reported,
# and control to head up to the event handler.
#
# "repeat", (count, timeleft), pause - Causes the repeat behavior
# to occur.
#
# As the Repeat statement can only appear in a block, only Block
# needs to deal with the repeat behavior.
#
# Pause is the amount of time until execute should be called again,
# or None if there's no need to call execute ever again.
def execute(self, trans, st, state, events):
raise Exception("Not implemented.")
# Return a list of displayable children.
def visit(self):
return [ ]
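# Minimal sketch (not part of the original file) of the protocol described
# above: a hypothetical statement that does nothing for a fixed duration,
# returning "continue" until the time is up and "next" afterwards.
#
# class ExampleWait(Statement):
#     def __init__(self, loc, duration):
#         super(ExampleWait, self).__init__(loc)
#         self.duration = duration
#     def execute(self, trans, st, state, events):
#         if st < self.duration:
#             return "continue", None, self.duration - st
#         return "next", st - self.duration, None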
# This represents a Raw ATL block.
class RawBlock(RawStatement):
# Should we use the animation timebase or the showing timebase?
animation = False
def __init__(self, loc, statements, animation):
super(RawBlock, self).__init__(loc)
# A list of RawStatements in this block.
self.statements = statements
self.animation = animation
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
statements = [ i.compile(ctx) for i in self.statements ]
return Block(self.loc, statements)
def predict(self, ctx):
for i in self.statements:
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.statements:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
# A compiled ATL block.
class Block(Statement):
def __init__(self, loc, statements):
super(Block, self).__init__(loc)
# A list of statements in the block.
self.statements = statements
# The start times of various statements.
self.times = [ ]
for i, s in enumerate(statements):
if isinstance(s, Time):
self.times.append((s.time, i + 1))
self.times.sort()
def execute(self, trans, st, state, events):
executing(self.loc)
# Unpack the state.
if state is not None:
index, start, loop_start, repeats, times, child_state = state
else:
index, start, loop_start, repeats, times, child_state = 0, 0, 0, 0, self.times[:], None
# What we might be returning.
action = "continue"
arg = None
pause = None
while action == "continue":
# Target is the time we're willing to execute to.
# Max_pause is how long we'll wait before executing again.
# If we have times queued up, then use them to inform target
# and time.
if times:
time, tindex = times[0]
target = min(time, st)
max_pause = time - target
# Otherwise, take the defaults.
else:
target = st
max_pause = 15
while True:
# If we've hit the last statement, it's the end of
# this block.
if index >= len(self.statements):
return "next", target - start, None
# Find the statement and try to run it.
stmt = self.statements[index]
action, arg, pause = stmt.execute(trans, target - start, child_state, events)
# On continue, persist our state.
if action == "continue":
if pause is None:
pause = max_pause
action, arg, pause = "continue", (index, start, loop_start, repeats, times, arg), min(max_pause, pause)
break
elif action == "event":
return action, arg, pause
# On next, advance to the next statement in the block.
elif action == "next":
index += 1
start = target - arg
child_state = None
# On repeat, either terminate the block, or go to
# the first statement.
elif action == "repeat":
count, arg = arg
loop_end = target - arg
duration = loop_end - loop_start
if duration <= 0:
raise Exception("ATL appears to be in an infinite loop.")
# Figure how many durations can occur between the
# start of the loop and now.
new_repeats = int((target - loop_start) / duration)
if count is not None:
if repeats + new_repeats >= count:
new_repeats = count - repeats
loop_start += new_repeats * duration
return "next", target - loop_start, None
repeats += new_repeats
loop_start = loop_start + new_repeats * duration
start = loop_start
index = 0
child_state = None
if times:
time, tindex = times[0]
if time <= target:
times.pop(0)
index = tindex
start = time
child_state = None
continue
return action, arg, pause
def visit(self):
return [ j for i in self.statements for j in i.visit() ]
# This can become one of four things:
#
# - A pause.
# - An interpolation (which optionally can also reference other
# blocks, as long as they're not time-dependent, and have the same
# arity as the interpolation).
# - A call to another block.
# - A command to change the image, perhaps with a transition.
#
# We won't decide which it is until runtime, as we need the
# values of the variables here.
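#
# For illustration (not from the original file), ATL lines such as
#     linear 0.5 xalign 1.0
#     pause 1.0
#     "logo.png" with dissolve
# would each be parsed into one of these multipurpose nodes.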
class RawMultipurpose(RawStatement):
warp_function = None
def __init__(self, loc):
super(RawMultipurpose, self).__init__(loc)
self.warper = None
self.duration = None
self.properties = [ ]
self.expressions = [ ]
self.splines = [ ]
self.revolution = None
self.circles = "0"
def add_warper(self, name, duration, warp_function):
self.warper = name
self.duration = duration
self.warp_function = warp_function
def add_property(self, name, exprs):
self.properties.append((name, exprs))
def add_expression(self, expr, with_clause):
self.expressions.append((expr, with_clause))
def add_revolution(self, revolution):
self.revolution = revolution
def add_circles(self, circles):
self.circles = circles
def add_spline(self, name, exprs):
self.splines.append((name, exprs))
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
# Figure out what kind of statement we have. If there's no
# interpolator, and no properties, than we have either a
# call, or a child statement.
if (self.warper is None and
self.warp_function is None and
not self.properties and
not self.splines and
len(self.expressions) == 1):
expr, withexpr = self.expressions[0]
child = ctx.eval(expr)
if withexpr:
transition = ctx.eval(withexpr)
else:
transition = None
if isinstance(child, (int, float)):
return Interpolation(self.loc, "pause", child, [ ], None, 0, [ ])
child = renpy.easy.displayable(child)
if isinstance(child, ATLTransformBase):
child.compile()
return child.get_block()
else:
return Child(self.loc, child, transition)
compiling(self.loc)
# Otherwise, we probably have an interpolation statement.
if self.warp_function:
warper = ctx.eval(self.warp_function)
else:
warper = self.warper or "instant"
if warper not in warpers:
raise Exception("ATL Warper %s is unknown at runtime." % warper)
properties = [ ]
for name, expr in self.properties:
if name not in PROPERTIES:
raise Exception("ATL Property %s is unknown at runtime." % property)
value = ctx.eval(expr)
properties.append((name, value))
splines = [ ]
for name, exprs in self.splines:
if name not in PROPERTIES:
raise Exception("ATL Property %s is unknown at runtime." % property)
values = [ ctx.eval(i) for i in exprs ]
splines.append((name, values))
for expr, _with in self.expressions:
try:
value = ctx.eval(expr)
except:
raise Exception("Could not evaluate expression %r when compiling ATL." % expr)
if not isinstance(value, ATLTransformBase):
raise Exception("Expression %r is not an ATL transform, and so cannot be included in an ATL interpolation." % expr)
value.compile()
if value.properties is None:
raise Exception("ATL transform %r is too complicated to be included in interpolation." % expr)
properties.extend(value.properties)
duration = ctx.eval(self.duration)
circles = ctx.eval(self.circles)
return Interpolation(self.loc, warper, duration, properties, self.revolution, circles, splines)
def mark_constant(self):
constant = GLOBAL_CONST
constant = min(constant, is_constant_expr(self.warp_function))
constant = min(constant, is_constant_expr(self.duration))
constant = min(constant, is_constant_expr(self.circles))
for _name, expr in self.properties:
constant = min(constant, is_constant_expr(expr))
for _name, exprs in self.splines:
for expr in exprs:
constant = min(constant, is_constant_expr(expr))
for expr, withexpr in self.expressions:
constant = min(constant, is_constant_expr(expr))
constant = min(constant, is_constant_expr(withexpr))
self.constant = constant
def predict(self, ctx):
for i, _j in self.expressions:
try:
i = ctx.eval(i)
except:
continue
if isinstance(i, ATLTransformBase):
i.atl.predict(ctx)
return
try:
renpy.easy.predict(i)
except:
continue
# This lets us have an ATL transform as our child.
class RawContainsExpr(RawStatement):
def __init__(self, loc, expr):
super(RawContainsExpr, self).__init__(loc)
self.expression = expr
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
child = ctx.eval(self.expression)
return Child(self.loc, child, None)
def mark_constant(self):
self.constant = is_constant_expr(self.expression)
# This allows us to have multiple ATL transforms as children.
class RawChild(RawStatement):
def __init__(self, loc, child):
super(RawChild, self).__init__(loc)
self.children = [ child ]
def compile(self, ctx): # @ReservedAssignment
children = [ ]
for i in self.children:
children.append(renpy.display.motion.ATLTransform(i, context=ctx.context))
box = renpy.display.layout.MultiBox(layout='fixed')
for i in children:
box.add(i)
return Child(self.loc, box, None)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.children:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
# This changes the child of this statement, optionally with a transition.
class Child(Statement):
def __init__(self, loc, child, transition):
super(Child, self).__init__(loc)
self.child = child
self.transition = transition
def execute(self, trans, st, state, events):
executing(self.loc)
old_child = trans.raw_child
child = self.child
if child._duplicatable:
child = self.child._duplicate(trans._args)
child._unique()
if (old_child is not None) and (old_child is not renpy.display.motion.null) and (self.transition is not None):
child = self.transition(old_widget=old_child,
new_widget=child)
child._unique()
else:
child = child
trans.set_child(child, duplicate=False)
trans.raw_child = self.child
return "next", st, None
def visit(self):
return [ self.child ]
# This causes interpolation to occur.
class Interpolation(Statement):
def __init__(self, loc, warper, duration, properties, revolution, circles, splines):
super(Interpolation, self).__init__(loc)
self.warper = warper
self.duration = duration
self.properties = properties
self.splines = splines
# The direction we revolve in: cw, ccw, or None.
self.revolution = revolution
# The number of complete circles we make.
self.circles = circles
def execute(self, trans, st, state, events):
executing(self.loc)
warper = warpers.get(self.warper, self.warper)
if (self.warper != "instant") and (state is None) and (
(trans.atl_state is not None) or (trans.st == 0)
):
first = True
else:
first = False
if self.duration:
complete = min(1.0, st / self.duration)
else:
complete = 1.0
if complete < 0.0:
complete = 0.0
elif complete > 1.0:
complete = 1.0
complete = warper(complete)
if state is None:
# Create a new transform state, and apply the property
# changes to it.
newts = renpy.display.motion.TransformState()
newts.take_state(trans.state)
has_angle = False
for k, v in self.properties:
setattr(newts, k, v)
if k == "angle":
newts.last_angle = v
has_angle = True
# Now, the things we change linearly are in the difference
# between the new and old states.
linear = trans.state.diff(newts)
revolution = None
splines = [ ]
revdir = self.revolution
circles = self.circles
if (revdir or (has_angle and renpy.config.automatic_polar_motion)) and (newts.xaround is not None):
# Remove various irrelevant motions.
for i in [ 'xpos', 'ypos',
'xanchor', 'yanchor',
'xaround', 'yaround',
'xanchoraround', 'yanchoraround',
]:
linear.pop(i, None)
if revdir is not None:
# Ensure we rotate around the new point.
trans.state.xaround = newts.xaround
trans.state.yaround = newts.yaround
trans.state.xanchoraround = newts.xanchoraround
trans.state.yanchoraround = newts.yanchoraround
# Get the start and end angles and radii.
startangle = trans.state.angle
endangle = newts.angle
startradius = trans.state.radius
endradius = newts.radius
# Make sure the revolution is in the appropriate direction,
# and contains an appropriate number of circles.
if revdir == "clockwise":
if endangle < startangle:
startangle -= 360
startangle -= circles * 360
elif revdir == "counterclockwise":
if endangle > startangle:
startangle += 360
startangle += circles * 360
# Store the revolution.
revolution = (startangle, endangle, startradius, endradius)
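# Illustration (a hedged reading of the arithmetic above, not taken from the
# source docs): revolving clockwise from 350 to 10 degrees with circles=1
# rewrites startangle to 350 - 360 - 360 = -370, so the interpolation sweeps
# 380 degrees (one extra full turn plus the 20-degree remainder) rather than
# taking the 20-degree short way.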
else:
last_angle = trans.state.last_angle or trans.state.angle
revolution = (last_angle, newts.last_angle, trans.state.radius, newts.radius)
# Figure out the splines.
for name, values in self.splines:
splines.append((name, [ getattr(trans.state, name) ] + values))
state = (linear, revolution, splines)
# Ensure that we set things, even if they don't actually
# change from the old state.
for k, v in self.properties:
if k not in linear:
setattr(trans.state, k, v)
else:
linear, revolution, splines = state
# Linearly interpolate between the things in linear.
for k, (old, new) in linear.iteritems():
value = interpolate(complete, old, new, PROPERTIES[k])
setattr(trans.state, k, value)
# Handle the revolution.
if revolution is not None:
startangle, endangle, startradius, endradius = revolution
angle = interpolate(complete, startangle, endangle, float)
trans.state.last_angle = angle
trans.state.angle = angle
trans.state.radius = interpolate(complete, startradius, endradius, float)
# Handle any splines we might have.
for name, values in splines:
value = interpolate_spline(complete, values)
setattr(trans.state, name, value)
if ((not first) or (not renpy.config.atl_one_frame)) and (st >= self.duration):
return "next", st - self.duration, None
else:
if not self.properties and not self.revolution and not self.splines:
return "continue", state, max(0, self.duration - st)
else:
return "continue", state, 0
# Implementation of the repeat statement.
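# Illustration (hedged, based on the parse and compile steps below): a bare
# `repeat` compiles to Repeat(None), which presumably loops until interrupted,
# while `repeat 3` evaluates the expression and compiles to Repeat(3).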
class RawRepeat(RawStatement):
def __init__(self, loc, repeats):
super(RawRepeat, self).__init__(loc)
self.repeats = repeats
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
repeats = self.repeats
if repeats is not None:
repeats = ctx.eval(repeats)
return Repeat(self.loc, repeats)
def mark_constant(self):
self.constant = is_constant_expr(self.repeats)
class Repeat(Statement):
def __init__(self, loc, repeats):
super(Repeat, self).__init__(loc)
self.repeats = repeats
def execute(self, trans, st, state, events):
return "repeat", (self.repeats, st), 0
# Parallel statement.
class RawParallel(RawStatement):
def __init__(self, loc, block):
super(RawParallel, self).__init__(loc)
self.blocks = [ block ]
def compile(self, ctx): # @ReservedAssignment
return Parallel(self.loc, [i.compile(ctx) for i in self.blocks])
def predict(self, ctx):
for i in self.blocks:
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for i in self.blocks:
i.mark_constant()
constant = min(constant, i.constant)
self.constant = constant
class Parallel(Statement):
def __init__(self, loc, blocks):
super(Parallel, self).__init__(loc)
self.blocks = blocks
def execute(self, trans, st, state, events):
executing(self.loc)
if state is None:
state = [ (i, None) for i in self.blocks ]
# The amount of time left after finishing this block.
left = [ ]
# The duration of the pause.
pauses = [ ]
# The new state structure.
newstate = [ ]
for i, istate in state:
action, arg, pause = i.execute(trans, st, istate, events)
if pause is not None:
pauses.append(pause)
if action == "continue":
newstate.append((i, arg))
elif action == "next":
left.append(arg)
elif action == "event":
return action, arg, pause
if newstate:
return "continue", newstate, min(pauses)
else:
return "next", min(left), None
def visit(self):
return [ j for i in self.blocks for j in i.visit() ]
# The choice statement.
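# Illustration (a hedged sketch of the ATL this implements, not quoted from the
# docs): adjacent blocks such as
#     choice:
#         xoffset 10
#     choice 2.0:
#         xoffset -10
# are merged into one Choice during parsing, and execute() picks a branch at
# random with probability proportional to its chance (1.0 when omitted).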
class RawChoice(RawStatement):
def __init__(self, loc, chance, block):
super(RawChoice, self).__init__(loc)
self.choices = [ (chance, block) ]
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Choice(self.loc, [ (ctx.eval(chance), block.compile(ctx)) for chance, block in self.choices])
def predict(self, ctx):
for _i, j in self.choices:
j.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for _chance, block in self.choices:
block.mark_constant()
constant = min(constant, block.constant)
self.constant = constant
class Choice(Statement):
def __init__(self, loc, choices):
super(Choice, self).__init__(loc)
self.choices = choices
def execute(self, trans, st, state, events):
executing(self.loc)
if state is None:
total = 0
for chance, choice in self.choices:
total += chance
n = random.uniform(0, total)
for chance, choice in self.choices:
if n < chance:
break
n -= chance
cstate = None
else:
choice, cstate = state
action, arg, pause = choice.execute(trans, st, cstate, events)
if action == "continue":
return "continue", (choice, arg), pause
else:
return action, arg, None
def visit(self):
return [ j for i in self.choices for j in i[1].visit() ]
# The Time statement.
class RawTime(RawStatement):
def __init__(self, loc, time):
super(RawTime, self).__init__(loc)
self.time = time
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Time(self.loc, ctx.eval(self.time))
def mark_constant(self):
self.constant = is_constant_expr(self.time)
class Time(Statement):
def __init__(self, loc, time):
super(Time, self).__init__(loc)
self.time = time
def execute(self, trans, st, state, events):
return "continue", None, None
# The On statement.
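# Illustration (a hedged sketch of the ATL this implements): a transform like
#     on show:
#         alpha 0.0
#         linear 0.5 alpha 1.0
#     on hide:
#         linear 0.5 alpha 0.0
# parses into one RawOn whose handlers map "show" and "hide" to their blocks;
# at runtime the events delivered to execute() select which handler runs.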
class RawOn(RawStatement):
def __init__(self, loc, names, block):
super(RawOn, self).__init__(loc)
self.handlers = { }
for i in names:
self.handlers[i] = block
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
handlers = { }
for k, v in self.handlers.iteritems():
handlers[k] = v.compile(ctx)
return On(self.loc, handlers)
def predict(self, ctx):
for i in self.handlers.itervalues():
i.predict(ctx)
def mark_constant(self):
constant = GLOBAL_CONST
for block in self.handlers.itervalues():
block.mark_constant()
constant = min(constant, block.constant)
self.constant = constant
class On(Statement):
def __init__(self, loc, handlers):
super(On, self).__init__(loc)
self.handlers = handlers
def execute(self, trans, st, state, events):
executing(self.loc)
# If it's our first time through, start in the start state.
if state is None:
name, start, cstate = ("start", st, None)
else:
name, start, cstate = state
# If we have an external event, and we have a handler for it,
# handle it.
for event in events:
if event in self.handlers:
# Do not allow people to abort the hide or replaced event.
lock_event = (name == "hide" and trans.hide_request) or (name == "replaced" and trans.replaced_request)
if not lock_event:
name = event
start = st
cstate = None
while True:
# If we don't have a handler, return until we change event.
if name not in self.handlers:
return "continue", (name, start, cstate), None
action, arg, pause = self.handlers[name].execute(trans, st - start, cstate, events)
# If we get a continue, save our state.
if action == "continue":
# If it comes from a hide block, indicate that.
if name == "hide" or name == "replaced":
trans.hide_response = False
trans.replaced_response = False
return "continue", (name, start, arg), pause
# If we get a next, then try going to the default
# event, unless we're already in default, in which case we
# go to None.
elif action == "next":
if name == "default" or name == "hide" or name == "replaced":
name = None
else:
name = "default"
start = st - arg
cstate = None
continue
# If we get an event, then either handle it if we can, or
# pass it up the stack if we can't.
elif action == "event":
name, arg = arg
if name in self.handlers:
start = max(st - arg, st - 30)
cstate = None
continue
return "event", (name, arg), None
def visit(self):
return [ j for i in self.handlers.itervalues() for j in i.visit() ]
# Event statement.
class RawEvent(RawStatement):
def __init__(self, loc, name):
super(RawEvent, self).__init__(loc)
self.name = name
def compile(self, ctx): # @ReservedAssignment
return Event(self.loc, self.name)
def mark_constant(self):
self.constant = GLOBAL_CONST
class Event(Statement):
def __init__(self, loc, name):
super(Event, self).__init__(loc)
self.name = name
def execute(self, trans, st, state, events):
return "event", (self.name, st), None
class RawFunction(RawStatement):
def __init__(self, loc, expr):
super(RawFunction, self).__init__(loc)
self.expr = expr
def compile(self, ctx): # @ReservedAssignment
compiling(self.loc)
return Function(self.loc, ctx.eval(self.expr))
def mark_constant(self):
self.constant = is_constant_expr(self.expr)
class Function(Statement):
def __init__(self, loc, function):
super(Function, self).__init__(loc)
self.function = function
def execute(self, trans, st, state, events):
fr = self.function(trans, st, trans.at)
if fr is not None:
return "continue", None, fr
else:
return "next", 0, None
# This parses an ATL block.
def parse_atl(l):
l.advance()
block_loc = l.get_location()
statements = [ ]
animation = False
while not l.eob:
loc = l.get_location()
if l.keyword('repeat'):
repeats = l.simple_expression()
statements.append(RawRepeat(loc, repeats))
elif l.keyword('block'):
l.require(':')
l.expect_eol()
l.expect_block('block')
block = parse_atl(l.subblock_lexer())
statements.append(block)
elif l.keyword('contains'):
expr = l.simple_expression()
if expr:
l.expect_noblock('contains expression')
statements.append(RawContainsExpr(loc, expr))
else:
l.require(':')
l.expect_eol()
l.expect_block('contains')
block = parse_atl(l.subblock_lexer())
statements.append(RawChild(loc, block))
elif l.keyword('parallel'):
l.require(':')
l.expect_eol()
l.expect_block('parallel')
block = parse_atl(l.subblock_lexer())
statements.append(RawParallel(loc, block))
elif l.keyword('choice'):
chance = l.simple_expression()
if not chance:
chance = "1.0"
l.require(':')
l.expect_eol()
l.expect_block('choice')
block = parse_atl(l.subblock_lexer())
statements.append(RawChoice(loc, chance, block))
elif l.keyword('on'):
names = [ l.require(l.word) ]
while l.match(','):
name = l.word()
if name is None:
break
names.append(name)
l.require(':')
l.expect_eol()
l.expect_block('on')
block = parse_atl(l.subblock_lexer())
statements.append(RawOn(loc, names, block))
elif l.keyword('time'):
time = l.require(l.simple_expression)
l.expect_noblock('time')
statements.append(RawTime(loc, time))
elif l.keyword('function'):
expr = l.require(l.simple_expression)
l.expect_noblock('function')
statements.append(RawFunction(loc, expr))
elif l.keyword('event'):
name = l.require(l.word)
l.expect_noblock('event')
statements.append(RawEvent(loc, name))
elif l.keyword('pass'):
l.expect_noblock('pass')
statements.append(None)
elif l.keyword('animation'):
l.expect_noblock('animation')
animation = True
else:
# If we can't assign it to a statement more specifically,
# we try to parse it into a RawMultipurpose. That will
# then be turned into another statement, as appropriate.
# The RawMultipurpose we add things to.
rm = renpy.atl.RawMultipurpose(loc)
# Is the last clause an expression?
last_expression = False
# Is this clause an expression?
this_expression = False
# First, look for a warper.
cp = l.checkpoint()
warper = l.name()
if warper in warpers:
duration = l.require(l.simple_expression)
warp_function = None
elif warper == "warp":
warper = None
warp_function = l.require(l.simple_expression)
duration = l.require(l.simple_expression)
else:
l.revert(cp)
warper = None
warp_function = None
duration = "0"
rm.add_warper(warper, duration, warp_function)
# Now, look for properties and simple_expressions.
while True:
# Update expression status.
last_expression = this_expression
this_expression = False
if l.keyword('pass'):
continue
# Parse revolution keywords.
if l.keyword('clockwise'):
rm.add_revolution('clockwise')
continue
if l.keyword('counterclockwise'):
rm.add_revolution('counterclockwise')
continue
if l.keyword('circles'):
expr = l.require(l.simple_expression)
rm.add_circles(expr)
# Try to parse a property.
cp = l.checkpoint()
prop = l.name()
if prop in PROPERTIES:
expr = l.require(l.simple_expression)
# We either have a property or a spline. It's the
# presence of knots that determines which one it is.
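# Illustration (a hedged sketch of the accepted syntax, not from this file):
#     linear 1.0 xpos 100            ->  rm.add_property('xpos', '100')
#     linear 1.0 xpos 100 knot 300   ->  rm.add_spline('xpos', ['300', '100'])
# i.e. the first expression is the end value and any knots are intermediate
# control values appended before it.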
knots = [ ]
while l.keyword('knot'):
knots.append(l.require(l.simple_expression))
if knots:
knots.append(expr)
rm.add_spline(prop, knots)
else:
rm.add_property(prop, expr)
continue
# Otherwise, try to parse it as a simple expression,
# with an optional with clause.
l.revert(cp)
expr = l.simple_expression()
if not expr:
break
if last_expression:
l.error('ATL statement contains two expressions in a row; is one of them a misspelled property? If not, separate them with pass.')
this_expression = True
if l.keyword("with"):
with_expr = l.require(l.simple_expression)
else:
with_expr = None
rm.add_expression(expr, with_expr)
l.expect_noblock('ATL')
statements.append(rm)
if l.eol():
l.advance()
continue
l.require(",", "comma or end of line")
# Merge together statements that need to be merged together.
merged = [ ]
old = None
for new in statements:
if isinstance(old, RawParallel) and isinstance(new, RawParallel):
old.blocks.extend(new.blocks)
continue
elif isinstance(old, RawChoice) and isinstance(new, RawChoice):
old.choices.extend(new.choices)
continue
elif isinstance(old, RawChild) and isinstance(new, RawChild):
old.children.extend(new.children)
continue
elif isinstance(old, RawOn) and isinstance(new, RawOn):
old.handlers.update(new.handlers)
continue
# None is a pass statement, which gets skipped, but also
# prevents things from combining.
elif new is None:
old = new
continue
merged.append(new)
old = new
return RawBlock(block_loc, merged, animation)
|
kfcpaladin/sze-the-game
|
renpy/atl.py
|
Python
|
mit
| 51,943
|
[
"VisIt"
] |
226860d327027d13c583e799b5ae51d58304a2138384466066b6a52d95512537
|
from parametrized_set import ParametrizedSet
from sympy import integrate, Expr, Number, NumberSymbol, S
#FIXME 0-measure
#FIXME Dirac measure
#FIXME allow numeric for special set.
# computing quad points for triangles
class Measure(object):
'''Integral over domain describing points in Cartesian coordinate system.'''
def __init__(self, domain):
self.domain = domain
def __call__(self, integrand):
'''
Integrate scalar integrand with the measure. This is a working horse for
specialized classes. There is no Jacobian!
'''
if isinstance(integrand, (int, float)):
integrand = S(integrand)
assert isinstance(integrand, (Expr, Number, NumberSymbol))
# Substitute
f = self.domain.substitute(integrand)
# Integrate over parameter domain
ans = f
for var, bounds in self.domain.items():
ans = integrate(ans, (var, bounds[0], bounds[1]))
return ans
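# Example (hedged, mirroring the __main__ block at the bottom of this file):
# with domain = Interval(-1, 1) and integrand 1 + x**2, __call__ substitutes
# the parametrization and integrates over the parameter bounds, giving 8/3.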
def __add__(self, other):
'''Product of two measures is a new ProductMeasure.'''
assert isinstance(other, (Measure, ProductMeasure))
if isinstance(other, ProductMeasure):
return ProductMeasure([self] + other.measures)
else:
return ProductMeasure([self, other])
class ProductMeasure(Measure):
'''Product of measures.'''
def __init__(self, measures):
'''Initialize from the list of measures.'''
assert isinstance(measures, list)
# Check domain compatibility. this does not mean that integration won't
# blow up
gdim = measures[0].domain.gdim
tdim = measures[0].domain.tdim
assert all(measure.domain.tdim == tdim and measure.domain.gdim == gdim
for measure in measures[1:]),\
'Cannot sum measures of different tdim and gdim'
self.measures = measures
# No explicit domain
Measure.__init__(self, None)
def __call__(self, integrand):
'''Call makes no sense with None domain.'''
raise NotImplementedError('No __call__ for product measure')
def __rmul__(self, integrand):
'''Integrate with individual measures.'''
measures = self.measures
ans = integrand*measures[0]
# FIXME if we add += to vectors/tensors this can be simplified
for measure in measures[1:]:
ans = ans + integrand*measure
return ans
def __add__(self, other):
'''Combine measures.'''
assert isinstance(other, (Measure, ProductMeasure))
if isinstance(other, ProductMeasure):
return ProductMeasure(self.measures + other.measures)
else:
return ProductMeasure(self.measures + [other])
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from parametrized_set import Interval
from sympy import Symbol
domain = Interval(-1, 1)
dl = Measure(domain)
x = Symbol('x')
f = 1 + x**2
print dl(f)
print float(integrate(f, (x, -1, 1)))
|
MiroK/vector_calculus
|
vector_calculus/measures/measure.py
|
Python
|
mit
| 3,144
|
[
"DIRAC"
] |
3d76bc94346e3b6ee1b9c4008fb0ce5ebaddf52c8d76595047e3fb625a2864c9
|
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import re
try:
import json
except ImportError:
import simplejson as json
__author__ = 'Ehsan Foroughi'
__email__ = 'ehsan.foroughi@teltub.com'
__copyright__ = 'Copyright 2010, TELTUB Inc'
__credits__ = ['Ehsan Foroughi']
__license__ = 'GPLv3'
__version__ = '1.0'
__all__ = ['GoogleVoice', 'GoVoError', 'LoginError', 'ServerError']
DEFAULT_CAPTCHA_RETRY = 5
GET_JSON_RE = re.compile(r'<json><!\[CDATA(.+)\]></json>\n', re.MULTILINE)
RNR_SE_RE = re.compile(r"'_rnr_se': '(.+)'")
CONTENT_TYPE = 'application/x-www-form-urlencoded;charset=utf-8'
USER_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.13) Gecko/20100914 Firefox/3.5.13 (.NET CLR 3.5.30729)'
REQ_HEADER = {'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8', 'User-Agent': USER_AGENT}
SERVICE = 'grandcentral'
ACC_TYPE = 'GOOGLE'
APP_SOURCE = 'korylprince-sms'
AUTH_ARGS = {'accountType':ACC_TYPE, 'source':APP_SOURCE, 'service':SERVICE}
AUTH_URL = 'https://www.google.com/accounts/ClientLogin'
CAPTCHA_URL_PREFIX = 'http://www.google.com/accounts/'
BASE_URL = 'https://www.google.com/voice'
INDEX_URL = '/'
SETTINGS_URL = '/settings/tab/phones'
CALL_URL = '/call/connect/'
CANCEL_URL = '/call/cancel/'
SMS_URL = '/sms/send/'
LOGIN_ERR_MSG = 'You have not yet setup your Google Voice account. Please <a href="http://google.com/voice" target="_blank" style="text-decoration:underline">configure your Google Voice</a> and try again.'
class GoVoError(Exception):
pass
class LoginError(GoVoError):
"""
The Exception class to handle Login/Authentication errors
See <reason> for more details.
Legend for <reason>:
'failed': Credentials are invalid.
'captcha': Account is locked awaiting a captcha unlock.
'error': An unknown/unexpected error has occurred.
Note that 'failed' and 'captcha' are expected in normal operation flow.
"""
def __init__(self, reason, msg):
Exception.__init__(self, reason, msg)
self.reason = reason
self.msg = msg
class ServerError(GoVoError):
"""
The Exception class to handle ServerErrors.
Note that these errors are not expected in normal operation flow
"""
def __init__(self, code, msg):
Exception.__init__(self, code, msg)
self.code = code
self.msg = msg
class GoogleVoice:
def __init__(self, user, passwd, auth_token=None, rnr_se=None):
"""
Initialize either using just <user> and <passwd>,
or pass all parameters from a saved state (see get_state).
Parameters:
<user>: can be username@gmail.com or username
<passwd>: password
<auth_token>: [optional] is the token for Google's ClientLogin interface.
<rnr_se>: [optional] is the internal variable used by Google Voice.
"""
self.user = user
self.passwd = passwd
self.auth_token = auth_token
self.rnr_se = rnr_se
self.reset_captcha()
self.handle_captcha_entry = None
self.handle_save_token = None
self.account_settings = None
def _get_url_data(self, url, data, header=None):
req_header = REQ_HEADER
if header:
req_header = req_header.copy()
req_header.update(header)
data = urllib.parse.urlencode(data)
if data == '':
data = None
else:
data = data.encode('utf8')
request = urllib.request.Request(url, data, req_header)
err_code = None
try:
resp_obj = urllib.request.urlopen(request)
except urllib.error.HTTPError as e:
err_code = e.code
return err_code, e.read()
resp = resp_obj.read()
resp_obj.close()
return None, resp
def _get_rnr_se(self):
ret = self.get_auth_url(INDEX_URL, mode='raw', with_retry=False)
rnr_se = RNR_SE_RE.search(ret.decode('utf8'))
if rnr_se:
rnr_se = rnr_se.groups()[0]
else:
if 'not available in your country' in ret.decode('utf8'):
raise LoginError('countryerror', LOGIN_ERR_MSG)
else:
raise LoginError('error', 'Unable to get rnr_se token')
self.rnr_se = rnr_se
def _process_resp(self, resp):
ret_data = {}
for line in resp.split('\n'):
if '=' in line:
var, val = line.split('=', 1)
ret_data[var] = val
return ret_data
def _get_auth_token(self, captcha_retry=DEFAULT_CAPTCHA_RETRY):
data = AUTH_ARGS.copy()
data.update({'Email':self.user, 'Passwd':self.passwd})
if self.captcha_entry and self.captcha_token:
data.update({'logintoken':self.captcha_token, 'logincaptcha':self.captcha_entry})
err_code, resp = self._get_url_data(AUTH_URL, data)
if (err_code is not None) and (err_code != 403):
raise LoginError('unknown', "HTTP Error %d" % (err_code))
ret_data = self._process_resp(resp.decode('utf8'))
if 'Auth' in ret_data:
self.reset_captcha()
self.auth_token = ret_data['Auth']
self._get_rnr_se()
if self.handle_save_token:
self.handle_save_token(self)
return
if 'Error' not in ret_data:
raise LoginError('error', 'unknown')
elif ret_data['Error'] == 'BadAuthentication':
raise LoginError('failed', 'Invalid Credentials')
elif ret_data['Error'] == 'CaptchaRequired':
self.captcha_token = ret_data['CaptchaToken']
self.captcha_url = CAPTCHA_URL_PREFIX + ret_data['CaptchaUrl']
self.captcha_entry = None
if self.handle_captcha_entry and captcha_retry:
self.handle_captcha_entry(self)
self._get_auth_token(captcha_retry-1)
else:
raise LoginError('captcha', (self.captcha_token, self.captcha_url))
else:
raise LoginError('error', ret_data['Error'])
def _get_account_settings(self):
if self.account_settings:
return
ret = self.get_auth_url(SETTINGS_URL)
if ret is None:
return
self.account_settings = ret[0]
def reset_captcha(self):
"""
Resets the state of a captcha locked account so that a new attempt can be tried.
Use it if you have asked the user to visit the following URL instead of following
the complete process:
https://www.google.com/accounts/UnlockCaptcha
"""
self.captcha_token = None
self.captcha_url = None
self.captcha_entry = None
def unlock_captcha(self, captcha_entry, captcha_token=None):
"""
Unlocks a captcha locked account using the user/human entered captcha value.
Call this function after you have got a LoginError exception with e.reason == 'captcha'.
"""
if captcha_token:
self.captcha_token = captcha_token
if not self.captcha_token:
raise LoginError('error', 'Captcha not found')
self.captcha_entry = captcha_entry
def get_auth_url(self, url, data={}, mode='json', with_retry=True):
"""
Get a custom URL using the saved ClientLogin token.
Parameters:
<url>: The function to be called (Note: Do not include base URL)
<data>: A dictionary of parameters for the call.
<mode>: 'raw'|'json'
'raw' -> Returns the raw result
'json' -> Attempts to extract JSON return from the page. Otherwise returns None.
<with_retry>: True|False
Pass True to allow an auto-retry in case of a missing or expired token.
"""
if not self.auth_token:
self._get_auth_token()
if url is None:
return None
err_code, resp = self._get_url_data(BASE_URL + url, data, {'Authorization': 'GoogleLogin auth=' + self.auth_token})
if with_retry:
retry = False
if (err_code == 401):
self._get_auth_token()
retry = True
elif (err_code == 500):
self._get_rnr_se()
if self.handle_save_token:
self.handle_save_token(self)
retry = True
if retry:
err_code, resp = self._get_url_data(BASE_URL + url, data, {'Authorization': 'GoogleLogin auth=' + self.auth_token})
if err_code is not None:
raise ServerError(err_code, resp)
if mode != 'json':
return resp
# Decode once so the str regex below and the substring check work under Python 3.
resp = resp.decode('utf8')
res = GET_JSON_RE.search(resp)
if not res:
if 'not available in your country' in resp:
raise LoginError('countryerror', LOGIN_ERR_MSG)
return None
if len(res.groups()) == 0:
return None
return json.loads(res.groups()[0])
def get_state(self, mode='full'):
"""
Returns the state to be saved for later usage of the class.
Parameters:
<mode>: 'full'|'tokens_only' (default='full')
'full' -> Returns all arguments for constructor
'tokens_only' -> Returns only the variable part, i.e. tokens
"""
if mode=='full':
return self.user, self.passwd, self.auth_token, self.rnr_se
elif mode=='tokens_only':
return self.auth_token, self.rnr_se
def validate_credentials(self):
"""
This just forces the library to get an authentication done so that the
credentials can be verified and state can be saved.
Returns: None or raises proper Exception if credentials are invalid
"""
self.get_auth_url(None)
def get_numbers(self):
"""
Fetches the registered phones for the Google Voice account and their properties.
Returns:
{num:{attribute:value, ...}, ...}
Note: the <num> returned as the key of the dictionary is in standard format and
will not contain '+' sign before it.
Hint: ret[num]['verified'] << True or False will show if the number is validated.
Hint: to force a refresh of account settings, you can do:
self.account_settings = None
"""
self._get_account_settings()
if not self.account_settings:
raise LoginError('notinitiated', LOGIN_ERR_MSG)
ret_dict = {}
if 'phones' not in self.account_settings:
self.account_settings['phones'] = {}
for item in list(self.account_settings['phones'].values()):
num = item['phoneNumber']
if num.startswith('+'):
num = num[1:]
ret_dict[num] = item
return ret_dict
def get_settings(self):
"""
Fetches the settings of the account and returns them in a dictionary format.
"""
self._get_account_settings()
if not self.account_settings:
return None
return self.account_settings['settings']
def call(self, outgoing_number, forwarding_number, phone_type=1, subscriber_number='undefined'):
"""
Places a call.
Parameters:
<outgoing_number>: number to be called
<forwarding_number>: registered phone number to be called from
<phone_type>: [optional] type of the destination number
Returns: True|False
"""
ret = self.get_auth_url(CALL_URL, {'outgoingNumber': outgoing_number, 'forwardingNumber': forwarding_number,
'subscriberNumber': subscriber_number, 'phoneType': phone_type, 'remember': '1', '_rnr_se':self.rnr_se}, mode='raw')
if not ret:
return False
try:
ret = json.loads(ret)
except ValueError:
return False
return ('ok' in ret) and (ret['ok'])
def cancel(self):
"""
Cancels the current ongoing call (if one exists).
Returns: True|False
"""
ret = self.get_auth_url(CANCEL_URL, {'outgoingNumber': '', 'forwardingNumber': '', 'cancelType':'C2C',
'_rnr_se':self.rnr_se}, mode='raw')
if not ret:
return False
try:
ret = json.loads(ret)
except ValueError:
return False
return ('ok' in ret) and (ret['ok'])
def sms(self, outgoing_number, msg):
"""
Sends an SMS message.
Parameters:
<outgoing_number>: number to send message to
<msg>: text of message to send
Returns: True|False
"""
ret = self.get_auth_url(SMS_URL, {'phoneNumber': outgoing_number, 'text': msg,
'_rnr_se':self.rnr_se}, mode='raw')
if not ret:
return False
try:
ret = json.loads(ret.decode('utf8'))
except ValueError:
return False
return ('ok' in ret) and (ret['ok'])
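# A hedged usage sketch (not part of the original module); the account details
# and phone number below are placeholders:
#
#     gv = GoogleVoice('username@gmail.com', 'password')
#     try:
#         gv.validate_credentials()          # forces the ClientLogin round trip
#     except LoginError as e:
#         if e.reason == 'captcha':
#             token, url = e.msg             # show url to the user, then:
#             gv.unlock_captcha(entered_text, token)
#     if gv.sms('15551234567', 'hello'):
#         print('message sent')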
|
mtmosier/gateAlarm
|
gateAlarmLib/pygvoicelib.py
|
Python
|
gpl-3.0
| 13,082
|
[
"VisIt"
] |
865515bf2b35862be497572d1a1d7a2769d36d6f614fa1fc8a7ec406c02aaefd
|
#red blue green rgb, dashes squares triangles -- s ^
from ase.io import read
from ase.io.trajectory import PickleTrajectory
from ase.calculators.neighborlist import NeighborList
import matplotlib.pyplot as plt
from ase.units import fs, kB
from matplotlib.backends.backend_pdf import PdfPages
def plot_energies(path,md_type,title,t0_histo,bin_histo,write=False):
traj = PickleTrajectory(path+md_type+'.traj')
ref_e = traj[0].get_total_energy()
ref_epot = traj[0].get_potential_energy()
e = [ atoms.get_total_energy() - ref_e for atoms in traj]
ekin = [ atoms.get_kinetic_energy() for atoms in traj]
epot = [ atoms.get_potential_energy() - ref_epot for atoms in traj]
temp = [ atoms.get_temperature() for atoms in traj]
plt.figure(1)
plt.title(title)
plt.ylabel('Energy [eV]')
plt.xlabel('Time [fs]')
plt.plot( e, 'g-' , label=r'Etot[t]-Etot[0]')
plt.legend(loc='upper right')
if write:
plt.savefig(md_type+'_etot.eps', format='eps')
plt.savefig(pp, format='pdf')
plt.figure(2)
plt.subplot(211)
plt.title(title)
plt.ylabel('Energy [eV]')
plt.xlabel('Time [fs]')
plt.plot( epot, 'g-' , label=r'Epot[t]-Epot[0]')
plt.legend(loc='upper right')
plt.subplot(212)
plt.ylabel('Energy [eV]')
plt.xlabel('Time [fs]')
plt.plot( ekin, 'b-' ,label=r'Ekin')
plt.legend(loc='upper right')
if write:
plt.savefig(md_type+'_e.eps', format='eps')
plt.savefig(pp, format='pdf')
plt.figure(3)
plt.title(title)
plt.subplot(211)
plt.xlabel('Temperature [K]')
plt.annotate("using t0 = {0} fs".format(t0_histo), xy=(0.75, 0.75),
xycoords="axes fraction")
plt.hist(temp[t0_histo:], bin_histo)
plt.subplot(212)
plt.ylabel('Temperature [K]')
plt.xlabel('Time [fs]')
plt.plot(temp, 'b-' )
if write:
plt.savefig(md_type+'_T.eps', format='eps')
plt.savefig(pp, format='pdf')
return
def plot_oh(path,md_type,title,t0_histo,bin_histo,write=False):
traj = PickleTrajectory(path+md_type+'.traj')
oh = []
oh_distances = []
o_index = [atom.index for atom in traj[0] if atom.symbol == 'O']
h_index = [atom.index for atom in traj[0] if atom.symbol == 'H']
bonds_init = [ (o,h,traj[0].get_distance(o,h,mic=True))
for o in o_index for h in h_index]
bonds_pva = [ (o,h) for o,h,d in bonds_init if d < 1.1 ]
no = len(o_index)
assert len(bonds_pva) == no
for atoms in traj:
bonds = [ (o,h,atoms.get_distance(o,h,mic=True))
for o1,h in bonds_pva for o in o_index]
bonds_pva_traj = [ d for o,h,d in bonds if d < 1.2 ]
assert len(bonds_pva_traj) == no
bonds_oh = [ d for o,h,d in bonds if d <= 2.0]
bonds_no_pva = [ d for d in bonds_oh if d > 1.1]
oh += [1.0*len(bonds_no_pva)/no]
oh_distances += bonds_no_pva
plt.figure(4)
plt.subplot(211)
plt.title(title)
plt.ylabel('Number of bonds/O atom')
plt.xlabel('Time [fs]')
plt.plot(oh, 'r-')
plt.subplot(212)
plt.xlabel('Number of bonds/O atom')
plt.annotate("using t0 = {0} fs".format(t0_histo), xy=(0.75, 0.75),
xycoords="axes fraction")
plt.hist(oh[t0_histo:], 10)
if write:
plt.savefig(md_type+'_oh_number.eps', format='eps')
plt.savefig(pp, format='pdf')
plt.figure(5)
plt.title(title)
plt.xlabel('O-H distances')
plt.hist(oh_distances[t0_histo:],bin_histo)
if write:
plt.savefig(md_type+'_oh_distance.eps', format='eps')
plt.savefig(pp, format='pdf')
return
verlet= True
berendsen = False
write = True
if verlet:
temp = '500K'
path_traj = './NVE/'+temp+'/'
title = 'NVE Verlet 1nm PVA'
md_type = 'verlet'
t0_histo = 2000
multipage = md_type+'_'+temp+'.pdf'
if berendsen:
path_traj = './NVT/berendsen/'
title = 'NVT berendsen 1nm PVA'
md_type = 'berendsen'
t0_histo = 2000
multipage = md_type+'.pdf'
bin_histo = 20
pp = PdfPages(multipage)
plot_energies(path_traj,md_type,title,t0_histo, bin_histo,
write=write)
bin_histo = 10
plot_oh(path_traj,md_type,title,t0_histo, bin_histo,
write=write)
plt.show()
pp.close()
|
csmm/multiase
|
plot/plot_e_bond.py
|
Python
|
gpl-2.0
| 4,293
|
[
"ASE"
] |
4cdb2809a0d69982dc621f4f716314d1a9b4c1722925880a371a5a142ace9989
|
#!/usr/bin/env python
import argparse
import csv
import alnclst
import code
import copy
from os import path
from Bio import AlignIO, SeqIO
from Bio.SeqRecord import SeqRecord
from hyperfreq.cluster import load_cluster_map, parse_clusters
from hyperfreq.core import Alignment, AlignmentSet, analysis_defaults
from hyperfreq import mut_pattern, core, __version__
from hyperfreq.analysis_writer import write_analysis
def split(args):
hm_col_reader = csv.DictReader(args.columns)
hm_columns = map(lambda x: int(x[args.column_name]), hm_col_reader)
hm_columns = list(set(hm_columns))
seq_records = SeqIO.parse(args.alignment, 'fasta')
aln = Alignment(seq_records)
aln.split_hypermuts(hm_columns = hm_columns)
fn_base = path.join(args.out_dir, args.prefix)
hm_pos_handle = open(fn_base + '.pos.fasta', 'w')
hm_neg_handle = open(fn_base + '.neg.fasta', 'w')
AlignIO.write(aln.hm_pos_aln, hm_pos_handle, 'fasta')
AlignIO.write(aln.hm_neg_aln, hm_neg_handle, 'fasta')
for handle in [args.alignment, args.columns, hm_pos_handle, hm_neg_handle]:
handle.close()
def write_reference_seqs(alignments, fn_base):
handle = open(fn_base + '.ref_seqs.fasta', 'w')
def refseq(cluster):
seq = alignments.cluster_alns[cluster].reference_sequence
return SeqRecord(seq, id=cluster, name=cluster, description="")
refseqs = (refseq(cluster) for cluster in alignments.clusters)
SeqIO.write(refseqs, handle, 'fasta')
handle.close()
def analyze(args):
import logging; logging.captureWarnings(True)
# Fetch sequence records and analysis patterns
seq_records = SeqIO.to_dict(SeqIO.parse(args.alignment, 'fasta'))
patterns = [mut_pattern.patterns[p] for p in args.patterns]
pattern_names = [p.name for p in patterns]
prefix = path.join(args.out_dir, args.prefix)
analysis_settings = dict(
rpr_cutoff=args.rpr_cutoff, significance_level=args.significance_level, quants=args.quants,
pos_quants_only=args.pos_quants_only, caller=args.caller, prior=args.prior, cdfs=args.cdfs,
quadr_maxiter=args.quadr_maxiter, optim_maxiter=args.optim_maxiter)
# Need to think about how best to fork things here; for instance, might make sense to let the user specify
# the initial clusters for whatever reason... However, specifying the reference sequences shouldn't make
# any sense there
if args.reference_sequences:
reference_sequences = SeqIO.to_dict(SeqIO.parse(args.reference_sequences, 'fasta'))
else:
reference_sequences = None
# This lets the cluster map be optional, so that this script can be used
# for naive hm filtering/analysis
cluster_map = load_cluster_map(args.cluster_map, cluster_col=args.cluster_col) if args.cluster_map else None
alignments = AlignmentSet(seq_records, cluster_map, consensus_threshold=args.consensus_threshold,
reference_sequences=reference_sequences)
# Create the analysis generator
analysis = alignments.multiple_context_analysis(patterns, **analysis_settings)
if args.cluster_threshold:
for hm_it in range(args.cluster_iterations - 1):
print " ..On hm/cluster iteration", hm_it
# Grab the HM columns from the most recent analysis and split out the pos sites
hm_columns = []
for result in analysis:
hm_columns += result['call']['mut_columns']
hm_neg_aln = Alignment(seq_records.values()).split_hypermuts(hm_columns).hm_neg_aln
# Cluster with the specified settings
clustering = alnclst.Clustering(hm_neg_aln, args.cluster_threshold,
args.consensus_threshold)
clustering = clustering.recenter(args.recentering_iterations)
clustering.merge_small_clusters(args.min_per_cluster)
cluster_map = parse_clusters(clustering.mapping_iterator(), cluster_key=0, sequence_key=1)
# Create the Alignment set
clustered_alignment = AlignmentSet(seq_records, cluster_map,
consensus_threshold=args.consensus_threshold)
analysis = clustered_alignment.multiple_context_analysis(patterns, **analysis_settings)
# write out the final clusters
clusterout_handle = file(prefix + '.clst.csv', 'w')
clustering.write(clusterout_handle)
if args.interactive:
local = copy.copy(locals())
import hyperfreq
local.update(dict(hyperfreq=hyperfreq,
Alignment=Alignment,
AlignmentSet=AlignmentSet,
mut_pattern=mut_pattern,
write_analysis=write_analysis))
code.interact(local=local)
# Write the final analysis to file
write_analysis(analysis, prefix, pattern_names, args.quants, args.cdfs, call_only=args.call_only)
if args.write_references:
write_reference_seqs(alignments, prefix)
# Closing files
args.alignment.close()
if args.cluster_map:
args.cluster_map.close()
def setup_common_args(subparser):
subparser.add_argument('alignment', type=argparse.FileType('r'),
help="""Sequence alignment on which to operate. (This argument must come before any arguments
which take multiple inputs, such as --patterns and --cdfs)""")
subparser.add_argument('-o', '--out-dir', default='.',
help="Where to put files")
subparser.add_argument('-P', '--prefix',
help="Prefix for output files (extensions chosen automatically)")
subparser.add_argument('-v', '--verbose', action='store_true', default=False)
class QuantAction(argparse.Action):
"""This class is for doing some slick command line magick with specification of what quantiles to compute
and how"""
# This solution to the problem leaves q around in namespace, and admittedly is a little weird.
default_quants = [0.05]
def __call__(self, parser, namespace, values, option_string=None):
if values:
namespace.quants = values
else:
namespace.quants = parser.get_default('q')
parser.set_defaults(q=None)
namespace.pos_quants_only = True if option_string == '-q' else False
@classmethod
def register(cls, parser):
""" Adds the -q/-Q argument to the parser, and sets stuff up so that the results can be accessed from
the quants namespace, as well as pos_quants_only """
parser.add_argument('-q', '-Q', nargs='*', type=float, action=cls,
help="""Compute quantiles, separated by spaces. If specified with no args, default quantiles are
%(default)s. If specified using -q, quantiles are only computed for positive sequences to save time
(quantiles take a while). If -Q is used, specified quantiles are computed for all sequences.""")
parser.set_defaults(q=cls.default_quants)
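# Hedged illustration of the resulting behaviour (command lines are examples,
# not from the original docs):
#     hyperfreq analyze aln.fasta -q            -> quants=[0.05], pos_quants_only=True
#     hyperfreq analyze aln.fasta -Q 0.025 0.5  -> quants=[0.025, 0.5], pos_quants_only=False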
def setup_analyze_args(subparsers):
def cs_arg(arg):
return arg.split(',')
analyze_args = subparsers.add_parser('analyze',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Analyze alignment for evidence of hypermuation')
setup_common_args(analyze_args)
analyze_args.add_argument('-F', '--full-output', default=True, action='store_false', dest='call_only',
help="""Generate a separate file for each of the analysis patterns instead of just the call
pattern (default: call_only=%(default)s)""")
analyze_args.add_argument('-N', '--interactive', default=False, action='store_true',
help="""Instead of writing results to file, load up analysis results into an interactive shell""")
eval_group = analyze_args.add_argument_group('HM EVALUATION SETTINGS')
# Should we get this to be smarter about whether to write out a call file when only one analysis is run?
pattern_choices = mut_pattern.patterns.keys()
eval_group.add_argument('-p', '--patterns', choices=pattern_choices, default=['GG'], nargs='+',
help="""Specify the type of apobec activity to analyze. For example, 'GG' specifies
a focus pattern of GG to AG, characteristic of APOBEC3G activity. Multiple patterns should be
separated by spaces. Characters M, R and V correspond to IUPAC codes.""")
eval_group.add_argument('--rpr-cutoff', type=float,
help="""For hm_pos determination: if a sequence has a RPR higher than this value with confidence
specified by --significance-level, it will be marked as hypermutation positive.""")
eval_group.add_argument('-s', '--significance-level', type=float,
help="""For hm_pos determination: if, with this specified level of confidence, a sequence has a RPR
higher than the specified rpr_cutoff, it will be marked as hypermutation positive.""")
QuantAction.register(eval_group)
eval_group.add_argument('--cdfs', nargs="+", type=float,
help="""Specify cdfs to be computed (separated by spaces). These are computed in addition to the
CDF of the rpr-cutoff, as described above.""")
eval_group.add_argument('--caller',
help="""Statistic to be used for deciding which mutation pattern has the strongest hypermutation
signal. The choice specified should be the name of a column in the output file, such as "map",
"cutoff_cdf" or "q_0.05". Note: the value must exist for each sequence and call pattern analyzed.
As such, you must use the `-Q` flag (for computing all quantiles) if you wish to call based on
quantiles.""")
eval_group.add_argument('--quadr-maxiter', type=int,
help="""Set the maxiter option for calls to scipy.integrate.quadrature in betarat. Increasing may
improve precision if your logfiles show accuracy warnings for quadrature, but will take more time.
Decreasing can reduce computation time for large data sets, at the potential expense of precision.""")
eval_group.add_argument('--optim-maxiter', type=int,
help="""As with --quadr-maxiter, but for calls to scipy.optimize.brenth.""")
prior_group = analyze_args.add_argument_group('PRIORS')
prior_group = prior_group.add_mutually_exclusive_group()
prior_group.add_argument('--prior', type=float, nargs=2,
help="""Prior on Beta distributions. The default (%(default)s) corresponds to a belief that
mutations are relatively rare, but unbiased with respect to context.""")
prior_group.add_argument('--jeff', help='Use Jeffreys prior (0.5, 0.5) on Beta distributions.', action='store_const',
dest='prior', const=(0.5, 0.5))
prior_group.add_argument('--uniform', help='Uniform prior (1.0, 1.0) on Beta distributions.', action='store_const',
dest='prior', const=(1.0, 1.0))
refseq_group = analyze_args.add_argument_group('REFERENCE SEQS',
"""Hyperfreq requires each query sequence have a reference sequence for comparison. This
sequence should be evolutionarily close while not exhibiting a hypermutation pattern.
By default, hyperfreq computes this sequence as global consensus from the query
alignment. You can also manually specify reference sequences and compute them from clusters
of sequences (see the ITERATIVE CLUSTERING SETTINGS group for automatic identification of
clusters).""")
refseq_group.add_argument('-c', '--cluster-map', type=argparse.FileType('r'),
help="""CSV file mapping sequences to clusters; cluster consensus sequences will be used as query
sequences. Any sequences not mapped to a cluster will be implicity put in an 'all' cluster.""")
# Should make this smarter so that it guesses a few things first...
refseq_group.add_argument('--cluster-col', default='cluster',
help="Column in cluster-map to be used for cluster specification")
refseq_group.add_argument('--consensus-threshold', type=float,
help="""For computing consensus sequences. See biopython's AlignInfo.SummmaryInfo.dumb_consensus
method. (default: %(default)s; no threshold, most frequent base taken)""")
# Should remove necessity for "all" and just take the first, if no matches (with warning?)
refseq_group.add_argument('-r', '--reference-sequences', type=argparse.FileType('r'),
help="""Manually specify reference sequences. Sequence name(s) should correspond to cluster names
if using a cluster map. Otherwise, ensure a sequence named 'all' is included in the alignment.
Clusters for which no reference sequence is specified will be compared to a computed consensus
sequence.""")
refseq_group.add_argument('-R', '--write-references', default=False, action='store_true',
help="""Writes reference sequences used for HM evalutation. If sequences are clustered, the
reference sequence for each cluster will be given the name of the cluster. Consequently, the
output file can be used subsequently as input to --reference-sequences""")
# make some mutually exclusive with refseq spec methods; other future options
# -M --min-per-cluster-percent; -C --write-intermediate-clusters; -g --global-cons-first
autoclst_group = analyze_args.add_argument_group('ITERATIVE CLUSTERING SETTINGS',
"""Use hyperfreq's iterative clustering strategy to find reference sequences. This involves
iterations of hypermutation analysis, removal of hypermutated columns from alignment, and
clustering of these 'HM free' alignments, so that clusters don't reflect hypermutation within the
data.""")
autoclst_group.add_argument('-t', '--cluster-threshold', type=float,
help="""[Required for iterative clustering] If specified, triggers the iterative clustering
algorithm with the given clustering similarity threshold.""")
autoclst_group.add_argument('-i', '--cluster-iterations', type=int, default=5,
help="""Number of iterations of hm analysis and clustering.""")
autoclst_group.add_argument('-I', '--recentering-iterations', type=int, default=4,
help="""Not to be confused with --cluster-iterations, this specifies the number of recentering
steps to perform for each clustering step (inspired by http://goo.gl/RIoWBU).""")
autoclst_group.add_argument('-m', '--min-per-cluster', type=int, default=5,
help="""This value specifies the minimum number of sequences in a cluster. Clusters smaller than
this value will be merged with the closest cluster until no small clusters are left. This avoids
comparing a query sequence to a consensus sequence for a small enough cluster that the consensus
reflects hypermutation patterns.""")
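# Hedged example invocation of the iterative-clustering mode wired up above
# (file and prefix names are placeholders):
#     hyperfreq analyze seqs.fasta -t 0.97 -i 5 -m 5 -P run1 -o results/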
# Apply analysis defaults
for arg, default in analysis_defaults.iteritems():
try:
analyze_args.set_defaults(**dict([(arg, default)]))
except KeyError:
print "{} is not an args option".format(arg)
analyze_args.set_defaults(prefix='hm_analysis')
analyze_args.set_defaults(func=analyze)
def setup_split_args(subparsers):
split_args = subparsers.add_parser('split', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
setup_common_args(split_args)
split_args.add_argument('columns', type=argparse.FileType('r'),
help="""File identifying hypermuted columns. Typically, you would want to use the `*.sites.csv`
output file from running `hyperfreq analyze`, but you can contruct your own file if you like.""")
split_args.add_argument('--column-name', default='column',
help="""Column name in the columns file which identifies the hypermutated sites.""")
split_args.set_defaults(prefix='hm_split')
split_args.set_defaults(func=split)
def main():
parser = argparse.ArgumentParser(prog='hyperfreq',
description="""Hypermutation analysis software using BetaRat distribution for Bayesian analysis of
the relative probability ratio (RPR) of observing mutations in two contexts.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__))
subparsers = parser.add_subparsers(title='subcommands', help='additional help')
setup_analyze_args(subparsers)
setup_split_args(subparsers)
args = parser.parse_args()
if args.verbose:
core.VERBOSE = True
args.func(args)
if __name__ == '__main__':
main()
|
fhcrc/hyperfreq
|
hyperfreq/scripts/cli.py
|
Python
|
gpl-3.0
| 16,714
|
[
"Biopython"
] |
6ce6b75f9fd8a428a0f7368ea21f8c35474902c1f8921d4a94f362762e8c3f5b
|
"""
This is used to test the ElasticSearchDB module and to detect any changes in the Elasticsearch API.
If you modify the test data, you have to update the test cases...
"""
import unittest
import datetime
import time
from DIRAC import gLogger
from DIRAC.Core.Utilities.ElasticSearchDB import ElasticSearchDB
from DIRAC.Core.Utilities.ElasticSearchDB import generateFullIndexName
elHost = 'localhost'
elPort = 9200
class ElasticTestCase( unittest.TestCase ):
def setUp( self ):
gLogger.setLevel( 'DEBUG' )
self.el = ElasticSearchDB( host = elHost,
port = elPort,
useSSL = False )
self.data = [{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "b", "timestamp": "2015-02-09 16:30:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp":"2015-02-09 09:00:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp":"2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"},
{"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09 09:15:00.0"},
{"Color": "red", "quantity": 2, "Product": "b", "timestamp": "2015-02-09 16:15:00.0"}]
self.index_name = ''
def tearDown( self ):
pass
class ElasticBulkCreateChain( ElasticTestCase ):
def test_bulkindex( self ):
result = self.el.bulk_index( 'integrationtest', 'test', self.data )
self.assertTrue(result['OK'])
self.assertEqual( result['Value'], 10 )
time.sleep( 10 )
def test_bulkindexMonthly(self):
result = self.el.bulk_index( indexprefix = 'integrationtestmontly',
doc_type = 'test',
data = self.data,
period = 'month' )
self.assertTrue(result['OK'])
self.assertEqual( result['Value'], 10 )
time.sleep( 10 )
class ElasticCreateChain( ElasticTestCase ):
def tearDown( self ):
self.el.deleteIndex( self.index_name )
def test_wrongdataindex( self ):
result = self.el.createIndex( 'dsh63tsdgad', {} )
self.assertTrue(result['OK'])
index_name = result['Value']
result = self.el.index( index_name, 'test', {"Color": "red", "quantity": 1, "Product": "a", "timestamp": 1458226213})
self.assertTrue(result['OK'])
result = self.el.index( index_name, 'test', {"Color": "red", "quantity": 1, "Product": "a", "timestamp": "2015-02-09T16:15:00Z"})
self.assertTrue( result['Message'] )
result = self.el.deleteIndex( index_name )
self.assertTrue(result['OK'])
def test_index( self ):
result = self.el.createIndex( 'integrationtest', {} )
self.assertTrue(result['OK'])
self.index_name = result['Value']
for i in self.data:
result = self.el.index( self.index_name, 'test', i )
self.assertTrue(result['OK'])
class ElasticDeleteChain( ElasticTestCase ):
def test_deleteNonExistingIndex(self):
result = self.el.deleteIndex( 'dsdssuu' )
self.assertTrue( result['Message'] )
def test_deleteIndex( self ):
result = generateFullIndexName( 'integrationtest' )
res = self.el.deleteIndex( result )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], result )
def test_deleteMonthlyIndex( self ):
result = generateFullIndexName( 'integrationtestmontly', 'month' )
res = self.el.deleteIndex( result )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], result )
class ElasticTestChain( ElasticTestCase ):
def setUp( self ):
self.el = ElasticSearchDB( host = elHost,
port = elPort,
useSSL = False )
result = generateFullIndexName( 'integrationtest' )
self.assertTrue( len( result ) > len( 'integrationtest' ) )
self.index_name = result
def test_getIndexes( self ):
result = self.el.getIndexes()
self.assertTrue( len( result ) > 0 )
def test_getDocTypes( self ):
result = self.el.getDocTypes( self.index_name )
self.assertTrue( result )
self.assertDictEqual( result['Value'], {u'test': {u'properties': {u'Color': {u'type': u'string'}, u'timestamp': {u'type': u'long'}, u'Product': {u'type': u'string'}, u'quantity': {u'type': u'long'}}}} )
def test_exists( self ):
result = self.el.exists( self.index_name )
self.assertTrue( result )
def test_generateFullIndexName( self ):
indexName = 'test'
today = datetime.datetime.today().strftime( "%Y-%m-%d" )
expected = "%s-%s" % ( indexName, today )
result = generateFullIndexName( indexName )
self.assertEqual( result, expected )
def test_generateFullIndexName2( self ):
indexName = 'test'
month = datetime.datetime.today().strftime( "%Y-%m" )
expected = "%s-%s" % ( indexName, month )
result = generateFullIndexName( indexName, 'month' )
self.assertEqual( result, expected )
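# Hedged illustration of the naming scheme exercised above (dates are examples):
#     generateFullIndexName('test')           -> 'test-2015-02-09'  (daily suffix)
#     generateFullIndexName('test', 'month')  -> 'test-2015-02'     (monthly suffix)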
def test_getUniqueValue( self ):
result = self.el.getUniqueValue( self.index_name, 'Color', )
self.assertTrue( result )
self.assertEqual( result['Value'], [] )
result = self.el.getUniqueValue( self.index_name, 'Product' )
self.assertTrue( result )
self.assertEqual( result['Value'], [] )
result = self.el.getUniqueValue( self.index_name, 'quantity' )
self.assertTrue( result )
self.assertEqual( result['Value'], [] )
def test_query( self ):
body = { "size": 0,
"query": { "filtered": { "query": { "query_string": { "query": "*" } },
"filter": { "bool": { "must": [{ "range": {
"timestamp": {
"gte": 1423399451544,
"lte": 1423631917911
}
}
}],
"must_not": []
}
}
}
},
"aggs": {
"3": {
"date_histogram": {
"field": "timestamp",
"interval": "3600000ms",
"min_doc_count": 1,
"extended_bounds": {
"min": 1423399451544,
"max": 1423631917911
}
},
"aggs": {
"4": {
"terms": {
"field": "Product",
"size": 0,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "quantity"
}
}
}
}
}
}
}
}
result = self.el.query( self.index_name, body )
self.assertEqual( result['aggregations'], {u'3': {u'buckets': [{u'4': {u'buckets': [{u'1': {u'value': 5.0}, u'key': u'a', u'doc_count': 5}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': 1423468800000, u'doc_count': 5}, {u'4': {u'buckets': [{u'1': {u'value': 8.0}, u'key': u'b', u'doc_count': 5}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': 1423494000000, u'doc_count': 5}]}} )
def test_queryMontly( self ):
body = { "size": 0,
"query": { "filtered": { "query": { "query_string": { "query": "*" } },
"filter": { "bool": { "must": [{ "range": {
"timestamp": {
"gte": 1423399451544,
"lte": 1423631917911
}
}
}],
"must_not": []
}
}
}
},
"aggs": {
"3": {
"date_histogram": {
"field": "timestamp",
"interval": "3600000ms",
"min_doc_count": 1,
"extended_bounds": {
"min": 1423399451544,
"max": 1423631917911
}
},
"aggs": {
"4": {
"terms": {
"field": "Product",
"size": 0,
"order": {
"1": "desc"
}
},
"aggs": {
"1": {
"sum": {
"field": "quantity"
}
}
}
}
}
}
}
}
result = self.el.query( 'integrationtestmontly*', body )
self.assertEqual( result['aggregations'], {u'3': {u'buckets': [{u'4': {u'buckets': [{u'1': {u'value': 5.0}, u'key': u'a', u'doc_count': 5}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': 1423468800000, u'doc_count': 5}, {u'4': {u'buckets': [{u'1': {u'value': 8.0}, u'key': u'b', u'doc_count': 5}], u'sum_other_doc_count': 0, u'doc_count_error_upper_bound': 0}, u'key': 1423494000000, u'doc_count': 5}]}} )
def test_Search( self ):
s = self.el._Search( self.index_name )
result = s.execute()
self.assertEqual( len( result.hits ), 10 )
self.assertEqual( dir( result.hits[0] ), [u'Color', u'Product', 'meta', u'quantity', u'timestamp'] )
def test_Q1( self ):
q = self.el._Q( 'range', timestamp = {'lte':1423501337292, 'gte': 1423497057518} )
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423497057518, 'lte': 1423501337292}}}]}}]}}} )
result = s.execute()
self.assertEqual( len( result.hits ), 0 )
q = self.el._Q( 'range', timestamp = {'lte':1423631917911, 'gte': 1423399451544} )
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}} )
result = s.execute()
self.assertEqual( len( result.hits ), 10 )
self.assertEqual( dir( result.hits[0] ), [u'Color', u'Product', 'meta', u'quantity', u'timestamp'] )
def test_Q2( self ):
q = [self.el._Q( 'range', timestamp = {'lte':1423631917911, 'gte': 1423399451544} ), self.el._Q( 'match', Product = 'a' )]
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}, {'match': {'Product': 'a'}}]}}]}}} )
result = s.execute()
self.assertEqual( len( result.hits ), 5 )
self.assertEqual( result.hits[0].Product, 'a' )
self.assertEqual( result.hits[4].Product, 'a' )
def test_A1( self ):
q = [self.el._Q( 'range', timestamp = {'lte':1423631917911, 'gte': 1423399451544} )]
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
a1 = self.el._A( 'terms', field = 'Product', size = 0 )
s.aggs.bucket( '2', a1 )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}, 'aggs': {'2': {'terms': {'field': 'Product', 'size': 0}}}} )
result = s.execute()
self.assertEqual( result.aggregations['2'].buckets, [{u'key': u'a', u'doc_count': 5}, {u'key': u'b', u'doc_count': 5}] )
def test_A2( self ):
q = [self.el._Q( 'range', timestamp = {'lte':1423631917911, 'gte': 1423399451544} )]
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
a1 = self.el._A( 'terms', field = 'Product', size = 0 )
a1.metric( 'total_quantity', 'sum', field = 'quantity' )
s.aggs.bucket( '2', a1 )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}, 'aggs': {'2': {'terms': {'field': 'Product', 'size': 0}, 'aggs': {'total_quantity': {'sum': {'field': 'quantity'}}}}}} )
result = s.execute()
self.assertEqual( result.aggregations['2'].buckets, [{u'total_quantity': {u'value': 5.0}, u'key': u'a', u'doc_count': 5}, {u'total_quantity': {u'value': 8.0}, u'key': u'b', u'doc_count': 5}] )
def test_piplineaggregation( self ):
q = [self.el._Q( 'range', timestamp = {'lte':1423631917911, 'gte': 1423399451544} )]
s = self.el._Search( self.index_name )
s = s.filter( 'bool', must = q )
a1 = self.el._A( 'terms', field = 'Product', size = 0 )
a2 = self.el._A( 'terms', field = 'timestamp' )
a2.metric( 'total_quantity', 'sum', field = 'quantity' )
a1.bucket( 'end_data', 'date_histogram', field = 'timestamp', interval = '3600000ms' ).metric( 'tt', a2 ).pipeline( 'avg_buckets', 'avg_bucket', buckets_path = 'tt>total_quantity', gap_policy = 'insert_zeros' )
s.aggs.bucket( '2', a1 )
query = s.to_dict()
self.assertEqual( query, {'query': {'bool': {'filter': [{'bool': {'must': [{'range': {'timestamp': {'gte': 1423399451544, 'lte': 1423631917911}}}]}}]}}, 'aggs': {'2': {'terms': {'field': 'Product', 'size': 0}, 'aggs': {'end_data': {'date_histogram': {'field': 'timestamp', 'interval': '3600000ms'}, 'aggs': {'tt': {'terms': {'field': 'timestamp'}, 'aggs': {'total_quantity': {'sum': {'field': 'quantity'}}}}, 'avg_buckets': {'avg_bucket': {'buckets_path': 'tt>total_quantity', 'gap_policy': 'insert_zeros'}}}}}}}} )
result = s.execute()
self.assertEqual( len( result.aggregations['2'].buckets ), 2 )
self.assertEqual( result.aggregations['2'].buckets[0].key, u'a' )
self.assertEqual( result.aggregations['2'].buckets[1].key, u'b' )
self.assertEqual( result.aggregations['2'].buckets[0]['end_data'].buckets[0].avg_buckets, {u'value': 2.5} )
self.assertEqual( result.aggregations['2'].buckets[1]['end_data'].buckets[0].avg_buckets, {u'value': 4} )
if __name__ == '__main__':
testSuite = unittest.defaultTestLoader.loadTestsFromTestCase( ElasticTestCase )
testSuite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ElasticCreateChain ) )
testSuite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ElasticBulkCreateChain ) )
testSuite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ElasticTestChain ) )
testSuite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ElasticDeleteChain ) )
unittest.TextTestRunner( verbosity = 2 ).run( testSuite )
|
hgiemza/DIRAC
|
tests/Integration/Test_ElasticsearchDB.py
|
Python
|
gpl-3.0
| 15,280
|
[
"DIRAC"
] |
50d216f3ed5bc94869fe3639eda525a7c8d12e6d5880dfe8bbe863caa0384541
|
#!/usr/bin/python
class Node:
"""Klasa reprezentujaca wezel listy jednokierunkowej."""
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def __str__(self):
        return str(self.data)  # very generic representation
def traverse(node, visit):
"""Iteracyjne przejscie przez liste jednokierunkowa."""
while node:
visit(node)
node = node.next
def visit(node):
print("node: ", node.data)
def remove_head(node):
if node is None:
raise ValueError("List is empty")
else:
tmp_ptr, tmp_value = node, node.data
node = node.next
del tmp_ptr
return (node, tmp_value)
def remove_tail(node):
if node is None:
raise ValueError("List is empty")
else:
head = node
before = node
if node.next != None:
node = node.next
while node.next != None:
before = node
node = node.next
tmp_value, before.next = node.data, None
return head, tmp_value
else:
return remove_head(head)
head = None   # [], empty list
head = Node(7, head)   # [7]
head = Node(6, head)   # [6, 7]
head = Node(5, head)   # [5, 6, 7]
head = Node(4, head)   # [4, 5, 6, 7]
head = Node(3, head)   # [3, 4, 5, 6, 7]
head = Node(2, head)   # [2, 3, 4, 5, 6, 7]
head = Node(1, head)   # [1, 2, 3, 4, 5, 6, 7]
print("Before remove head: ")
traverse(head, visit)
head, data = remove_head(head)
print("Element, which was deleted: ", data)
print("After remove head and before remove tail: ")
traverse(head, visit)
head, data = remove_tail(head)
print("After remove tail")
traverse(head, visit)
|
Damian9449/Python
|
lab9_strukturyDanych/9.1.py
|
Python
|
mit
| 1,655
|
[
"VisIt"
] |
67781897386999870d9361577c306180948ce68b531a7cc455a05e89c199d901
|
from scipy import io
import numpy as np
from evtk.hl import imageToVTK
from evtk.hl import gridToVTK
test = {}
io.loadmat('iCPC3D04a_v3d_x1_uint8.mat',mdict=test)
vox3dnp = np.array(test['vox3d'])
# the voxel spacing must be read before it is used below
sp = test['voxsize'][0][0]
imageToVTK('./test', origin=(0.0, 0.0, 0.0), spacing=(sp, sp, sp), cellData={'grayscale': np.ascontiguousarray(vox3dnp)})
w, h, d = vox3dnp.shape
x = np.linspace(0.0, sp*(w), w+1, dtype=np.float32)
y = np.linspace(0.0, sp*(h), h+1, dtype=np.float32)
z = np.linspace(0.0, sp*(d), d+1, dtype=np.float32)
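# The x/y/z coordinate arrays built above are not consumed by imageToVTK; a
# hedged sketch (assuming pyevtk's gridToVTK(path, x, y, z, cellData=...)
# signature and a hypothetical './test_grid' output name) of how they could
# drive a structured-grid export of the same voxel data:
gridToVTK('./test_grid', x, y, z,
          cellData={'grayscale': np.ascontiguousarray(vox3dnp)})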
from tvtk.api import tvtk, write_data
grid = tvtk.ImageData(spacing=(sp, sp, sp), origin=(0.0, 0.0, 0.0), dimensions=vox3dnp.shape)
grid.point_data.scalars = np.ravel(vox3dnp, order='F')
grid.point_data.scalars.name = 'grayscale'
# Writes legacy ".vtk" format if filename ends with "vtk", otherwise
# this will write data using the newer xml-based format.
write_data(grid, 'test3.vtk')
|
kayarre/dicomwrangle
|
mat2vtk.py
|
Python
|
bsd-2-clause
| 919
|
[
"VTK"
] |
c1e9ffba9c8f813bcc9d886c08b27828ba8d281d542c9b0b4b1babc419f6ea08
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
from espressomd.shapes import Rhomboid
from espressomd.shapes import Cylinder
small_epsilon = 0.000000001
large_number = 10000000.0
output_precision = 14
def custom_str(realn):
return str('{:.{prec}f}'.format(realn, prec=output_precision))
def get_triangle_normal(a, b, c):
"""
Returns the normal vector of a triangle given by points a,b,c.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
c : list of :obj:`float`
vector with 3 components, point c
"""
n = [0.0, 0.0, 0.0]
n[0] = (b[1] - a[1]) * (c[2] - a[2]) - (b[2] - a[2]) * (c[1] - a[1])
n[1] = (b[2] - a[2]) * (c[0] - a[0]) - (b[0] - a[0]) * (c[2] - a[2])
n[2] = (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])
return np.array(n)
def norm(vect):
"""
Returns the norm of a vector.
Parameters
----------
vect : list of :obj:`float`
vector with 3 components
"""
v = np.array(vect)
return np.sqrt(np.dot(v, v))
def vec_distance(a, b):
"""
Returns the length of vector between points a and b.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
"""
return norm(np.array(a) - np.array(b))
def area_triangle(a, b, c):
"""
Returns the area of a triangle given by points a,b,c.
Parameters
----------
a : list of :obj:`float`
vector with 3 components, point a
b : list of :obj:`float`
vector with 3 components, point b
c : list of :obj:`float`
vector with 3 components, point c
"""
n = get_triangle_normal(a, b, c)
area = 0.5 * norm(n)
return area
def angle_btw_triangles(P1, P2, P3, P4):
"""
Returns the size of an angle between triangles given by points P2, P1, P3 and P2, P3, P4.
Parameters
----------
P1 : list of :obj:`float`
vector with 3 components, point P1
P2 : list of :obj:`float`
vector with 3 components, point P2
P3 : list of :obj:`float`
vector with 3 components, point P3
P4 : list of :obj:`float`
vector with 3 components, point P4
"""
n1 = get_triangle_normal(P2, P1, P3)
n2 = get_triangle_normal(P2, P3, P4)
tmp11 = np.dot(n1, n2)
tmp11 = tmp11 * abs(tmp11)
tmp22 = np.dot(n1, n1)
tmp33 = np.dot(n2, n2)
tmp11 /= (tmp22 * tmp33)
if tmp11 > 0:
tmp11 = np.sqrt(tmp11)
else:
tmp11 = - np.sqrt(- tmp11)
if tmp11 >= 1.0:
tmp11 = 0.0
elif tmp11 <= -1.:
tmp11 = np.pi
phi = np.pi - math.acos(tmp11)
if (np.dot(n1, np.array(P4)) - np.dot(n1, np.array(P1))) < 0:
phi = 2.0 * np.pi - phi
return phi
def discard_epsilon(x):
"""
Returns zero if the argument is too small.
Parameters
----------
x : :obj:`float`
real number
"""
if (x > -small_epsilon and x < small_epsilon):
res = 0.0
else:
res = x
return res
def oif_neo_hookean_nonlin(lambd):
"""
Defines NeoHookean nonlinearity.
Parameters
----------
lambd : :obj:`float`
real number
"""
# Defined by (19) from Dupin2007
res = (pow(lambd, 0.5) + pow(lambd, -2.5)) / (lambd + pow(lambd, -3.))
return res
def oif_calc_stretching_force(ks, pA, pB, dist0, dist):
"""
Calculates nonlinear stretching forces between two points on an edge.
Parameters
----------
ks : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
dist0 : :obj:`float`
relaxed distance btw particles
dist : :obj:`float`
current distance btw particles
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# as of now, corresponds to git commit
# f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
dr = dist - dist0
# nonlinear stretching:
lambd = 1.0 * dist / dist0
fac = ks * oif_neo_hookean_nonlin(lambd) * dr
# no negative sign here! different from C implementation
# due to reverse order of vector subtraction
f = fac * (np.array(pB) - np.array(pA)) / dist
return f
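# Note: oif_neo_hookean_nonlin(1.0) == 1.0, so for dist close to dist0 this
# reduces to the linear stretching force defined below.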
def oif_calc_linear_stretching_force(ks, pA, pB, dist0, dist):
"""
Calculates linear stretching forces between two points on an edge.
Parameters
----------
ks : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
dist0 : :obj:`float`
relaxed distance btw particles
dist : :obj:`float`
current distance btw particles
"""
dr = dist - dist0
fac = ks * dr
# no negative sign here! different from C implementation due to
# reverse order of vector subtraction
f = fac * (np.array(pB) - np.array(pA)) / dist
return f
def oif_calc_bending_force(kb, pA, pB, pC, pD, phi0, phi):
"""
Calculates bending forces for four points on two adjacent triangles.
Parameters
----------
kb : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
pD : list of :obj:`float`
position of the fourth particle
phi0 : :obj:`float`
relaxed angle btw two triangles
phi : :obj:`float`
current angle btw two triangles
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# as of now, corresponds to git commit
# f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
n1 = get_triangle_normal(pB, pA, pC)
n2 = get_triangle_normal(pB, pC, pD)
angles = (phi - phi0) / phi0
fac = kb * angles
f1 = fac * np.array(n1) / norm(n1)
f2 = fac * np.array(n2) / norm(n2)
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2]]
return f
def oif_calc_local_area_force(kal, pA, pB, pC, A0, A):
"""
Calculates local area forces between three points in one triangle.
Parameters
----------
kal : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
A0 : :obj:`float`
relaxed area of the triangle
A : :obj:`float`
current area of the triangle
"""
# this has to correspond to the calculation in oif_local_forces.hpp: calc_oif_local
# except for division by 3 - each triangle enters this calculation once, while each triangle enters the
# calc_oif_local three times
# as of now, corresponds to git commit
# f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
centroid = np.array((pA + pB + pC) / 3.0)
delta_area = A - A0
ta = centroid - pA
ta_norm = norm(ta)
tb = centroid - pB
tb_norm = norm(tb)
tc = centroid - pC
tc_norm = norm(tc)
common_factor = kal * delta_area / \
(ta_norm * ta_norm + tb_norm * tb_norm + tc_norm * tc_norm)
# local area force for first node
f1 = common_factor * ta
# local area force for second node
f2 = common_factor * tb
# local area force for third node
f3 = common_factor * tc
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2], f3[0], f3[1], f3[2]]
return f
def oif_calc_global_area_force(kag, pA, pB, pC, Ag0, Ag):
"""
Calculates global area forces between three points in a triangle.
Parameters
----------
kag : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
Ag0 : :obj:`float`
relaxed surface area of the cell
Ag : :obj:`float`
current surface area of the cell
"""
# this has to correspond to the calculation in oif_global_forces.hpp: add_oif_global_forces
# as of now, corresponds to git commit
# f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
centroid = np.array((pA + pB + pC) / 3.0)
delta = Ag - Ag0
ta = centroid - pA
ta_norm = norm(ta)
tb = centroid - pB
tb_norm = norm(tb)
tc = centroid - pC
tc_norm = norm(tc)
A = area_triangle(pA, pB, pC)
common_factor = kag * A * delta / \
(ta_norm * ta_norm + tb_norm * tb_norm + tc_norm * tc_norm)
# global area force for first node
f1 = common_factor * ta
# global area force for second node
f2 = common_factor * tb
# global area force for third node
f3 = common_factor * tc
f = [f1[0], f1[1], f1[2], f2[0], f2[1], f2[2], f3[0], f3[1], f3[2]]
return f
def oif_calc_volume_force(kv, pA, pB, pC, V0, V):
"""
Calculates volume forces for three points in a triangle.
Parameters
----------
kv : :obj:`float`
coefficient of the stretching, spring stiffness
pA : list of :obj:`float`
position of the first particle
pB : list of :obj:`float`
position of the second particle
pC : list of :obj:`float`
position of the third particle
V0 : :obj:`float`
relaxed volume of the cell
V : :obj:`float`
current volume of the cell
"""
# this has to correspond to the calculation in oif_global_forces.hpp: add_oif_global_forces
# as of now, corresponds to git commit
# f156f9b44dcfd3cef9dd5537a1adfc903ac4772a
n = get_triangle_normal(pA, pB, pC)
dn = norm(n)
vv = (V - V0) / V0
A = area_triangle(pA, pB, pC)
f = kv * vv * A * np.array(n) / (dn * 3.0)
return f
def output_vtk_rhomboid(rhom_shape, out_file):
"""
Outputs the VTK files for visualisation of a rhomboid in e.g. Paraview.
Parameters
----------
rhom_shape : :obj:`shape`
rhomboid shape
out_file : :obj:`str`
filename for the output
"""
corner = rhom_shape.corner
a = rhom_shape.a
b = rhom_shape.b
c = rhom_shape.c
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS 8 float\n")
output_file.write(str(corner[0]) + " " + str(
corner[1]) + " " + str(corner[2]) + "\n")
output_file.write(str(corner[0] + a[0]) + " " + str(
corner[1] + a[1]) + " " + str(corner[2] + a[2]) + "\n")
output_file.write(str(corner[0] + a[0] + b[0]) + " " + str(corner[1] + a[1] + b[1]) + " " +
str(corner[2] + a[2] + b[2]) + "\n")
output_file.write(str(corner[0] + b[0]) + " " + str(
corner[1] + b[1]) + " " + str(corner[2] + b[2]) + "\n")
output_file.write(str(corner[0] + c[0]) + " " + str(
corner[1] + c[1]) + " " + str(corner[2] + c[2]) + "\n")
output_file.write(str(corner[0] + a[0] + c[0]) + " " + str(corner[1] + a[1] + c[1]) + " " +
str(corner[2] + a[2] + c[2]) + "\n")
output_file.write(str(corner[0] + a[0] + b[0] + c[0]) + " " + str(corner[1] + a[1] + b[1] + c[1]) + " " +
str(corner[2] + a[2] + b[2] + c[2]) + "\n")
output_file.write(str(corner[0] + b[0] + c[0]) + " " + str(corner[1] + b[1] + c[1]) + " " +
str(corner[2] + b[2] + c[2]) + "\n")
output_file.write("POLYGONS 6 30\n")
output_file.write("4 0 1 2 3\n")
output_file.write("4 4 5 6 7\n")
output_file.write("4 0 1 5 4\n")
output_file.write("4 2 3 7 6\n")
output_file.write("4 0 4 7 3\n")
output_file.write("4 1 2 6 5")
output_file.close()
return 0
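# Hedged usage sketch (assumes the espressomd.shapes.Rhomboid constructor
# with corner/a/b/c/direction keyword arguments; the file name is
# illustrative):
#   rhomboid = Rhomboid(corner=[0.0, 0.0, 0.0], a=[5.0, 0.0, 0.0],
#                       b=[0.0, 5.0, 0.0], c=[0.0, 0.0, 5.0], direction=1)
#   output_vtk_rhomboid(rhomboid, "rhomboid.vtk")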
def output_vtk_cylinder(cyl_shape, n, out_file):
"""
Outputs the VTK files for visualisation of a cylinder in e.g. Paraview.
Parameters
----------
cyl_shape : :obj:`shape`
cylindrical shape
n : :obj:`int`
number of discretization sections
out_file : :obj:`str`
filename for the output
"""
# length is the full height of the cylinder (note: used to be just half in the previous versions)
# only vertical cylinders are supported for now, i.e. with normal (0.0,
# 0.0, 1.0)
axis = cyl_shape.axis
length = cyl_shape.length
radius = cyl_shape.radius
center = cyl_shape.center
check_axis = True
if axis[0] != 0.0:
check_axis = False
if axis[1] != 0.0:
check_axis = False
if axis[2] == 0.0:
check_axis = False
if check_axis is False:
raise Exception(
"output_vtk_cylinder: Output for this type of cylinder is not supported yet.")
axisZ = 1.0
# setting points on perimeter
alpha = 2 * np.pi / n
points = 2 * n
# shift center to the bottom circle
p1 = center - length * np.array(axis) / 2.0
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(points) + " float\n")
for i in range(0, n):
output_file.write(
str(p1[0] + radius * np.cos(i * alpha)) + " " + str(p1[1] + radius * np.sin(i * alpha)) + " " +
str(p1[2]) + "\n")
for i in range(0, n):
output_file.write(
str(p1[0] + radius * np.cos(i * alpha)) + " " + str(p1[1] + radius * np.sin(i * alpha)) + " " +
str(p1[2] + length * axisZ) + "\n")
output_file.write(
"POLYGONS " + str(n + 2) + " " + str(5 * n + (n + 1) * 2) + "\n")
# writing bottom "circle"
output_file.write(str(n) + " ")
for i in range(0, n - 1):
output_file.write(str(i) + " ")
output_file.write(str(n - 1) + "\n")
# writing top "circle"
output_file.write(str(n) + " ")
for i in range(0, n - 1):
output_file.write(str(i + n) + " ")
output_file.write(str(2 * n - 1) + "\n")
# writing sides - rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(i) + " " + str(
i + 1) + " " + str(i + n + 1) + " " + str(i + n) + "\n")
output_file.write("4 " + str(n - 1) + " " + str(
0) + " " + str(n) + " " + str(2 * n - 1) + "\n")
output_file.close()
return 0
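# Hedged usage sketch (assumes the espressomd.shapes.Cylinder constructor
# with center/axis/length/radius/direction keyword arguments; only an axis
# of (0, 0, 1) is accepted by this writer; the file name is illustrative):
#   cyl = Cylinder(center=[5.0, 5.0, 5.0], axis=[0.0, 0.0, 1.0],
#                  length=10.0, radius=2.0, direction=1)
#   output_vtk_cylinder(cyl, 20, "cylinder.vtk")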
def output_vtk_lines(lines, out_file):
"""
Outputs the VTK files for visualisation of lines in e.g. Paraview.
Parameters
----------
lines : list of :obj:`float`
lines is a list of pairs of points p1, p2
each pair represents a line segment to output to vtk
each line in lines contains 6 floats: p1x, p1y, p1z, p2x, p2y, p2z
out_file : :obj:`str`
filename for the output
"""
n_lines = len(lines)
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(2 * n_lines) + " float\n")
for i in range(0, n_lines):
one_line = lines[i]
output_file.write(str(one_line[0]) + " " + str(
one_line[1]) + " " + str(one_line[2]) + "\n")
output_file.write(str(one_line[3]) + " " + str(
one_line[4]) + " " + str(one_line[5]) + "\n")
output_file.write("LINES " + str(n_lines) + " " + str(3 * n_lines) + "\n")
for i in range(0, n_lines):
output_file.write(
str(2) + " " + str(2 * i) + " " + str(2 * i + 1) + "\n")
output_file.close()
return 0
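# Hedged usage sketch: each entry is [p1x, p1y, p1z, p2x, p2y, p2z], as
# described in the docstring above (the file name is illustrative):
#   output_vtk_lines([[0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
#                     [0.0, 0.0, 0.0, 0.0, 1.0, 0.0]], "axes.vtk")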
def output_vtk_pore(
axis, length, outer_rad_left, outer_rad_right, pos, rad_left, rad_right,
smoothing_radius, m, out_file):
"""
Outputs the VTK files for visualisation of a pore in e.g. Paraview.
Parameters
----------
axis : list of :obj:`float`
3 floats specifying the axis
length : :obj:`float`
length of pore
outer_rad_left : :obj:`float`
outer left radius of pore
outer_rad_right : :obj:`float`
outer right radius of pore
rad_left : :obj:`float`
inner left radius of pore
rad_right : :obj:`float`
inner right radius of pore
smoothing_radius : :obj:`float`
smoothing radius for surface connecting outer and inner radii of the pore
pos : list of :obj:`float`
3 floats specifying position of the center of the pore
m : :obj:`int`
number of discretization sections
out_file : :obj:`str`
filename for the output
"""
# length is the length of the pore without the smoothing part
# for now, only axis=(1,0,0) is supported
# should implement rotation
# m is sufficient to be 10
if ".vtk" not in out_file:
print(
"output_vtk_pore warning: A file with vtk format will be written without .vtk extension.")
# n must be even therefore:
n = 2 * m
# setting points on perimeter
alpha = 2 * np.pi / n
beta = 2 * np.pi / n
    number_of_points = 2 * n * (n // 2 + 1)  # integer division: n is even
output_file = open(out_file, "w")
output_file.write("# vtk DataFile Version 3.0\n")
output_file.write("Data\n")
output_file.write("ASCII\n")
output_file.write("DATASET POLYDATA\n")
output_file.write("POINTS " + str(number_of_points) + " float\n")
# shift center to the left half torus
p1 = pos - length / 2 * np.array(axis)
# points on the left half torus
    for j in range(0, n // 2 + 1):
for i in range(0, n):
output_file.write(str(p1[0] - np.sin(j * beta)) + " " +
str(p1[1] + (rad_left + smoothing_radius - np.cos(j * beta)) * np.cos(i * alpha)) + " " +
str(p1[2] + (rad_left + smoothing_radius - np.cos(j * beta)) * np.sin(i * alpha)) + "\n")
    n_points_left = n * (n // 2 + 1)
# shift center to the right half torus
p1 = pos + length / 2 * np.array(axis)
# points on the right half torus
    for j in range(0, n // 2 + 1):
for i in range(0, n):
output_file.write(str(p1[0] + np.sin(j * beta)) + " " +
str(p1[1] + (rad_right + smoothing_radius - np.cos(j * beta)) * np.cos(i * alpha)) + " " +
str(p1[2] + (rad_right + smoothing_radius - np.cos(j * beta)) * np.sin(i * alpha)) + "\n")
number_of_rectangles = n * n + 2 * n
output_file.write("POLYGONS " + str(number_of_rectangles)
+ " " + str(5 * number_of_rectangles) + "\n")
# writing inner side rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(i) + " " +
str(i + 1) + " " +
str(i + n_points_left + 1) + " " +
str(i + n_points_left) + "\n")
output_file.write("4 " + str(n - 1) + " " +
str(0) + " " +
str(n_points_left) + " " +
str(n_points_left + n - 1) + "\n")
# writing outer side rectangles
for i in range(0, n - 1):
output_file.write("4 " + str(n_points_left - n + i) + " " +
str(n_points_left - n + i + 1) + " " +
str(n_points_left - n + i + n_points_left + 1) + " " +
str(n_points_left - n + i + n_points_left) + "\n")
output_file.write("4 " + str(n_points_left - n + n - 1) + " " +
str(n_points_left - n) + " " +
str(n_points_left - n + n_points_left) + " " +
str(n_points_left - n + n_points_left + n - 1) + "\n")
# writing rectangles on the left half of the torus
    for j in range(0, n // 2):
for i in range(0, n - 1):
output_file.write("4 " + str(n * j + i) + " " +
str(n * j + i + 1) + " " +
str(n * j + i + n + 1) + " " +
str(n * j + i + n) + "\n")
output_file.write("4 " + str(n * j + n - 1) + " " +
str(n * j) + " " +
str(n * j + n) + " " +
str(n * j + 2 * n - 1) + "\n")
# writing rectangles on the right half of the torus
    for j in range(0, n // 2):
for i in range(0, n - 1):
output_file.write("4 " + str(n_points_left + n * j + i) + " " +
str(n_points_left + n * j + i + 1) + " " +
str(n_points_left + n * j + i + n + 1) + " " +
str(n_points_left + n * j + i + n) + "\n")
output_file.write("4 " + str(n_points_left + n * j + n - 1) + " " +
str(n_points_left + n * j) + " " +
str(n_points_left + n * j + n) + " " +
str(n_points_left + n * j + 2 * n - 1) + "\n")
output_file.close()
return 0
|
hmenke/espresso
|
src/python/object_in_fluid/oif_utils.py
|
Python
|
gpl-3.0
| 22,295
|
[
"ESPResSo",
"ParaView",
"VTK"
] |
ce52e51cca5207de0799fec51c714ee7dd558ecd44abce30aa68ee9e089eb7e6
|
import os
import re
import glob
import shutil
from nose.tools import (assert_true, assert_equal, assert_is_not_none)
import nipype.pipeline.engine as pe
import qixnat
from qixnat.helpers import xnat_path
from qipipe.pipeline import registration
from ... import (ROOT, PROJECT, CONF_DIR)
from ...helpers.logging import logger
from ...helpers.name_generator import generate_unique_name
from .volume_test_base import VolumeTestBase
RESULTS = os.path.join(ROOT, 'results', 'pipeline', 'registration')
"""The test results directory."""
RESOURCE = generate_unique_name(__name__)
"""The XNAT registration resource name."""
class TestRegistrationWorkflow(VolumeTestBase):
"""
Registration workflow unit tests.
This test exercises the registration workflow on three volumes of one visit
in each of the Breast and Sarcoma studies.
"""
def __init__(self):
super(TestRegistrationWorkflow, self).__init__(
logger(__name__), RESULTS, use_mask=True
)
def test_breast(self):
for args in self.stage('Breast'):
self._test_workflow('mock', *args)
def test_sarcoma(self):
for args in self.stage('Sarcoma'):
self._test_workflow('mock', *args)
def _test_workflow(self, technique, project, subject, session, scan,
*images):
"""
Executes :meth:`qipipe.pipeline.registration.run` on the given
input.
:param technique: the built-in registration technique
:param project: the input project name
:param subject: the input subject name
:param session: the input session name
:param scan: the input scan number
:param images: the input 3D NIfTI images to register
"""
# Register against the first image.
ref_0 = images[0]
# Realign the remaining images.
moving = images[1:]
# The target location.
self.dest = os.path.join(RESULTS, technique, subject, session, 'scans',
str(scan), 'registration', RESOURCE)
logger(__name__).debug("Testing the %s registration workflow on %s %s"
" Scan %d..." %
(technique, subject, session, scan))
with qixnat.connect() as xnat:
xnat.delete(project, subject)
result = registration.run(technique, project, subject, session, scan,
ref_0, *moving, config_dir=CONF_DIR,
resource=RESOURCE, dest=self.dest,
base_dir=self.base_dir)
# Verify the result.
try:
self._verify_result(xnat, subject, session, scan, result)
finally:
xnat.delete(project, subject)
def _verify_result(self, xnat, subject, session, scan, result):
"""
:param xnat: the XNAT connection
:param subject: the registration subject
:param session: the registration session
:param scan: the input scan number
:param result: the meth:`qipipe.pipeline.registration.run` result
output file paths
"""
# Verify that the XNAT resource object was created.
rsc = xnat.find_one(PROJECT, subject, session, scan=scan,
resource=RESOURCE)
assert_is_not_none(rsc, "The %s %s scan %d %s XNAT registration"
" resource object was not created" %
(subject, session, scan, RESOURCE))
# Verify that the registration result is accurate.
split = (os.path.split(location) for location in result)
out_dirs, out_files = (set(files) for files in zip(*split))
rsc_files = set(rsc.files().get())
cfg_file = "%s.cfg" % RESOURCE
assert_true(cfg_file in rsc_files,
"The XNAT registration resource %s does not contain"
" the profile %s" %
                    (xnat_path(rsc), cfg_file))
assert_equal(out_dirs, set([self.dest]),
"The %s %s scan %d %s registration result directory"
" is incorrect - expected: %s, found: %s" %
(subject, session, scan, RESOURCE, self.dest, out_dirs))
rsc_img_files = {f for f in rsc_files if f != cfg_file}
assert_equal(out_files, rsc_img_files,
"The %s %s scan %d %s XNAT registration image file"
" names are incorrect - expected: %s, found: %s" %
(subject, session, scan, RESOURCE, rsc_img_files, out_files))
# Verify that the output files were created.
dest_files = (os.path.join(self.dest, location)
for location in os.listdir(self.dest))
assert_equal(set(dest_files), set(result),
"The %s %s scan %d %s XNAT registration result is"
" incorrect: %s" %
(subject, session, scan, RESOURCE, result))
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
ohsu-qin/qipipe
|
test/unit/pipeline/test_registration.py
|
Python
|
bsd-2-clause
| 5,168
|
[
"VisIt"
] |
38d895295aef63e14b85041be5b81409ec646de05edfa5c0f44ead97d31b0ab5
|
#
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from . import VERSION, PILLOW_VERSION, _plugins
import logging
import warnings
import math
logger = logging.getLogger(__name__)
class DecompressionBombWarning(RuntimeWarning):
pass
class DecompressionBombError(Exception):
pass
class _imaging_not_installed(object):
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3)
try:
# If the _imaging C module is not present, Pillow will not load.
# Note that other modules should not refer to _imaging directly;
# import Image and use the Image.core variable instead.
# Also note that Image.core is not a publicly documented interface,
# and should be considered private and subject to change.
from . import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
"version of Pillow or PIL:\n"
"Core version: %s\n"
"Pillow version: %s" %
(getattr(core, 'PILLOW_VERSION', None),
PILLOW_VERSION))
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_" in str(v):
# should match _PyUnicodeUCS2_FromString and
# _PyUnicodeUCS2_AsLatin1String
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile Pillow or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_" in str(v):
# should match _PyUnicodeUCS4_FromString and
# _PyUnicodeUCS4_AsLatin1String
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile Pillow or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
# see docs/porting.rst
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from . import ImageMode
from ._binary import i8
from ._util import isPath, isStringType, deferred_error
import os
import sys
import io
import struct
import atexit
# type stuff
import collections
import numbers
# works everywhere, win for pypy, not cpython
USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info')
try:
import cffi
HAS_CFFI = True
except ImportError:
HAS_CFFI = False
try:
from pathlib import Path
HAS_PATHLIB = True
except ImportError:
try:
from pathlib2 import Path
HAS_PATHLIB = True
except ImportError:
HAS_PATHLIB = False
def isImageType(t):
"""
Checks if an object is an image object.
.. warning::
This function is for internal use only.
:param t: object to check if it's an image
:returns: True if the object is an image
"""
return hasattr(t, "im")
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
TRANSVERSE = 6
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
BOX = 4
BILINEAR = LINEAR = 2
HAMMING = 5
BICUBIC = CUBIC = 3
LANCZOS = ANTIALIAS = 1
# dithers
NEAREST = NONE = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {}
DECODERS = {}
ENCODERS = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # Bits need to be extended to bytes
"L": ('|u1', None),
"LA": ('|u1', 2),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
"HSV": ('|u1', 3),
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return (im.size[1], im.size[0]), typ
else:
return (im.size[1], im.size[0], extra), typ
MODES = sorted(_MODEINFO)
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
:py:method:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band).
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode.getmode(mode).bands)
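# A few concrete values, for orientation (derived from the mode tables above):
#   getmodebase("CMYK") == "RGB"
#   getmodetype("RGB") == "L"
#   getmodebandnames("YCbCr") == ("Y", "Cb", "Cr")
#   getmodebands("RGB") == 3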
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
"Explicitly load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
from . import BmpImagePlugin
except ImportError:
pass
try:
from . import GifImagePlugin
except ImportError:
pass
try:
from . import JpegImagePlugin
except ImportError:
pass
try:
from . import PpmImagePlugin
except ImportError:
pass
try:
from . import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
def init():
"""
Explicitly initializes the Python Imaging Library. This function
loads all available file format drivers.
"""
global _initialized
if _initialized >= 2:
return 0
for plugin in _plugins:
try:
logger.debug("Importing %s", plugin)
__import__("PIL.%s" % plugin, globals(), locals(), [])
except ImportError as e:
logger.debug("Image: failed to import %s: %s", plugin, e)
if OPEN or SAVE:
_initialized = 2
return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
decoder = DECODERS[decoder_name]
return decoder(mode, *args + extra)
except KeyError:
pass
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print(decoder, mode, args + extra)
return decoder(mode, *args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
encoder = ENCODERS[encoder_name]
return encoder(mode, *args + extra)
except KeyError:
pass
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print(encoder, mode, args + extra)
return encoder(mode, *args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
return value if isinstance(value, _E) else _E(value)
class _E(object):
def __init__(self, data):
self.data = data
def __add__(self, other):
return _E((self.data, "__add__", coerce_e(other).data))
def __mul__(self, other):
return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
return c, 0.0
if a is stub and b == "__add__" and isinstance(c, numbers.Number):
return 1.0, c
except TypeError:
pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
d == "__add__" and isinstance(e, numbers.Number)):
return c, e
except TypeError:
pass
raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
class Image(object):
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
_close_exclusive_fp_after_loading = True
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
self.pyaccess = None
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
if im.mode in ('P', 'PA'):
if self.palette:
new.palette = self.palette.copy()
else:
from . import ImagePalette
new.palette = ImagePalette.ImagePalette()
new.info = self.info.copy()
return new
# Context Manager Support
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is only required to close images that have not
had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method.
"""
try:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, 'map', None):
self.map = None
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = deferred_error(ValueError("Operation on closed image"))
if sys.version_info.major >= 3:
def __del__(self):
if (hasattr(self, 'fp') and hasattr(self, '_exclusive_fp')
and self.fp and self._exclusive_fp):
self.fp.close()
self.fp = None
def _copy(self):
self.load()
self.im = self.im.copy()
self.pyaccess = None
self.readonly = 0
def _ensure_mutable(self):
if self.readonly:
self._copy()
else:
self.load()
def _dump(self, file=None, format=None, **options):
import tempfile
suffix = ''
if format:
suffix = '.'+format
if not file:
f, filename = tempfile.mkstemp(suffix)
os.close(f)
else:
filename = file
if not filename.endswith(suffix):
filename = filename + suffix
self.load()
if not format or format == "PPM":
self.im.save_ppm(filename)
else:
self.save(filename, format, **options)
return filename
def __eq__(self, other):
return (isinstance(other, Image) and
self.__class__.__name__ == other.__class__.__name__ and
self.mode == other.mode and
self.size == other.size and
self.info == other.info and
self.category == other.category and
self.readonly == other.readonly and
self.getpalette() == other.getpalette() and
self.tobytes() == other.tobytes())
def __ne__(self, other):
eq = (self == other)
return not eq
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def _repr_png_(self):
""" iPython display hook support
:returns: png version of the image as bytes
"""
from io import BytesIO
b = BytesIO()
self.save(b, 'PNG')
return b.getvalue()
@property
def __array_interface__(self):
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['version'] = 3
if self.mode == '1':
# Binary images need to be extended from bits to bytes
# See: https://github.com/python-pillow/Pillow/issues/350
new['data'] = self.tobytes('raw', 'L')
else:
new['data'] = self.tobytes()
return new
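    # Note: numpy consumes this via e.g. np.asarray(im); per _conv_type_shape
    # the resulting array has shape (height, width) for single-band modes and
    # (height, width, bands) otherwise.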
def __getstate__(self):
return [
self.info,
self.mode,
self.size,
self.getpalette(),
self.tobytes()]
def __setstate__(self, state):
Image.__init__(self)
self.tile = []
info, mode, size, palette, data = state
self.info = info
self.mode = mode
self.size = size
self.im = core.new(mode, size)
if mode in ("L", "P") and palette:
self.putpalette(palette)
self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object.
.. warning::
This method returns the raw image data from the internal
storage. For compressed image data (e.g. PNG, JPEG) use
:meth:`~.save`, with a BytesIO parameter for in-memory
data.
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:rtype: A bytes object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tobytes" % s)
return b"".join(data)
def tostring(self, *args, **kw):
raise NotImplementedError("tostring() has been removed. "
"Please call tobytes() instead.")
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join([
("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'),
("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"
])
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
raise NotImplementedError("fromstring() has been removed. "
"Please call frombytes() instead.")
def load(self):
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time. This method will close the file
associated with the image.
:returns: An image access object.
:rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
"""
if self.im and self.palette and self.palette.dirty:
# realize palette
self.im.putpalette(*self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if "transparency" in self.info:
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
if self.im:
if HAS_CFFI and USE_CFFI_ACCESS:
if self.pyaccess:
return self.pyaccess
from . import PyAccess
self.pyaccess = PyAccess.new(self, self.readonly)
if self.pyaccess:
return self.pyaccess
return self.im.pixel_access(self.readonly)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(self, mode=None, matrix=None, dither=None,
palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if not mode or (mode == self.mode and not matrix):
return self.copy()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if "transparency" in self.info and \
self.info['transparency'] is not None:
if self.mode in ('L', 'RGB') and mode == 'RGBA':
# Use transparent conversion to promote from transparent
# color to an alpha channel.
new_im = self._new(self.im.convert_transparent(
mode, self.info['transparency']))
del(new_im.info['transparency'])
return new_im
elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'):
t = self.info['transparency']
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn('Palette images with Transparency ' +
' expressed in bytes should be converted ' +
'to RGBA images')
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == 'P':
trns_im.putpalette(self.palette)
if isinstance(t, tuple):
try:
t = trns_im.palette.getcolor(t)
except:
raise ValueError("Couldn't allocate a palette "
"color for transparency")
trns_im.putpixel((0, 0), t)
if mode in ('L', 'RGB'):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert('RGB')
trns = trns_im.getpixel((0, 0))
elif self.mode == 'P' and mode == 'RGBA':
t = self.info['transparency']
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
raise ValueError("Transparency for P mode should" +
" be bytes or int")
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from . import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del(new.info['transparency'])
if trns is not None:
try:
new.info['transparency'] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del(new.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del(new_im.info['transparency'])
if trns is not None:
if new_im.mode == 'P':
try:
new_im.info['transparency'] = new_im.palette.getcolor(trns)
except:
del(new_im.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
else:
new_im.info['transparency'] = trns
return new_im
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
"""
Convert the image to 'P' mode with the specified number
of colors.
:param colors: The desired number of colors, <= 256
:param method: 0 = median cut
1 = maximum coverage
2 = fast octree
3 = libimagequant
:param kmeans: Integer
:param palette: Quantize to the palette of given :py:class:`PIL.Image.Image`.
:returns: A new image
"""
self.load()
if method is None:
# defaults:
method = 0
if self.mode == 'RGBA':
method = 2
if self.mode == 'RGBA' and method not in (2, 3):
# Caller specified an invalid mode.
raise ValueError(
'Fast Octree (method == 2) and libimagequant (method == 3) ' +
'are the only valid methods for quantizing RGBA images')
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._new(im)
return self._new(self.im.quantize(colors, method, kmeans))
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
return self._new(self.im.copy())
__copy__ = copy
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
Note: Prior to Pillow 3.4.0, this was a lazy operation.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if box is None:
return self.copy()
self.load()
return self._new(self._crop(self.im, box))
def _crop(self, im, box):
"""
Returns a rectangular region from the core image object im.
This is equivalent to calling im.crop((x0, y0, x1, y1)), but
includes additional sanity checks.
:param im: a core image object
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:returns: A core image object.
"""
x0, y0, x1, y1 = map(int, map(round, box))
if x1 < x0:
x1 = x0
if y1 < y0:
y1 = y0
_decompression_bomb_check((x1, y1))
return im.crop((x0, y0, x1, y1))
def draft(self, mode, size):
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
Note: This method is not implemented for most images. It is
currently implemented only for JPEG and PCD images.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
from . import ImageFilter
self.load()
if isinstance(filter, collections.Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError("filter argument should be ImageFilter.Filter " +
"instance or class")
multiband = isinstance(filter, ImageFilter.MultibandFilter)
if self.im.bands == 1 or multiband:
return self._new(filter.filter(self.im))
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, **getbands** on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. If the image
is completely empty, this method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band=None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use **list(im.getdata())**.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
        Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
if bytes is str:
return [i8(c) for c in self.im.getpalette()]
else:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y).
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return [i8(c) for c in x], [i8(c) for c in y]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
def offset(self, xoffset, yoffset=None):
raise NotImplementedError("offset() has been removed. "
"Please call ImageChops.offset() instead.")
def paste(self, im, box=None, mask=None):
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). If a 4-tuple is given, the size of the pasted image
must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
        Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L" or "RGBA"
images (in the latter case, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values will mix the two images together, including their alpha
channels if they have them.
See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
combine images with respect to their alpha channels.
:param im: Source image or pixel value (integer or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
"""
if isImageType(box) and mask is None:
# abbreviated paste(im, mask) syntax
mask = box
box = None
if box is None:
box = (0, 0)
if len(box) == 2:
# upper left corner given; get size from image or mask
if isImageType(im):
size = im.size
elif isImageType(mask):
size = mask.size
else:
# FIXME: use self.size here?
raise ValueError(
"cannot determine region size; use 4-item box"
)
box += (box[0]+size[0], box[1]+size[1])
if isStringType(im):
from . import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self._ensure_mutable()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
""" 'In-place' analog of Image.alpha_composite. Composites an image
onto this image.
:param im: image to composite over this one
:param dest: Optional 2 tuple (left, top) specifying the upper
left corner in this (destination) image.
:param source: Optional 2 (left, top) tuple for the upper left
corner in the overlay source image, or 4 tuple (left, top, right,
bottom) for the bounds of the source rectangle
Performance Note: Not currently implemented in-place in the core layer.
"""
if not isinstance(source, (list, tuple)):
raise ValueError("Source must be a tuple")
if not isinstance(dest, (list, tuple)):
raise ValueError("Destination must be a tuple")
if not len(source) in (2, 4):
raise ValueError("Source must be a 2 or 4-tuple")
if not len(dest) == 2:
raise ValueError("Destination must be a 2-tuple")
if min(source) < 0:
raise ValueError("Source must be non-negative")
if min(dest) < 0:
raise ValueError("Destination must be non-negative")
if len(source) == 2:
source = source + im.size
# over image, crop if it's not the whole thing.
if source == (0, 0) + im.size:
overlay = im
else:
overlay = im.crop(source)
# target for the paste
box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
def point(self, lut, mode=None):
"""
Maps this image through a lookup table or function.
:param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead, it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
:param mode: Output mode (default is same as input). In the
current version, this can only be used if the source image
has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut)
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
lut = [lut(i) for i in range(256)] * self.im.bands
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
raise ValueError("point operation not supported for this mode")
return self._new(self.im.point(lut, mode))
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
self._ensure_mutable()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.pyaccess = None
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self._ensure_mutable()
self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P" or
"L" image, and the palette sequence must contain 768 integer
values, where each group of three values represent the red,
green, and blue values for the corresponding pixel
index. Instead of an integer sequence, you can use an 8-bit
string.
:param data: A palette sequence (either a list or a string).
:param rawmode: The raw mode of the palette.
"""
from . import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
self.load()
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
if bytes is str:
data = "".join(chr(x) for x in data)
else:
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y).
:param value: The pixel value.
"""
if self.readonly:
self._copy()
self.load()
if self.pyaccess:
return self.pyaccess.putpixel(xy, value)
return self.im.putpixel(xy, value)
def remap_palette(self, dest_map, source_palette=None):
"""
Rewrites the image to reorder the palette.
:param dest_map: A list of indexes into the original palette.
           e.g. [1,0] would swap a two item palette, and list(range(256))
is the identity transform.
:param source_palette: Bytes or None.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
from . import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
if source_palette is None:
if self.mode == "P":
real_source_palette = self.im.getpalette("RGB")[:768]
else: # L-mode
real_source_palette = bytearray(i//3 for i in range(768))
else:
real_source_palette = source_palette
palette_bytes = b""
new_positions = [0]*256
# pick only the used colors from the palette
for i, oldPosition in enumerate(dest_map):
palette_bytes += real_source_palette[oldPosition*3:oldPosition*3+3]
new_positions[oldPosition] = i
        # replace the palette color id of all pixels with the new id
# Palette images are [0..255], mapped through a 1 or 3
# byte/color map. We need to remap the whole image
# from palette 1 to palette 2. New_positions is
# an array of indexes into palette 1. Palette 2 is
# palette 1 with any holes removed.
# We're going to leverage the convert mechanism to use the
# C code to remap the image from palette 1 to palette 2,
# by forcing the source image into 'L' mode and adding a
# mapping 'L' mode palette, then converting back to 'L'
# sans palette thus converting the image bytes, then
# assigning the optimized RGB palette.
# perf reference, 9500x4000 gif, w/~135 colors
# 14 sec prepatch, 1 sec postpatch with optimization forced.
mapping_palette = bytearray(new_positions)
m_im = self.copy()
m_im.mode = 'P'
m_im.palette = ImagePalette.ImagePalette("RGB",
palette=mapping_palette*3,
size=768)
# possibly set palette dirty, then
# m_im.putpalette(mapping_palette, 'L') # converts to 'P'
# or just force it.
# UNDONE -- this is part of the general issue with palettes
m_im.im.putpalette(*m_im.palette.getdata())
m_im = m_im.convert('L')
# Internally, we require 768 bytes for a palette.
new_palette_bytes = (palette_bytes +
(768 - len(palette_bytes)) * b'\x00')
m_im.putpalette(new_palette_bytes)
m_im.palette = ImagePalette.ImagePalette("RGB",
palette=palette_bytes,
size=len(palette_bytes))
return m_im
def resize(self, size, resample=NEAREST, box=None):
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BOX`,
:py:attr:`PIL.Image.BILINEAR`, :py:attr:`PIL.Image.HAMMING`,
:py:attr:`PIL.Image.BICUBIC` or :py:attr:`PIL.Image.LANCZOS`.
If omitted, or if the image has mode "1" or "P", it is
           set to :py:attr:`PIL.Image.NEAREST`.
See: :ref:`concept-filters`.
:param box: An optional 4-tuple of floats giving the region
of the source image which should be scaled.
The values should be within (0, 0, width, height) rectangle.
If omitted or None, the entire source is used.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample not in (
NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING,
):
raise ValueError("unknown resampling filter")
size = tuple(size)
if box is None:
box = (0, 0) + self.size
else:
box = tuple(box)
if self.size == size and box == (0, 0) + self.size:
return self.copy()
if self.mode in ("1", "P"):
resample = NEAREST
if self.mode == 'LA':
return self.convert('La').resize(size, resample, box).convert('LA')
if self.mode == 'RGBA':
return self.convert('RGBa').resize(size, resample, box).convert('RGBA')
self.load()
return self._new(self.im.resize(size, resample, box))
def rotate(self, angle, resample=NEAREST, expand=0, center=None,
translate=None):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
           set to :py:attr:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image. Note that the expand flag assumes rotation around
the center and no translation.
:param center: Optional center of rotation (a 2-tuple). Origin is
the upper left corner. Default is the center of the image.
:param translate: An optional post-rotate translation (a 2-tuple).
:returns: An :py:class:`~PIL.Image.Image` object.
"""
angle = angle % 360.0
# Fast paths regardless of filter, as long as we're not
# translating or changing the center.
if not (center or translate):
if angle == 0:
return self.copy()
if angle == 180:
return self.transpose(ROTATE_180)
if angle == 90 and expand:
return self.transpose(ROTATE_90)
if angle == 270 and expand:
return self.transpose(ROTATE_270)
# Calculate the affine matrix. Note that this is the reverse
# transformation (from destination image to source) because we
# want to interpolate the (discrete) destination pixel from
# the local area around the (floating) source pixel.
# The matrix we actually want (note that it operates from the right):
# (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx)
# (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
# (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1)
# The reverse matrix is thus:
# (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx)
# (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
# (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1)
# In any case, the final translation may be updated at the end to
# compensate for the expand flag.
w, h = self.size
if translate is None:
post_trans = (0, 0)
else:
post_trans = translate
if center is None:
rotn_center = (w / 2.0, h / 2.0) # FIXME These should be rounded to ints?
else:
rotn_center = center
angle = - math.radians(angle)
matrix = [
round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0
]
def transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
matrix[2], matrix[5] = transform(-rotn_center[0] - post_trans[0],
-rotn_center[1] - post_trans[1], matrix)
matrix[2] += rotn_center[0]
matrix[5] += rotn_center[1]
if expand:
# calculate output size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = transform(x, y, matrix)
xx.append(x)
yy.append(y)
nw = int(math.ceil(max(xx)) - math.floor(min(xx)))
nh = int(math.ceil(max(yy)) - math.floor(min(yy)))
# We multiply a translation matrix from the right. Because of its
# special form, this is the same as taking the image of the
# translation vector as new translation vector.
matrix[2], matrix[5] = transform(-(nw - w) / 2.0,
-(nh - h) / 2.0,
matrix)
w, h = nw, nh
return self.transform((w, h), AFFINE, matrix, resample)
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param options: Extra parameters to the image writer.
:returns: None
:exception KeyError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename = ""
open_fp = False
if isPath(fp):
filename = fp
open_fp = True
elif HAS_PATHLIB and isinstance(fp, Path):
filename = str(fp)
open_fp = True
if not filename and hasattr(fp, "name") and isPath(fp.name):
# only set the name for metadata purposes
filename = fp.name
# may mutate self!
self.load()
save_all = False
if 'save_all' in params:
save_all = params.pop('save_all')
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
if ext not in EXTENSION:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise ValueError('unknown file extension: {}'.format(ext))
if format.upper() not in SAVE:
init()
if save_all:
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
if open_fp:
if params.get('append', False):
fp = builtins.open(filename, "r+b")
else:
# Open also for reading ("+"), because TIFF save_all
# writer needs to go back and edit the written data.
fp = builtins.open(filename, "w+b")
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if open_fp:
fp.close()
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
        formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
def show(self, title=None, command=None):
"""
Displays this image. This method is mainly intended for
debugging purposes.
On Unix platforms, this method saves the image to a temporary
PPM file, and calls either the **xv** utility or the **display**
utility, depending on which one can be found.
On macOS, this method saves the image to a temporary BMP file, and
opens it with the native Preview application.
On Windows, it saves the image to a temporary BMP file, and uses
the standard BMP display utility to show it (usually Paint).
:param title: Optional title to use for the image window,
where possible.
:param command: command used to show the image
"""
_show(self, title=title, command=command)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
If you need only one band, :py:meth:`~PIL.Image.Image.getchannel`
method can be more convenient and faster.
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = map(self._new, self.im.split())
return tuple(ims)
def getchannel(self, channel):
"""
Returns an image containing a single channel of the source image.
:param channel: What channel to return. Could be index
(0 for "R" channel of "RGB") or channel name
("A" for alpha channel of "RGBA").
:returns: An image in "L" mode.
.. versionadded:: 4.3.0
"""
self.load()
if isStringType(channel):
try:
channel = self.getbands().index(channel)
except ValueError:
raise ValueError(
'The image has no channel "{}"'.format(channel))
return self._new(self.im.getband(channel))
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(self, size, resample=BICUBIC):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
:returns: None
"""
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
im = self.resize(size, resample)
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
self.pyaccess = None
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST,
fill=1, fillcolor=None):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:param fillcolor: Optional fill color for the area outside the transform
in the output image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode == 'LA':
return self.convert('La').transform(
size, method, data, resample, fill).convert('LA')
if self.mode == 'RGBA':
return self.convert('RGBa').transform(
size, method, data, resample, fill).convert('RGBA')
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, fillcolor)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample,
fillcolor is None)
else:
im.__transformer((0, 0)+size, self, method, data,
resample, fillcolor is None)
return im
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
w = box[2] - box[0]
h = box[3] - box[1]
if method == AFFINE:
data = data[0:6]
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (xs, 0, x0, 0, ys, y0)
elif method == PERSPECTIVE:
data = data[0:8]
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]
sw = data[2:4]
se = data[4:6]
ne = data[6:8]
x0, y0 = nw
As = 1.0 / w
At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
return self._new(self.im.transpose(method))
def effect_spread(self, distance):
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
return self._new(self.im.effect_spread(distance))
def toqimage(self):
"""Returns a QImage copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqimage(self)
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqpixmap(self)
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler(object):
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler(object):
# used as a mixin by geometry transforms (for use with im.transform)
pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
def _check_size(size):
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a 2 tuple of (width, height)
:returns: True, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] < 0 or size[1] < 0:
raise ValueError("Width and height must be >= 0")
return True
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image. See:
:ref:`concept-modes`.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
_check_size(size)
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
from . import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
:ref:`Writing Your Own File Decoder <file-decoders>`.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
_check_size(size)
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(data, decoder_name, args)
return im
def fromstring(*args, **kw):
raise NotImplementedError("fromstring() has been removed. " +
"Please call frombytes() instead.")
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
    original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
differs from that used for :py:func:`~PIL.Image.frombytes`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
.. versionadded:: 1.1.4
"""
_check_size(size)
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1, 1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If obj is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
See: :ref:`concept-modes`.
:returns: An image object.
.. versionadded:: 1.1.6
"""
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
strides = arr.get('strides', None)
if mode is None:
try:
typekey = (1, 1) + shape[2:], arr['typestr']
mode, rawmode = _fromarray_typemap[typekey]
except KeyError:
# print(typekey)
raise TypeError("Cannot handle this data type")
else:
rawmode = mode
if mode in ["1", "L", "I", "P", "F"]:
ndmax = 2
elif mode == "RGB":
ndmax = 3
else:
ndmax = 4
if ndim > ndmax:
raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
size = shape[1], shape[0]
if strides is not None:
if hasattr(obj, 'tobytes'):
obj = obj.tobytes()
else:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
def fromqimage(im):
"""Creates an image instance from a QImage image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqimage(im)
def fromqpixmap(im):
"""Creates an image instance from a QPixmap image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqpixmap(im)
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
((1, 1), "|b1"): ("1", "1;8"),
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<u2"): ("I", "I;16"),
((1, 1), ">u2"): ("I", "I;16B"),
((1, 1), "<i2"): ("I", "I;16S"),
((1, 1), ">i2"): ("I", "I;16BS"),
((1, 1), "<u4"): ("I", "I;32"),
((1, 1), ">u4"): ("I", "I;32B"),
((1, 1), "<i4"): ("I", "I;32S"),
((1, 1), ">i4"): ("I", "I;32BS"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 2), "|u1"): ("LA", "LA"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def _decompression_bomb_check(size):
if MAX_IMAGE_PIXELS is None:
return
pixels = size[0] * size[1]
if pixels > 2 * MAX_IMAGE_PIXELS:
raise DecompressionBombError(
"Image size (%d pixels) exceeds limit of %d pixels, "
"could be decompression bomb DOS attack." %
(pixels, 2 * MAX_IMAGE_PIXELS))
if pixels > MAX_IMAGE_PIXELS:
warnings.warn(
"Image size (%d pixels) exceeds limit of %d pixels, "
"could be decompression bomb DOS attack." %
(pixels, MAX_IMAGE_PIXELS),
DecompressionBombWarning)
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement :py:meth:`~file.read`,
:py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception IOError: If the file cannot be found, or the image cannot be
opened and identified.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
exclusive_fp = False
filename = ""
if isPath(fp):
filename = fp
elif HAS_PATHLIB and isinstance(fp, Path):
filename = str(fp.resolve())
if filename:
fp = builtins.open(filename, "rb")
exclusive_fp = True
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
exclusive_fp = True
prefix = fp.read(16)
preinit()
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
# logger.debug("", exc_info=True)
continue
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
im._exclusive_fp = exclusive_fp
return im
if exclusive_fp:
fp.close()
raise IOError("cannot identify image file %r"
% (filename if filename else fp))
#
# Image processing.
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image. Must have mode RGBA.
:param im2: The second image. Must have mode RGBA, and the same size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
"""
Creates a new image by interpolating between two input images, using
a constant alpha.::
out = image1 * (1.0 - alpha) + image2 * alpha
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:param alpha: The interpolation alpha factor. If alpha is 0.0, a
copy of the first image is returned. If alpha is 1.0, a copy of
the second image is returned. There are no restrictions on the
alpha value. If necessary, the result is clipped to fit into
the allowed output range.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
:param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
def eval(image, *args):
"""
Applies the function (which should take one argument) to each pixel
in the given image. If the image has more than one band, the same
function is applied to each band. Note that the function is
evaluated once for each possible pixel value, so you cannot use
random components or other generators.
:param image: The input image.
:param function: A function object, taking one integer argument.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
return image.point(args[0])
def merge(mode, bands):
"""
Merge a set of single band images into a new multiband image.
:param mode: The mode to use for the output image. See:
:ref:`concept-modes`.
:param bands: A sequence containing one single-band image for
each band in the output image. All bands must have the
same size.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for band in bands[1:]:
if band.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if band.size != bands[0].size:
raise ValueError("size mismatch")
for band in bands:
band.load()
return bands[0]._new(core.merge(mode, *[b.im for b in bands]))
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
"""
Register an image file plugin. This function should not be used
in application code.
:param id: An image format identifier.
:param factory: An image file factory method.
:param accept: An optional function that can be used to quickly
reject images having another format.
"""
id = id.upper()
ID.append(id)
OPEN[id] = factory, accept
def register_mime(id, mimetype):
"""
Registers an image MIME type. This function should not be used
in application code.
:param id: An image format identifier.
:param mimetype: The image MIME type for this format.
"""
MIME[id.upper()] = mimetype
def register_save(id, driver):
"""
Registers an image save function. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE[id.upper()] = driver
def register_save_all(id, driver):
"""
Registers an image function to save all the frames
of a multiframe format. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE_ALL[id.upper()] = driver
def register_extension(id, extension):
"""
Registers an image extension. This function should not be
used in application code.
:param id: An image format identifier.
:param extension: An extension used for this format.
"""
EXTENSION[extension.lower()] = id.upper()
def register_extensions(id, extensions):
"""
Registers image extensions. This function should not be
used in application code.
:param id: An image format identifier.
:param extensions: A list of extensions used for this format.
"""
for extension in extensions:
register_extension(id, extension)
def registered_extensions():
"""
Returns a dictionary containing all file extensions belonging
to registered plugins
"""
if not EXTENSION:
init()
return EXTENSION
def register_decoder(name, decoder):
"""
Registers an image decoder. This function should not be
used in application code.
:param name: The name of the decoder
:param decoder: A callable(mode, args) that returns an
ImageFile.PyDecoder object
.. versionadded:: 4.1.0
"""
DECODERS[name] = decoder
def register_encoder(name, encoder):
"""
Registers an image encoder. This function should not be
used in application code.
:param name: The name of the encoder
:param encoder: A callable(mode, args) that returns an
ImageFile.PyEncoder object
.. versionadded:: 4.1.0
"""
ENCODERS[name] = encoder
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
# override me, as necessary
_showxv(image, **options)
def _showxv(image, title=None, **options):
from . import ImageShow
ImageShow.show(image, title, **options)
# --------------------------------------------------------------------
# Effects
def effect_mandelbrot(size, extent, quality):
"""
Generate a Mandelbrot set covering the given extent.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param extent: The extent to cover, as a 4-tuple:
       (x0, y0, x1, y1).
:param quality: Quality.
"""
return Image()._new(core.effect_mandelbrot(size, extent, quality))
def effect_noise(size, sigma):
"""
Generate Gaussian noise centered around 128.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param sigma: Standard deviation of noise.
"""
return Image()._new(core.effect_noise(size, sigma))
def linear_gradient(mode):
"""
Generate 256x256 linear gradient from black to white, top to bottom.
:param mode: Input mode.
"""
return Image()._new(core.linear_gradient(mode))
def radial_gradient(mode):
"""
Generate 256x256 radial gradient from black to white, centre to edge.
:param mode: Input mode.
"""
return Image()._new(core.radial_gradient(mode))
# --------------------------------------------------------------------
# Resources
def _apply_env_variables(env=None):
if env is None:
env = os.environ
for var_name, setter in [
('PILLOW_ALIGNMENT', core.set_alignment),
('PILLOW_BLOCK_SIZE', core.set_block_size),
('PILLOW_BLOCKS_MAX', core.set_blocks_max),
]:
if var_name not in env:
continue
var = env[var_name].lower()
units = 1
for postfix, mul in [('k', 1024), ('m', 1024*1024)]:
if var.endswith(postfix):
units = mul
var = var[:-len(postfix)]
try:
var = int(var) * units
except ValueError:
warnings.warn("{0} is not int".format(var_name))
continue
try:
setter(var)
except ValueError as e:
warnings.warn("{0}: {1}".format(var_name, e))
_apply_env_variables()
atexit.register(core.clear_cache)
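# --------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library). It exercises the
# module-level helpers documented above -- new(), alpha_composite(), split(),
# merge(), rotate() and save() -- from the point of view of user code that
# imports the installed package. The output file name "composited.png" is an
# assumption made for the example, not something the library provides.
if __name__ == "__main__":
    from PIL import Image
    base = Image.new("RGBA", (128, 128), (255, 0, 0, 255))      # opaque red square
    overlay = Image.new("RGBA", (128, 128), (0, 0, 255, 128))   # translucent blue
    blended = Image.alpha_composite(base, overlay)              # composite blue over red
    r, g, b, a = blended.split()                                 # four single-band images
    rebuilt = Image.merge("RGBA", (r, g, b, a))                 # recombine the bands
    rebuilt.rotate(45, expand=True).save("composited.png")      # rotate, grow canvas, save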
|
isabernardes/Heriga
|
Herigaenv/lib/python2.7/site-packages/PIL/Image.py
|
Python
|
mit
| 95,395
|
[
"Gaussian"
] |
47764c8c51bdae16d8bb45b08996be2650d4dd624e0d1ab2f3545a97be41884c
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Eli
#
# Created: 06/04/2014
# Copyright: (c) Eli 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
import sys
import glob
#This script filters multiple data files by id's listed one per line in another file
ids = open("C:/users/eli/desktop/otx_targets/heidi_otx_targets_clone.txt", "r")
idlist1 = []
for line in ids:
    idlist1.append(line.split('\t')[0].strip('\n'))  # keep the ID only, drop the trailing newline
files = glob.glob("C:/users/eli/desktop/otx_targets/limma_lfc/*percentile..txt")
for f in files:
f_in = open(f)
filename = f.split("\\")[-1]
file_out = open('C:/RNAseq/mir_targets/de_mirs/%s_JGI.txt' %filename, 'a')
idlist2 = []
    file_out.write(f_in.readline())  # copy header row
    for line in f_in:
        name = line.strip('\n').split('\t')[0].strip('"')
        #name = name.split('|')[3].split('.')[0] # for first ID from BLAST target
        variables = line.strip('\n').split('\t')[2:]
        #idlist2[name] = line.split('\t')[1]
        descr = line.strip('\n').split('\t')[1]
        if name in idlist1:
            file_out.write(line)  # keep only rows whose ID is in the target list
    f_in.close()
    file_out.close()
#else:
# pass
#print len(idlist1)
#print len(idlist2)
#print idlist2
#for item in idlist1:
# print item
#cross check input and output lists
#idlist3= []
#for thing in idlist1:
# if thing in idlist2:
# pass
# else:
# idlist3.append(thing)
#print len(idlist3)
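# A compact sketch of the same idea (the paths in the example call below are
# placeholders, not the author's real directories): load the wanted IDs into a
# set for fast membership tests, then copy only matching rows from each data
# file while keeping the header line. Using "with" blocks also guarantees that
# every file handle is closed.
import glob
import os
def filter_by_id(id_file, data_glob, out_dir):
    with open(id_file) as fh:
        wanted = set(line.split('\t')[0].strip() for line in fh if line.strip())
    for path in glob.glob(data_glob):
        out_path = os.path.join(out_dir, os.path.basename(path) + '_filtered.txt')
        with open(path) as f_in, open(out_path, 'w') as f_out:
            f_out.write(f_in.readline())  # copy header row
            for line in f_in:
                name = line.split('\t')[0].strip().strip('"')
                if name in wanted:
                    f_out.write(line)
#filter_by_id("ids.txt", "data/*.txt", "filtered")  # example call with placeholder paths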
|
ejspina/Gene_expression_tools
|
Python/FilterByID_batch_oneID.py
|
Python
|
gpl-2.0
| 1,685
|
[
"BLAST"
] |
326c24fa04f6a3cdf0b3469c9c871247509b0f321d0e46f5855f9253d27e05b1
|
"""
==================================================
Calculate the number of Si atoms in a LAMOST pixel
==================================================
*By: Jianrong Deng 20170517
-------------------
"""
##############################################################################
# LAMOST pixel size 12 um x 12 um
# pixel thickness, not sure, assume 200 um for now JD--TODO
# rho_Si = 2.33 g/cm^3
# 1 mol of 28Si = 28 gram
# A_Si = 28
# N_A = 6.02 * 10^23 mol^{-1}: Avogadro constant
pixel_size = 12 * 12 # in um^2
pixel_thick = 200 # in um
rho_Si = 2.33 # in g/cm^3
N_A = 6.02 * pow(10,23) # Avogadro constant
A_Si = 28
pixel_weight = pixel_size * pixel_thick * pow(10, -4*3) * rho_Si # 1um = 10^-4 cm
pixel_N_Si = N_A / A_Si * pixel_weight
print('number of Si atoms in one pixel:', "{:E}".format(pixel_N_Si))
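# A small generalisation of the same arithmetic (a sketch; the 200 um thickness
# above is still an assumption): atoms = (N_A / A) * rho * volume, with the
# pixel volume converted from um^3 to cm^3 (1 um^3 = 1e-12 cm^3).
def atoms_per_pixel(side_um=12, thick_um=200, rho_g_cm3=2.33, A=28, N_A=6.02e23):
    volume_cm3 = side_um * side_um * thick_um * 1e-12
    return N_A / A * rho_g_cm3 * volume_cm3
print('cross-check with a parameterised function:', "{:E}".format(atoms_per_pixel()))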
|
jianrongdeng/LAMOST
|
ana/scripts/pixel_number-of-Si.py
|
Python
|
gpl-3.0
| 814
|
[
"Avogadro"
] |
7905aece94a220740acc60d44949e03f0b2169224ba4e4cc9b09c2919ce706a0
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.dataset_service import (
DatasetServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.dataset_service import (
DatasetServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers
from google.cloud.aiplatform_v1beta1.services.dataset_service import transports
from google.cloud.aiplatform_v1beta1.types import annotation
from google.cloud.aiplatform_v1beta1.types import annotation_spec
from google.cloud.aiplatform_v1beta1.types import data_item
from google.cloud.aiplatform_v1beta1.types import dataset
from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1beta1.types import dataset_service
from google.cloud.aiplatform_v1beta1.types import encryption_spec
from google.cloud.aiplatform_v1beta1.types import io
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DatasetServiceClient._get_default_mtls_endpoint(None) is None
assert (
DatasetServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient,]
)
def test_dataset_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DatasetServiceGrpcTransport, "grpc"),
(transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_dataset_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient,]
)
def test_dataset_service_client_from_service_account_file(client_class):
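    # Both from_service_account_file and its from_service_account_json alias should
    # produce a client that uses the loaded credentials and the default API host.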
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_dataset_service_client_get_transport_class():
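    # The default transport should be one of the available gRPC transports, and
    # requesting "grpc" explicitly should return the gRPC transport class.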
transport = DatasetServiceClient.get_transport_class()
available_transports = [
transports.DatasetServiceGrpcTransport,
]
assert transport in available_transports
transport = DatasetServiceClient.get_transport_class("grpc")
assert transport == transports.DatasetServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DatasetServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceClient),
)
@mock.patch.object(
DatasetServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceAsyncClient),
)
def test_dataset_service_client_client_options(
client_class, transport_class, transport_name
):
    # Check that if a transport instance is provided we won't create a new one.
with mock.patch.object(DatasetServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
    # Check that if the transport is provided via its name (str) we will create a new one.
with mock.patch.object(DatasetServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DatasetServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceClient),
)
@mock.patch.object(
DatasetServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_dataset_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint auto-switch behavior: the endpoint is switched to the default
    # mTLS endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a client cert exists.
    # Check the case client_cert_source is provided. Whether the client cert is used depends
    # on the GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class", [DatasetServiceClient, DatasetServiceAsyncClient]
)
@mock.patch.object(
DatasetServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceClient),
)
@mock.patch.object(
DatasetServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatasetServiceAsyncClient),
)
def test_dataset_service_client_get_mtls_endpoint_and_cert_source(client_class):
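    # Exercise get_mtls_endpoint_and_cert_source across the supported combinations of
    # GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT.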
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_dataset_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DatasetServiceClient,
transports.DatasetServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_dataset_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_dataset_service_client_client_options_from_dict():
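    # client_options may be passed as a plain dict; the api_endpoint it carries should
    # be forwarded to the transport as the host.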
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DatasetServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
DatasetServiceClient,
transports.DatasetServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
DatasetServiceAsyncClient,
transports.DatasetServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_dataset_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials loaded from the file are saved and used as the
    # channel credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [dataset_service.CreateDatasetRequest, dict,])
def test_create_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
client.create_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
@pytest.mark.asyncio
async def test_create_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.CreateDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_dataset_async_from_dict():
await test_create_dataset_async(request_type=dict)
def test_create_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.CreateDatasetRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_dataset(
parent="parent_value", dataset=gca_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].dataset
mock_val = gca_dataset.Dataset(name="name_value")
assert arg == mock_val
def test_create_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_dataset(
dataset_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gca_dataset.Dataset(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_dataset(
parent="parent_value", dataset=gca_dataset.Dataset(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].dataset
mock_val = gca_dataset.Dataset(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_dataset(
dataset_service.CreateDatasetRequest(),
parent="parent_value",
dataset=gca_dataset.Dataset(name="name_value"),
)
@pytest.mark.parametrize("request_type", [dataset_service.GetDatasetRequest, dict,])
def test_get_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
response = client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
def test_get_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
client.get_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
@pytest.mark.asyncio
async def test_get_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.GetDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
)
response = await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_dataset_async_from_dict():
await test_get_dataset_async(request_type=dict)
def test_get_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
await client.get_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_dataset(
dataset_service.GetDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_dataset(
dataset_service.GetDatasetRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [dataset_service.UpdateDatasetRequest, dict,])
def test_update_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
response = client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
def test_update_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
client.update_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
@pytest.mark.asyncio
async def test_update_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.UpdateDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_dataset.Dataset(
name="name_value",
display_name="display_name_value",
description="description_value",
metadata_schema_uri="metadata_schema_uri_value",
etag="etag_value",
)
)
response = await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.metadata_schema_uri == "metadata_schema_uri_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_update_dataset_async_from_dict():
await test_update_dataset_async(request_type=dict)
def test_update_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.UpdateDatasetRequest()
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.UpdateDatasetRequest()
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
await client.update_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "dataset.name=dataset.name/value",) in kw[
"metadata"
]
def test_update_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_dataset(
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].dataset
mock_val = gca_dataset.Dataset(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_dataset(
dataset_service.UpdateDatasetRequest(),
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_dataset(
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].dataset
mock_val = gca_dataset.Dataset(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_dataset(
dataset_service.UpdateDatasetRequest(),
dataset=gca_dataset.Dataset(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [dataset_service.ListDatasetsRequest, dict,])
def test_list_datasets(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
response = client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_datasets_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
client.list_datasets()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
@pytest.mark.asyncio
async def test_list_datasets_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListDatasetsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_datasets_async_from_dict():
await test_list_datasets_async(request_type=dict)
def test_list_datasets_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = dataset_service.ListDatasetsResponse()
client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_datasets_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDatasetsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse()
)
await client.list_datasets(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_datasets_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDatasetsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_datasets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_datasets_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_datasets(
dataset_service.ListDatasetsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_datasets_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDatasetsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_datasets(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_datasets_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_datasets(
dataset_service.ListDatasetsRequest(), parent="parent_value",
)
def test_list_datasets_pager(transport_name: str = "grpc"):
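    # Iterating the pager should walk every page returned by the stub and expose the
    # expected x-goog-request-params routing metadata.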
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_datasets(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, dataset.Dataset) for i in results)
def test_list_datasets_pages(transport_name: str = "grpc"):
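    # The .pages iterator should expose each raw page together with its next_page_token.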
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
pages = list(client.list_datasets(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_datasets_async_pager():
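    # The async pager should expose the first page's token and yield every dataset
    # across all pages via `async for`.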
    client = DatasetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
async_pager = await client.list_datasets(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, dataset.Dataset) for i in responses)
@pytest.mark.asyncio
async def test_list_datasets_async_pages():
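    # The async .pages iterator should yield each raw page with its next_page_token.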
    client = DatasetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(), dataset.Dataset(),],
next_page_token="abc",
),
dataset_service.ListDatasetsResponse(datasets=[], next_page_token="def",),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(),], next_page_token="ghi",
),
dataset_service.ListDatasetsResponse(
datasets=[dataset.Dataset(), dataset.Dataset(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_datasets(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [dataset_service.DeleteDatasetRequest, dict,])
def test_delete_dataset(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_dataset_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
client.delete_dataset()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
@pytest.mark.asyncio
async def test_delete_dataset_async(
transport: str = "grpc_asyncio", request_type=dataset_service.DeleteDatasetRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_dataset_async_from_dict():
await test_delete_dataset_async(request_type=dict)
def test_delete_dataset_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_dataset_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.DeleteDatasetRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_dataset(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_dataset_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_dataset_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_dataset(
dataset_service.DeleteDatasetRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_dataset_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_dataset(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_dataset_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_dataset(
dataset_service.DeleteDatasetRequest(), name="name_value",
)
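# Illustrative sketch, not part of the generated suite: the rule the
# *_flattened and *_flattened_error tests above exercise. A GAPIC method
# accepts either a full request object or individual ("flattened") keyword
# fields; passing both at once raises ValueError during argument validation,
# before any RPC is attempted. Resource names below are placeholders.
def _example_flattened_arguments_are_exclusive():
    client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        # The flattened form alone is fine; the client builds the request.
        client.delete_dataset(name="projects/p/locations/l/datasets/d")
    # Combining a request object with a flattened field is rejected.
    with pytest.raises(ValueError):
        client.delete_dataset(
            dataset_service.DeleteDatasetRequest(),
            name="projects/p/locations/l/datasets/d",
        )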
@pytest.mark.parametrize("request_type", [dataset_service.ImportDataRequest, dict,])
def test_import_data(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_import_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
client.import_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
@pytest.mark.asyncio
async def test_import_data_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ImportDataRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_data_async_from_dict():
await test_import_data_async(request_type=dict)
def test_import_data_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_data_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ImportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.import_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_import_data_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.import_data(
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].import_configs
mock_val = [
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
]
assert arg == mock_val
def test_import_data_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.import_data(
dataset_service.ImportDataRequest(),
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
@pytest.mark.asyncio
async def test_import_data_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.import_data(
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].import_configs
mock_val = [
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
]
assert arg == mock_val
@pytest.mark.asyncio
async def test_import_data_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.import_data(
dataset_service.ImportDataRequest(),
name="name_value",
import_configs=[
dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=["uris_value"]))
],
)
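# Illustrative sketch, not part of the generated suite: the ImportDataConfig
# shape the flattened import_data tests above assert on. A real import would
# typically also set fields such as an import schema URI; the Cloud Storage
# URI here is a placeholder.
def _example_import_config(uri="gs://bucket/path/data.jsonl"):
    return dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=[uri]))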
@pytest.mark.parametrize("request_type", [dataset_service.ExportDataRequest, dict,])
def test_export_data(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_export_data_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
client.export_data()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
@pytest.mark.asyncio
async def test_export_data_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ExportDataRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_data_async_from_dict():
await test_export_data_async(request_type=dict)
def test_export_data_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_data_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ExportDataRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.export_data(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_export_data_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.export_data(
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].export_config
mock_val = dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
assert arg == mock_val
def test_export_data_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.export_data(
dataset_service.ExportDataRequest(),
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
@pytest.mark.asyncio
async def test_export_data_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.export_data(
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
arg = args[0].export_config
mock_val = dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_export_data_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.export_data(
dataset_service.ExportDataRequest(),
name="name_value",
export_config=dataset.ExportDataConfig(
gcs_destination=io.GcsDestination(
output_uri_prefix="output_uri_prefix_value"
)
),
)
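# Illustrative sketch, not part of the generated suite: the ExportDataConfig
# shape the flattened export_data tests above assert on, with a placeholder
# Cloud Storage prefix for the exported files.
def _example_export_config(prefix="gs://bucket/exports/"):
    return dataset.ExportDataConfig(
        gcs_destination=io.GcsDestination(output_uri_prefix=prefix)
    )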
@pytest.mark.parametrize("request_type", [dataset_service.ListDataItemsRequest, dict,])
def test_list_data_items(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
response = client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_data_items_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
client.list_data_items()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
@pytest.mark.asyncio
async def test_list_data_items_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListDataItemsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListDataItemsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataItemsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_data_items_async_from_dict():
await test_list_data_items_async(request_type=dict)
def test_list_data_items_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = dataset_service.ListDataItemsResponse()
client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_data_items_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListDataItemsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse()
)
await client.list_data_items(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_data_items_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListDataItemsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_data_items(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_data_items_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_data_items(
dataset_service.ListDataItemsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_data_items_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListDataItemsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_data_items(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_data_items_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_data_items(
dataset_service.ListDataItemsRequest(), parent="parent_value",
)
def test_list_data_items_pager(transport_name: str = "grpc"):
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_data_items(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, data_item.DataItem) for i in results)
def test_list_data_items_pages(transport_name: str = "grpc"):
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_data_items), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
pages = list(client.list_data_items(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_data_items_async_pager():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
async_pager = await client.list_data_items(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, data_item.DataItem) for i in responses)
@pytest.mark.asyncio
async def test_list_data_items_async_pages():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_items), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListDataItemsResponse(
data_items=[
data_item.DataItem(),
data_item.DataItem(),
data_item.DataItem(),
],
next_page_token="abc",
),
dataset_service.ListDataItemsResponse(
data_items=[], next_page_token="def",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(),], next_page_token="ghi",
),
dataset_service.ListDataItemsResponse(
data_items=[data_item.DataItem(), data_item.DataItem(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_data_items(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
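# Illustrative sketch, not part of the generated suite: how the pager tests
# above drive pagination. Each mocked response advertises the next page via
# next_page_token; the pager keeps issuing the RPC until it sees an empty
# token, which is why the trailing RuntimeError sentinel in side_effect is
# never reached when the token sequence ends with "".
def _example_page_token_sequence():
    responses = (
        dataset_service.ListDataItemsResponse(
            data_items=[data_item.DataItem()], next_page_token="abc",
        ),
        dataset_service.ListDataItemsResponse(data_items=[data_item.DataItem()],),
    )
    # Yields ["abc", ""]: the empty final token tells the pager to stop.
    return [page.next_page_token for page in responses]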
@pytest.mark.parametrize(
"request_type", [dataset_service.GetAnnotationSpecRequest, dict,]
)
def test_get_annotation_spec(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
response = client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
def test_get_annotation_spec_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
client.get_annotation_spec()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
@pytest.mark.asyncio
async def test_get_annotation_spec_async(
transport: str = "grpc_asyncio",
request_type=dataset_service.GetAnnotationSpecRequest,
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
)
)
response = await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_annotation_spec_async_from_dict():
await test_get_annotation_spec_async(request_type=dict)
def test_get_annotation_spec_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetAnnotationSpecRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_annotation_spec_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.GetAnnotationSpecRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
await client.get_annotation_spec(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_annotation_spec_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_annotation_spec(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_annotation_spec_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_annotation_spec(
dataset_service.GetAnnotationSpecRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_annotation_spec(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_annotation_spec_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_annotation_spec(
dataset_service.GetAnnotationSpecRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [dataset_service.ListAnnotationsRequest, dict,]
)
def test_list_annotations(request_type, transport: str = "grpc"):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListAnnotationsResponse(
next_page_token="next_page_token_value",
)
response = client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_annotations_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
client.list_annotations()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
@pytest.mark.asyncio
async def test_list_annotations_async(
transport: str = "grpc_asyncio", request_type=dataset_service.ListAnnotationsRequest
):
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListAnnotationsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == dataset_service.ListAnnotationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAnnotationsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_annotations_async_from_dict():
await test_list_annotations_async(request_type=dict)
def test_list_annotations_field_headers():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListAnnotationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
call.return_value = dataset_service.ListAnnotationsResponse()
client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_annotations_field_headers_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = dataset_service.ListAnnotationsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListAnnotationsResponse()
)
await client.list_annotations(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_annotations_flattened():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset_service.ListAnnotationsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_annotations(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_annotations_flattened_error():
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_annotations(
dataset_service.ListAnnotationsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_annotations_flattened_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset_service.ListAnnotationsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_annotations(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_annotations_flattened_error_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_annotations(
dataset_service.ListAnnotationsRequest(), parent="parent_value",
)
def test_list_annotations_pager(transport_name: str = "grpc"):
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_annotations(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, annotation.Annotation) for i in results)
def test_list_annotations_pages(transport_name: str = "grpc"):
client = DatasetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_annotations), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
pages = list(client.list_annotations(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_annotations_async_pager():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotations), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
async_pager = await client.list_annotations(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, annotation.Annotation) for i in responses)
@pytest.mark.asyncio
async def test_list_annotations_async_pages():
    client = DatasetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_annotations), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
dataset_service.ListAnnotationsResponse(
annotations=[
annotation.Annotation(),
annotation.Annotation(),
annotation.Annotation(),
],
next_page_token="abc",
),
dataset_service.ListAnnotationsResponse(
annotations=[], next_page_token="def",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(),], next_page_token="ghi",
),
dataset_service.ListAnnotationsResponse(
annotations=[annotation.Annotation(), annotation.Annotation(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_annotations(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DatasetServiceClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and credentials.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DatasetServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatasetServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DatasetServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DatasetServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DatasetServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DatasetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.DatasetServiceGrpcTransport,)
def test_dataset_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DatasetServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_dataset_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DatasetServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_dataset",
"get_dataset",
"update_dataset",
"list_datasets",
"delete_dataset",
"import_data",
"export_data",
"list_data_items",
"get_annotation_spec",
"list_annotations",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_dataset_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatasetServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_dataset_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatasetServiceTransport()
adc.assert_called_once()
def test_dataset_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DatasetServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DatasetServiceGrpcTransport, grpc_helpers),
(transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_dataset_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
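# Illustrative sketch, not part of the generated suite: the channel options
# asserted in the create_channel call above. Setting both limits to -1 lifts
# gRPC's default message-size limits, which matters for large dataset
# import/export payloads.
_EXAMPLE_UNLIMITED_MESSAGE_OPTIONS = [
    ("grpc.max_send_message_length", -1),
    ("grpc.max_receive_message_length", -1),
]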
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_dataset_service_host_no_port():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_dataset_service_host_with_port():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
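# Illustrative sketch, not part of the generated suite: the endpoint
# normalization the two host tests above rely on. A bare API endpoint gains
# the default ":443" port, while an explicitly supplied port is preserved.
def _example_expected_transport_host(api_endpoint):
    return api_endpoint if ":" in api_endpoint else api_endpoint + ":443"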
def test_dataset_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatasetServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_dataset_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatasetServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.DatasetServiceGrpcTransport,
transports.DatasetServiceGrpcAsyncIOTransport,
],
)
def test_dataset_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_dataset_service_grpc_lro_client():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_dataset_service_grpc_lro_async_client():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_annotation_path():
project = "squid"
location = "clam"
dataset = "whelk"
data_item = "octopus"
annotation = "oyster"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(
project=project,
location=location,
dataset=dataset,
data_item=data_item,
annotation=annotation,
)
actual = DatasetServiceClient.annotation_path(
project, location, dataset, data_item, annotation
)
assert expected == actual
def test_parse_annotation_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"dataset": "mussel",
"data_item": "winkle",
"annotation": "nautilus",
}
path = DatasetServiceClient.annotation_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_annotation_path(path)
assert expected == actual
def test_annotation_spec_path():
project = "scallop"
location = "abalone"
dataset = "squid"
annotation_spec = "clam"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
project=project,
location=location,
dataset=dataset,
annotation_spec=annotation_spec,
)
actual = DatasetServiceClient.annotation_spec_path(
project, location, dataset, annotation_spec
)
assert expected == actual
def test_parse_annotation_spec_path():
expected = {
"project": "whelk",
"location": "octopus",
"dataset": "oyster",
"annotation_spec": "nudibranch",
}
path = DatasetServiceClient.annotation_spec_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_annotation_spec_path(path)
assert expected == actual
def test_data_item_path():
project = "cuttlefish"
location = "mussel"
dataset = "winkle"
data_item = "nautilus"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(
project=project, location=location, dataset=dataset, data_item=data_item,
)
actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item)
assert expected == actual
def test_parse_data_item_path():
expected = {
"project": "scallop",
"location": "abalone",
"dataset": "squid",
"data_item": "clam",
}
path = DatasetServiceClient.data_item_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_data_item_path(path)
assert expected == actual
def test_dataset_path():
project = "whelk"
location = "octopus"
dataset = "oyster"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
actual = DatasetServiceClient.dataset_path(project, location, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"dataset": "mussel",
}
path = DatasetServiceClient.dataset_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_dataset_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DatasetServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = DatasetServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder,)
actual = DatasetServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = DatasetServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization,)
actual = DatasetServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = DatasetServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project,)
actual = DatasetServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = DatasetServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DatasetServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = DatasetServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DatasetServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DatasetServiceTransport, "_prep_wrapped_messages"
) as prep:
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DatasetServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DatasetServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DatasetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = DatasetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(DatasetServiceClient, transports.DatasetServiceGrpcTransport),
(DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-aiplatform
|
tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py
|
Python
|
apache-2.0
| 154,357
|
[
"Octopus"
] |
6fe2fe9d7eccc976a7ccd2f8213b26289f4a983445339f16015dfccdf572d22f
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Model a single layer in a nueral network.
These classes deal with a layers in the neural network (ie. the input layer,
hidden layers and the output layer).
"""
# standard library
import math
import random
from Bio._py3k import range
def logistic_function(value):
"""Transform the value with the logistic function.
XXX This is in the wrong place -- I need to find a place to put it
that makes sense.
"""
return 1.0 / (1.0 + math.exp(-value))
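# Note added for clarity: the derivative of this logistic function is
# value * (1 - value), which is exactly the correction factor used in the
# backpropagation error terms of the layer classes below.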
class AbstractLayer(object):
"""Abstract base class for all layers.
"""
def __init__(self, num_nodes, has_bias_node):
"""Initialize the layer.
Arguments:
o num_nodes -- The number of nodes that are contained in this layer.
        o has_bias_node -- Specify whether or not this layer has a bias
        node. The bias node is not included in the stated number of nodes for the
        layer, but is used in constructing and dealing with the network.
"""
# specify all of the nodes in the network
if has_bias_node:
lower_range = 0
else:
lower_range = 1
self.nodes = list(range(lower_range, num_nodes + 1))
self.weights = {}
def __str__(self):
"""Debugging output.
"""
return "weights: %s" % self.weights
def set_weight(self, this_node, next_node, value):
"""Set a weight value from one node to the next.
If weights are not explicitly set, they will be initialized to
random values to start with.
"""
if (this_node, next_node) not in self.weights:
raise ValueError("Invalid node values passed.")
self.weights[(this_node, next_node)] = value
class InputLayer(AbstractLayer):
def __init__(self, num_nodes, next_layer):
"""Initialize the input layer.
Arguments:
o num_nodes -- The number of nodes in the input layer.
o next_layer -- The next layer in the neural network this is
connected to.
"""
AbstractLayer.__init__(self, num_nodes, 1)
self._next_layer = next_layer
# set up the weights
self.weights = {}
for own_node in self.nodes:
for other_node in self._next_layer.nodes:
self.weights[(own_node, other_node)] = \
random.randrange(-2.0, 2.0)
# set up the weight changes
self.weight_changes = {}
for own_node in self.nodes:
for other_node in self._next_layer.nodes:
self.weight_changes[(own_node, other_node)] = 0.0
# set up the calculated values for each node -- these will
# actually just be set from inputs into the network.
self.values = {}
for node in self.nodes:
# set the bias node -- always has a value of 1
if node == 0:
self.values[0] = 1
else:
self.values[node] = 0
def update(self, inputs):
"""Update the values of the nodes using given inputs.
Arguments:
o inputs -- A list of inputs into the network -- this must be
equal to the number of nodes in the layer.
"""
if len(inputs) != len(self.values) - 1:
raise ValueError("Inputs do not match input layer nodes.")
# set the node values from the inputs
for input_num in range(len(inputs)):
self.values[input_num + 1] = inputs[input_num]
# propagate the update to the next layer
self._next_layer.update(self)
def backpropagate(self, outputs, learning_rate, momentum):
"""Recalculate all weights based on the last round of prediction.
Arguments:
o learning_rate -- The learning rate of the network
o momentum - The amount of weight to place on the previous weight
change.
o outputs - The output info we are using to calculate error.
"""
# first backpropagate to the next layers
next_errors = self._next_layer.backpropagate(outputs, learning_rate,
momentum)
for this_node in self.nodes:
for next_node in self._next_layer.nodes:
error_deriv = (next_errors[next_node] *
self.values[this_node])
delta = (learning_rate * error_deriv +
momentum * self.weight_changes[(this_node, next_node)])
# apply the change to the weight
self.weights[(this_node, next_node)] += delta
# remember the weight change for next time
self.weight_changes[(this_node, next_node)] = delta
class HiddenLayer(AbstractLayer):
def __init__(self, num_nodes, next_layer, activation=logistic_function):
"""Initialize a hidden layer.
Arguments:
o num_nodes -- The number of nodes in this hidden layer.
o next_layer -- The next layer in the neural network that this
is connected to.
o activation -- The transformation function used to transform
predicted values.
"""
AbstractLayer.__init__(self, num_nodes, 1)
self._next_layer = next_layer
self._activation = activation
# set up the weights
self.weights = {}
for own_node in self.nodes:
for other_node in self._next_layer.nodes:
self.weights[(own_node, other_node)] = \
random.randrange(-2.0, 2.0)
# set up the weight changes
self.weight_changes = {}
for own_node in self.nodes:
for other_node in self._next_layer.nodes:
self.weight_changes[(own_node, other_node)] = 0.0
# set up the calculated values for each node
self.values = {}
for node in self.nodes:
# bias node
if node == 0:
self.values[node] = 1
else:
self.values[node] = 0
def update(self, previous_layer):
"""Update the values of nodes from the previous layer info.
Arguments:
o previous_layer -- The previous layer in the network.
"""
# update each node in this network
for update_node in self.nodes[1:]:
# sum up the weighted inputs from the previous network
sum = 0.0
for node in previous_layer.nodes:
sum += (previous_layer.values[node] *
previous_layer.weights[(node, update_node)])
self.values[update_node] = self._activation(sum)
# propagate the update to the next layer
self._next_layer.update(self)
def backpropagate(self, outputs, learning_rate, momentum):
"""Recalculate all weights based on the last round of prediction.
Arguments:
o learning_rate -- The learning rate of the network
o momentum - The amount of weight to place on the previous weight
change.
o outputs - The output values we are using to see how good our
network is at predicting things.
"""
# first backpropagate to the next layers
next_errors = self._next_layer.backpropagate(outputs, learning_rate,
momentum)
# --- update the weights
for this_node in self.nodes:
for next_node in self._next_layer.nodes:
error_deriv = (next_errors[next_node] *
self.values[this_node])
delta = (learning_rate * error_deriv +
momentum * self.weight_changes[(this_node, next_node)])
# apply the change to the weight
self.weights[(this_node, next_node)] += delta
# remember the weight change for next time
self.weight_changes[(this_node, next_node)] = delta
# --- calculate error terms
errors = {}
for error_node in self.nodes:
# get the error info propagated from the next layer
previous_error = 0.0
for next_node in self._next_layer.nodes:
previous_error += (next_errors[next_node] *
self.weights[(error_node, next_node)])
# get the correction factor
corr_factor = (self.values[error_node] *
(1 - self.values[error_node]))
# calculate the error
errors[error_node] = previous_error * corr_factor
return errors
class OutputLayer(AbstractLayer):
def __init__(self, num_nodes, activation=logistic_function):
"""Initialize the Output Layer.
Arguments:
o num_nodes -- The number of nodes in this layer. This corresponds
to the number of outputs in the neural network.
o activation -- The transformation function used to transform
predicted values.
"""
AbstractLayer.__init__(self, num_nodes, 0)
self._activation = activation
self.values = {}
for node in self.nodes:
self.values[node] = 0
def update(self, previous_layer):
"""Update the value of output nodes from the previous layers.
Arguments:
o previous_layer -- The hidden layer preceding this.
"""
# update all of the nodes in this layer
for update_node in self.nodes:
# sum up the contribution from all of the previous inputs
sum = 0.0
for node in previous_layer.nodes:
sum += (previous_layer.values[node] *
previous_layer.weights[(node, update_node)])
self.values[update_node] = self._activation(sum)
def backpropagate(self, outputs, learning_rate, momentum):
"""Calculate the backpropagation error at a given node.
This calculates the error term using the formula:
p = (z - t) z (1 - z)
where z is the calculated value for the node, and t is the
real value.
Arguments:
o outputs - The list of output values we use to calculate the
errors in our predictions.
"""
errors = {}
for node in self.nodes:
calculated_value = self.values[node]
real_value = outputs[node - 1]
errors[node] = ((real_value - calculated_value) *
calculated_value *
(1 - calculated_value))
return errors
def get_error(self, real_value, node_number):
"""Return the error value at a particular node.
"""
predicted_value = self.values[node_number]
return 0.5 * math.pow((real_value - predicted_value), 2)
def set_weight(self, this_node, next_node, value):
raise NotImplementedError("Can't set weights for the output layer")
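if __name__ == "__main__":
    # Minimal wiring sketch, added for illustration only (not part of the original module).
    # Layers are constructed from the output back to the input, because each layer keeps a
    # reference to the layer that follows it. The node counts and values below are arbitrary.
    output = OutputLayer(1)
    hidden = HiddenLayer(3, output)
    inputs = InputLayer(2, hidden)
    inputs.update([0.5, 0.25])             # forward pass through all layers
    inputs.backpropagate([1.0], 0.5, 0.1)  # one weight update (learning rate 0.5, momentum 0.1)
    print(output.values)                   # predicted output node values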
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/NeuralNetwork/BackPropagation/Layer.py
|
Python
|
gpl-2.0
| 11,144
|
[
"Biopython"
] |
09d3b748e20726a7bfef95a0c94dd445ac7b986fdabc520450209363712e6e7c
|
import tensorflow as tf
from .template import BaseLayer
class Dropout(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, dropout_below=0.5, noise_shape=None):
'''
Args:
dropout_below(float): probability of the inputs from the layer below
            being masked out
noise_shape (list or tuple): shape of the noise: example [-1, 2, -1] which applies
noise to the second dimension only
'''
self.dropout_below = dropout_below
self.noise_shape = noise_shape
def _test_fprop(self, state_below):
"""
Description:
Since input is already scaled up during training, therefore during
testing, we don't need to scale the inputs again
"""
return state_below
def _train_fprop(self, state_below):
"""
Description:
Applies dropout to the layer during training with probability keep_prob,
outputs the input element scaled up by 1 / keep_prob
Args:
            keep_prob (derived internally as 1 - dropout_below): probability of keeping each neuron active
"""
if self.noise_shape is not None:
assert len(state_below.get_shape()) == len(self.noise_shape)
noise_shape = []
for i, v in enumerate(self.noise_shape):
if v == -1 or v is None:
noise_shape.append(tf.shape(state_below)[i])
else:
noise_shape.append(v)
self.noise_shape = noise_shape
return tf.nn.dropout(state_below, keep_prob=1-self.dropout_below,
noise_shape=self.noise_shape)
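# Illustrative usage sketch, added for clarity; it assumes the TF 1.x graph-mode API that
# tf.nn.dropout above already relies on, and it is not part of the original module:
#     x = tf.placeholder(tf.float32, shape=[None, 10, 4])
#     layer = Dropout(dropout_below=0.2, noise_shape=[-1, 1, -1])
#     y_train = layer._train_fprop(x)  # mask broadcast along dim 1, inputs scaled by 1/keep_prob
#     y_test = layer._test_fprop(x)    # identity at test time, since scaling happened in training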
|
hycis/TensorGraph
|
tensorgraph/layers/noise.py
|
Python
|
apache-2.0
| 1,668
|
[
"NEURON"
] |
b935333a76a1a3d72fde96096a56911145f8b00c286a0203f8040539404a6cb4
|
# -*- coding: utf-8 -*-
# note that internally 'yes'/'no' are converted to True/False; and so one can use 'yes'/'no'
# but definitely *do not* use 'True'/'False' since they are not the same as True/False...
# CSSSTYLESHEET presupposes './server/css' as part of its path; i.e. you
# will want to put custom CSS in the same directory as the
# default installed css ('hipparchiastyles.css')
#
# SUPPRESSCOLORS if set to True will set all colors to black in the CSS.
#
# HOSTEDFONTFAMILY sets which of the font families that Hipparchia downloads upon
# installation will be served to clients. Any of them should ensure full
# coverage for both Greek and Latin without any need to have a special font
# installed at the system level by people who visit. HipparchiaThirdPartySoftware contains more
# hostable fonts inside the 'extra_fonts' directory. Install the TTF files into
# ~/hipparchia_venv/HipparchiaServer/server/static/ttf
#
# USEFONTFILESFORSTYLES will use something like Roboto-BoldItalic.ttf instead of
# using CSS commands like font-style: italic; + font-weight: bold;
#
# DEFAULTLOCALFONT sets the global font. A generic unicode font with good coverage
# is what you want to pick. You are also responsible for getting the name
# right. And, most importantly, it should be installed at the system-level for
# anyone who visits.
#
# DEFAULTLOCALGREEKFONT is meaningful only if DISTINCTGREEKANDLATINFONTS is True.
# In that case Greek characters will display in this font
#
# DEFAULTLOCALNONGREEKFONT is meaningful only if DISTINCTGREEKANDLATINFONTS is True.
# In that case all non-Greek characters will display in this font.
#
# ENBALEFONTPICKER allows you to select fonts from the web interface; but see notes on
# FONTPICKERLIST before enabling this. Anything other than True disables this option.
# Fontpicking is not working properly on gunicorn installations ATM; just go for the notostyles.css option
# as per the installation instructions and the CSS notes
#
# FONTPICKERLIST is a list of fonts to choose from. These can EITHER be served OR local to the client.
# The item set here alters DEFAULTLOCALFONT in the CSS. To avoid problems the list should contain
# only HOSTEDFONTS or fonts you know you have installed. Also, remote users can get in trouble here:
# if they choose 'GFSOrpheusSans' what are the chances that it is already installed on their system?
#
CSSSTYLESHEET = 'hipparchiastyles.css'
DISTINCTGREEKANDLATINFONTS = False
SUPPRESSCOLORS = False
HOSTEDFONTFAMILY = 'Noto' # Noto should be pre-installed by Hipparchia; see above about adding more
USEFONTFILESFORSTYLES = True # Only valid if you are using a HOSTEDFONTFAMILY
DEFAULTLOCALFONT = 'yourfonthere_otherwise_fallbacktohipparchiahostedfonts' # Arial is often present and it is very good
DEFAULTLOCALGREEKFONT = 'yourfonthere_otherwise_fallbacktohipparchiahostedfonts'
DEFAULTLOCALNONGREEKFONT = 'yourfonthere_otherwise_fallbacktohipparchiahostedfonts'
ENBALEFONTPICKER = False
FONTPICKERLIST = ['Noto', 'CMUSans', 'CMUSerif', 'DejaVuSans', 'DejaVuSerif', 'EBGaramond',
'Fira', 'IBMPlex', 'Roboto', 'Ubuntu'] # see above about editing this list
|
e-gun/HipparchiaServer
|
server/sample_settings/htmlandcssstylesettings.py
|
Python
|
gpl-3.0
| 3,185
|
[
"VisIt"
] |
4a148461f4335ac3e0a07ae141fbc814876bf202c798eb8041263a62d45568c4
|
#!/usr/bin/env python
""" Convert BLOSUM62 from log odds to a probability matrix
This script is used to generate ``hw2/blosum62.py`` by calling::
python convert_blosum.py > hw2/blosum62.py
BLOSUM62 uses the log odds of the probability of an amino acid substitution
to align protein sequences. The CPASS similarity metric I'm using wants the
normalized substitution probabilities, so this script converts the scores
backwards into probabilities.
Henikoff, S., and Henikoff, J.G. (1992). Amino acid substitution matrices from
protein blocks. Proceedings of the National Academy of Sciences 89,
10915–10919.
BLOSUM62 matrix at `data/blosum62.txt` from
`NIH BLAST <ftp://ftp.ncbi.nih.gov/blast/matrices/BLOSUM62>`_
"""
import pathlib
BLOSUM62_FILE = pathlib.Path('data/blosum62.txt')
AA_THREE_LETTERS = {
'A': 'ala',
'R': 'arg',
'N': 'asn',
'D': 'asp',
'B': 'asx',
'C': 'cys',
'E': 'glu',
'Q': 'gln',
'G': 'gly',
'H': 'his',
'I': 'ile',
'L': 'leu',
'K': 'lys',
'M': 'met',
'F': 'phe',
'P': 'pro',
'S': 'ser',
'T': 'thr',
'W': 'trp',
'Y': 'tyr',
'V': 'val',
'X': '*', # Extra codes, unused by this algorithm
'Z': '*',
'*': '*',
}
blosum62 = {}
with BLOSUM62_FILE.open('rt') as fp:
# Skip through the commented out header
for line in fp:
if line.strip().startswith('#'):
continue
break
header = line.split(' ')
header = [h.strip() for h in header]
header = [h for h in header if h != '']
header = [AA_THREE_LETTERS[h].upper() for h in header]
for line in fp:
line = [l.strip() for l in line.split(' ')]
line = [l for l in line if l != '']
# Convert scores to int
key, *scores = line
key = AA_THREE_LETTERS[key].upper()
if key == '*':
continue
# BLOSUM62 log odds is 2 * log_base2(p_ij / (q_i * q_j))
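        # Inverting that relation: p_ij / (q_i * q_j) = 2 ** (score / 2), which is the
        # normalized substitution value computed and stored below.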
scores = [2**(float(s)/2.0) for s in scores]
assert len(scores) == len(header)
for h, s in zip(header, scores):
if h == '*':
continue
blosum62[(h, key)] = s
# Sanity check
# Assert that the matrix is symmetric
differences = []
for aa1, aa2 in blosum62:
assert blosum62[aa1, aa2] == blosum62[aa2, aa1]
print('BLOSUM62 = {')
for aa1, aa2 in sorted(blosum62):
print(' ("{}", "{}"): {},'.format(aa1, aa2, blosum62[aa1, aa2]))
print('}')
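# Illustrative downstream use of the generated table (the hw2/blosum62.py module named in the
# docstring above); shown as a comment because that module only exists after running this script:
#     from hw2.blosum62 import BLOSUM62
#     BLOSUM62[("ALA", "ARG")]  # normalized substitution value for Ala -> Arg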
|
david-joy/bmi203-hw2
|
convert_blosum.py
|
Python
|
apache-2.0
| 2,448
|
[
"BLAST"
] |
e703f2c3fccddd5bfc8171b5018bb030f81c368b389d9e19479c4c38131aa775
|
""" Test Gaussian estimator. """
import numpy as np
import unittest
from .. import gaussian
class TestShortHash(unittest.TestCase):
def test_gaussian_init(self):
data = np.random.normal(3, 0.5, size=10)
estimator = gaussian.GaussianEstimator(data)
class TestGaussianEstimator(unittest.TestCase):
def test_repr(self):
N = 5
y = np.random.normal(5.2, 0.4, size=N)
yerr = np.abs(np.random.normal(0, 0.1, size=N))
model = gaussian.GaussianEstimator(y=y, yerr=yerr)
print(model)
def test_attributes(self):
N = 5
y = np.random.normal(5.2, 0.4, size=N)
yerr = np.abs(np.random.normal(0, 0.1, size=N))
model = gaussian.GaussianEstimator(y=y, yerr=yerr)
model.data
model.quantum
model.message_length
model.dimensions
model.parameter_names
model.log_prior
model.log_fisher
model.log_likelihood
def test_negative_error(self):
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[0], yerr=[-1])
def test_data_shape_mismatch(self):
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[0], yerr=[0.1, 0.1])
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[1, 1], yerr=0.1)
def test_proper_prior_on_mean(self):
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[1], mean_bounds=(None, 3))
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[1], mean_bounds=(3, None))
self.assertIsNotNone(
gaussian.GaussianEstimator(y=[1], mean_bounds=(None, None)))
self.assertIsNotNone(
gaussian.GaussianEstimator(y=[1], mean_bounds=None))
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[1], mean_bounds=[3])
with self.assertRaises(ValueError):
gaussian.GaussianEstimator(y=[1], mean_bounds=[1,2,3])
bounds = [5, 1]
model = gaussian.GaussianEstimator(y=[2], mean_bounds=bounds)
self.assertEqual(model.bounds[0][0], bounds[1])
def test_optimization(self):
N = 5
y = np.random.normal(5.2, 0.4, size=N)
yerr = np.abs(np.random.normal(0, 0.1, size=N))
model = gaussian.GaussianEstimator(y=y, yerr=yerr)
before = model.message_length
model.optimize()
after = model.message_length
self.assertTrue(after <= before)
# Ensure we hit the warning flag.
model.optimize(maxiter=0, maxfun=0, factr=10, pgtol=1e-30)
def test_prior_on_mean(self):
N = 5
y = np.random.normal(5.2, 0.4, size=N)
yerr = np.abs(np.random.normal(0, 0.1, size=N))
without_prior = gaussian.GaussianEstimator(y=y, yerr=yerr)
without_prior.optimize()
with_prior = gaussian.GaussianEstimator(
y=y, yerr=yerr, mean_bounds=[1, 10])
with_prior.optimize()
print("with", with_prior.message_length)
print("without", without_prior.message_length)
#self.assertTrue(
# with_prior.message_length > without_prior.message_length)
print("TODO CHECK PRIOR ON MEAN")
def test_prior_on_sigma(self):
N = 5
y = np.random.normal(5.2, 0.4, size=N)
yerr = np.abs(np.random.normal(0, 0.1, size=N))
model = gaussian.GaussianEstimator(
y=y, yerr=yerr, sigma_upper_bound=10)
model.optimize()
|
andycasey/snob
|
snob/tests/test_gaussian.py
|
Python
|
mit
| 3,591
|
[
"Gaussian"
] |
aa05537ef4f710950efa396fa0d922138bf4e7c23e55f75645eaa5d20c2c85b4
|
"""
ioHub
.. file: ioHub/examples/ioHubAccessDelayTest/run.py
Copyright (C) 2012 Sol Simpson
Distributed under the terms of the GNU General Public License (GPL version 3 or any later version).
.. moduleauthor:: Sol Simpson <sol@isolver-software.com> + contributors, please see credits section of documentation.
.. fileauthor:: Sol Simpson <sol@isolver-software.com>
------------------------------------------------------------------------------------------------------------------------
ioHubAccessDelayTest
++++++++++++++++++++
Overview:
---------
This script is implemented by extending the ioHub.psychopyIOHubRuntime.SimpleIOHubRuntime class into a class
called ExperimentRuntime. The ExperimentRuntime class provides a utility object to run a psychopy script and
also launches the ioHub server process so the script has access to the ioHub service and associated devices.
The program loads many configuration values for the experiment process by using the experiment_Config.yaml file that
is located in the same directory as this script. Configuration settings for the ioHub server process are defined in
the ioHub_configuration.yaml file.
The __main__ of this script file simply calls the start() function, which creates the ExperimentRuntime class instance,
calls the run() method for the instance which is what contains the actual 'program / experiment execution code' ,
and then when run() completes, closes the ioHubServer process and ends the local program.
Description:
-----------
The main purpose of the ioHubAccessDelayTest is to test the round trip time it takes to request and receive events
from the I/O hub. Retrace intervals are also calculated and stored to monitor for skipped retraces.
A full screen Window is opened that shows some graphics, including a moving grating as well as a small gaussian
that is controlled by mouse events from the ioHub. At the top of the screen is an area that will display the last key
pressed on the keyboard.
The script runs until 1000 getEvent() requests to the ioHub have returned with >= 1 event. A number near the
bottom of the screen displays the number of remaining successful getEvent calls before the experiment will end.
By default the script also sends an Experiment MessageEvent to the ioHub on each retrace. This message is stored
in the ioHub datafile, but is also sent back as an ioHub MessageEvent to the experiment. Therefore, the getEvent()
request counter shown on the screen will decrease even if you do not move your mouse or keyboard,
as the MessageEvents are retrieved from the ioHub Server.
At the end of the test, plots are displayed showing the getEvent() round trip delays in a histogram,
the retrace intervals as a function of time, and the retrace intervals in a histogram. All times in the plots are
in msec.
To Run:
-------
1. Ensure you have followed the ioHub installation instructions at http://www.github.com/isolver/iohub
2. Edit the experiment_config.yaml file that is in the same directory as the run.py script you will be starting. See the
comments at the top of each config file regarding any parameters that 'must' be changed for the program to run.
In this example, nothing 'must' be changed.
3. Open a command prompt to the directory containing this file.
4. Start the test program by running:
python.exe run.py
Any issues or questions, please let me know.
Notes:
------
If you get high MAX delays, turn off cloud drive apps, especially Google Drive; that fixes it for me.
If you are getting dropped frames, try commenting out the text stim that changes based on the number of getEvents()
left to call. It seems that resetting text on a text stim is a 'very' expensive operation.
"""
from __builtin__ import len, isinstance, dict, float, sum, int, unicode
from exceptions import Exception
import time
import ioHub
from ioHub.psychopyIOHubRuntime import SimpleIOHubRuntime, core, visual
from numpy import zeros
class ExperimentRuntime(SimpleIOHubRuntime):
def __init__(self,configFileDirectory, configFile):
SimpleIOHubRuntime.__init__(self,configFileDirectory,configFile)
self.initAttributes()
def initAttributes(self):
"""
"""
self.psychoStim = None
self.totalEventRequestsForTest=None
self.numEventRequests=0
self.totalEventRequestsForTest=None
self.psychoWindow=None
def run(self,*args,**kwargs):
"""
psychopy code is taken from an example psychopy script in the coder documentation.
"""
self.totalEventRequestsForTest=1000
#report process affinities
print "Current process affinities (experiment proc, ioHub proc):", self.getProcessAffinities()
print "ExperimentPCkeyboard methods:",self.hub.devices.kb.getRemoteMethodNames()
print "ExperimentPCmouse methods:",self.hub.devices.mouse.getRemoteMethodNames()
print "ExperimentRuntime methods:",self.hub.devices.experimentRuntime.getRemoteMethodNames()
print "ParallelPort methods:",self.hub.devices.parallelPort.getRemoteMethodNames()
self.hub.devices.mouse.setPosition((0.0,0.0))
# create fullscreen pyglet window at current resolution, as well as required resources / drawings
self.createPsychoGraphicsWindow()
# create stats numpy arrays, set experiment process and ioHubServer to high priority.
self.initTestResourcesAndState()
#draw and flip to the updated graphics state.
self.drawAndFlipPsychoWindow()
# START TEST LOOP >>>>>>>>>>>>>>>>>>>>>>>>>>
while self.numEventRequests < self.totalEventRequestsForTest:
# try sending an Experiment Event
self.hub.sendMessageEvent("This is a test message %.3f"%self.flipTime)
#draw and flip to the updated graphics state.
ifi=self.drawAndFlipPsychoWindow()
events,callDuration=self.checkForEvents()
if events:
# events were available
self.updateStats(events, callDuration, ifi)
# END TEST LOOP <<<<<<<<<<<<<<<<<<<<<<<<<<
        # _close necessary files / objects, disable high priority.
print "plot spinDownTest"
self.spinDownTest()
# plot collected delay and retrace detection results.
print "plot results"
self.plotResults()
def createPsychoGraphicsWindow(self):
#create a window
self.mouse=self.hub.devices.mouse
self.kb=self.hub.devices.kb
self.expRuntime=self.hub.devices.experimentRuntime
self.pport=self.hub.devices.parallelPort
self.display=self.hub.devices.display
self.hub.devices.display.getScreenResolution()
self.psychoWindow = visual.Window(self.display.getScreenResolution(),monitor="testMonitor", units=self.display.getDisplayCoordinateType(), fullscr=True, allowGUI=False)
currentPosition=self.mouse.setPosition((0,0))
print '###self.mouse.setVisibility:', self.mouse.setSysCursorVisibility(False)
self.instructionText2Pattern='%d'
self.psychoStim=ioHub.LastUpdatedOrderedDict()
self.psychoStim['grating'] = visual.PatchStim(self.psychoWindow, mask="circle", size=75,pos=[-100,0], sf=.075)
self.psychoStim['fixation'] =visual.PatchStim(self.psychoWindow, size=25, pos=[0,0], sf=0, color=[-1,-1,-1], colorSpace='rgb')
self.psychoStim['title'] =visual.TextStim(win=self.psychoWindow, text="ioHub getEvents Delay Test", pos = [0,125], height=36, color=[1,.5,0], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
self.psychoStim['instructions'] =visual.TextStim(win=self.psychoWindow, text='Move the mouse around, press keyboard keys and mouse buttons', pos = [0,-125], height=32, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
self.psychoStim['instructions2'] =visual.TextStim(win=self.psychoWindow, text=self.instructionText2Pattern%(self.totalEventRequestsForTest,), pos = [0,-250], color=[-1,-1,-1], height=32, colorSpace='rgb',alignHoriz='center',wrapWidth=800.0)
self.psychoStim['keytext'] =visual.TextStim(win=self.psychoWindow, text='key', pos = [0,300], height=48, color=[-1,-1,-1], colorSpace='rgb',alignHoriz='left',wrapWidth=800.0)
self.psychoStim['mouseDot'] =visual.GratingStim(win=self.psychoWindow,tex=None, mask="gauss", pos=currentPosition,size=(50,50),color='purple')
def drawAndFlipPsychoWindow(self):
self.psychoStim['grating'].setPhase(0.05, '+')#advance phase by 0.05 of a cycle
currentPosition=self.mouse.getPosition()
#print "Current Position:",currentPosition
self.psychoStim['mouseDot'].setPos(currentPosition)
[self.psychoStim[stimName].draw() for stimName in self.psychoStim]
self.flipTime=self.psychoWindow.flip()
d=self.flipTime-self.lastFlipTime
self.lastFlipTime=self.flipTime
return d
def checkForEvents(self):
# get the time we request events from the ioHub
stime=self.currentTime()
r = self.getEvents()
if r and len(r) > 0:
# so there were events returned in the request, so include this getEvent request in the tally
etime=self.currentTime()
dur=etime-stime
return r, dur*1000.0
return None,None
def initTestResourcesAndState(self):
if self.hub is None:
print "Error: ioHub must be enabled to run the testEventRetrievalTiming test."
return
# Init Results numpy array
self.results= zeros((self.totalEventRequestsForTest,3),dtype='f4')
self.numEventRequests=0
self.flipTime=0.0
self.lastFlipTime=0.0
# enable high priority mode for the experiment process and optionally the ioHub server process.
self.enableHighPriority()
# clear the ioHub event Buffer before starting the test.
# This is VERY IMPORTANT, given an existing bug in ioHub.
# You would want to do this before each trial started until the bug is fixed.
self.clearEvents()
def updateStats(self, events, duration, ifi):
self.results[self.numEventRequests][0]=duration # ctime it took to get events from ioHub
self.results[self.numEventRequests][1]=len(events) # number of events returned
self.results[self.numEventRequests][2]=ifi*1000.0 # calculating inter flip interval.
self.numEventRequests+=1 # incrementing tally counter
self.psychoStim['instructions2'].setText(self.instructionText2Pattern%(self.totalEventRequestsForTest-self.numEventRequests,))
for r in events:
if not isinstance(r,dict):
r=self._eventListToDict(r)
if r['event_type'] == ioHub.devices.EventConstants.EVENT_TYPES['KEYBOARD_PRESS']: #keypress code
keystring=r['key']
self.psychoStim['keytext'].setText(keystring)
def spinDownTest(self):
# OK, we have collected the number of requested getEvents, that have returned >0 events
# so _close psychopy window
self.psychoWindow.close()
# disable high priority in both processes
self.disableHighPriority()
def plotResults(self):
#### calculate stats on collected data and draw some plots ####
import matplotlib.mlab as mlab
from matplotlib.pyplot import axis, title, xlabel, hist, grid, show, ylabel, plot
import pylab
results= self.results
durations=results[:,0]
flips=results[1:,2]
dmin=durations.min()
dmax=durations.max()
dmean=durations.mean()
dstd=durations.std()
fmean=flips.mean()
fstd=flips.std()
pylab.figure(figsize=[30,10])
pylab.subplot(1,3,1)
# the histogram of the delay data
n, bins, patches = hist(durations, 50, normed=True, facecolor='blue', alpha=0.75)
# add a 'best fit' line
y = mlab.normpdf( bins, dmean, dstd)
plot(bins, y, 'r--', linewidth=1)
xlabel('ioHub getEvents Delay')
ylabel('Percentage')
title(
r'$\mathrm{{Histogram\ of\ Delay:}}\ \min={0:.3f},\ \max={1:.3f},\ \mu={2:.3f},\ \sigma={3:.4f}$'.format(
dmin, dmax, dmean, dstd))
axis([0, dmax+1.0, 0, 25.0])
grid(True)
# graphs of the retrace data ( taken from retrace example in psychopy demos folder)
intervalsMS = flips
m=fmean
sd=fstd
distString= "Mean={0:.1f}ms, s.d.={1:.1f}, 99%CI={2:.1f}-{3:.1f}".format(m, sd, m - 3 * sd, m + 3 * sd)
nTotal=len(intervalsMS)
nDropped=sum(intervalsMS>(1.5*m))
droppedString = "Dropped/Frames = {0:d}/{1:d} = {2:.3f}%".format(nDropped, nTotal, int(nDropped) / float(nTotal))
pylab.subplot(1,3,2)
#plot the frameintervals
pylab.plot(intervalsMS, '-')
pylab.ylabel('t (ms)')
pylab.xlabel('frame N')
pylab.title(droppedString)
pylab.subplot(1,3,3)
pylab.hist(intervalsMS, 50, normed=0, histtype='stepfilled')
pylab.xlabel('t (ms)')
pylab.ylabel('n frames')
pylab.title(distString)
show()
##################################################################
def main(configurationDirectory):
import sys
if len(sys.argv)>1:
configFile=unicode(sys.argv[1])
runtime=ExperimentRuntime(configurationDirectory, configFile)
else:
runtime=ExperimentRuntime(configurationDirectory, "experiment_config.yaml")
runtime.start()
if __name__ == "__main__":
configurationDirectory=ioHub.module_directory(main)
main(configurationDirectory)
|
peteristhegreat/ioHub
|
examples/ioHubAccessDelayTest/run.py
|
Python
|
gpl-3.0
| 13,760
|
[
"Gaussian"
] |
ce17680dcc7e4dde790ae9db6dcfedbe6a4bf6f6580b9f1d4cba33f5b89c073c
|
from __future__ import division
from . import cholesky_errors, mahalanobis, VariogramFourthRoot
from . import pivoted_cholesky
import numpy as np
from numpy.linalg import solve, cholesky
from scipy.linalg import cho_solve
import scipy.stats as stats
from statsmodels.sandbox.distributions.mv_normal import MVT
import seaborn as sns
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
from itertools import cycle
__all__ = ['Diagnostic', 'GraphicalDiagnostic']
class Diagnostic:
R"""A class for quickly testing model checking methods discussed in Bastos & O'Hagan.
This class is under construction and the implementation may change in the future.
Parameters
----------
mean : array, shape = (n_samples,)
The mean
cov : array, shape = (n_samples, n_samples)
The covariance
df : int, optional
The degrees of freedom. Defaults to `None`, which treats the distribution as Gaussian
random_state : int, optional
The random state for the random number generator
"""
def __init__(self, mean, cov, df=None, random_state=1):
self.mean = mean
self.cov = cov
self.sd = sd = np.sqrt(np.diag(cov))
if df is None:
# TODO: Handle when cov is ill-conditioned so multivariate_normal fails.
self.dist = stats.multivariate_normal(mean=mean, cov=cov)
# try:
# self.dist = stats.multivariate_normal(mean=mean, cov=cov)
# except np.linalg.LinAlgError:
# self.dist = None
self.udist = stats.norm(loc=mean, scale=sd)
self.std_udist = stats.norm(loc=0., scale=1.)
else:
sigma = cov * (df - 2) / df
self.dist = MVT(mean=mean, sigma=sigma, df=df)
self.udist = stats.t(loc=mean, scale=sd, df=df)
self.std_udist = stats.t(loc=0., scale=1., df=df)
self.dist.random_state = random_state
self.udist.random_state = random_state
self.std_udist.random_state = random_state
self._chol = cholesky(self.cov)
self._pchol = pivoted_cholesky(self.cov)
e, v = np.linalg.eigh(self.cov)
# To match Bastos and O'Hagan definition
# i.e., eigenvalues ordered from largest to smallest
e, v = e[::-1], v[:, ::-1]
ee = np.diag(np.sqrt(e))
self._eig = v @ ee
def samples(self, n):
R"""Sample random variables
Parameters
----------
n : int
The number of curves to sample
Returns
-------
array, shape = (n_samples, n_curves)
"""
return self.dist.rvs(n).T
def individual_errors(self, y):
R"""Computes the scaled individual errors diagnostic
.. math::
D_I(y) = \frac{y-m}{\sigma}
Parameters
----------
y : array, shape = (n_samples, [n_curves])
Returns
-------
array : shape = (n_samples, [n_curves])
"""
return ((y.T - self.mean) / np.sqrt(np.diag(self.cov))).T
def cholesky_errors(self, y):
return cholesky_errors(y.T, self.mean, self._chol).T
def pivoted_cholesky_errors(self, y):
return solve(self._pchol, (y.T - self.mean).T)
def eigen_errors(self, y):
return solve(self._eig, (y.T - self.mean).T)
def chi2(self, y):
return np.sum(self.individual_errors(y), axis=0)
def md_squared(self, y):
R"""Computes the squared Mahalanobis distance"""
return mahalanobis(y.T, self.mean, self._chol) ** 2
def kl(self, mean, cov):
R"""The Kullback-Leibler divergence between two multivariate normal distributions
.. math::
D_{KL}(N_0 | N_1) = \frac{1}{2} \left [
\mathrm{Tr}(\Sigma_1^{-1}\Sigma_0)
+ (\mu_1 - \mu_0)^T \Sigma_1^{-1} (\mu_1 - \mu_0)
- k + \log\left(\frac{\det \Sigma_1}{\det \Sigma_0}\right)
\right]
where :math:`k` is the dimension of Normal distributions. The :math:`\mu_1` and :math:`\Sigma_1` are those
fed during the initialization of the Diagnostic object, and :math:`\mu_0` and :math:`\Sigma_0` are the
arguments of this function.
Parameters
----------
mean : array, shape = (n_samples,)
cov : array, shape = (n_samples, n_samples)
Returns
-------
float
The KL divergence
"""
m1, c1, chol1 = self.mean, self.cov, self._chol
m0, c0 = mean, cov
tr = np.trace(cho_solve((chol1, True), c0))
dist = self.md_squared(m0)
k = c1.shape[-1]
        logs = 2 * np.sum(np.log(np.diag(chol1))) - np.linalg.slogdet(c0)[-1]  # log det(Sigma_1) via its Cholesky factor
return 0.5 * (tr + dist - k + logs)
def credible_interval(self, y, intervals):
"""The credible interval diagnostic.
Parameters
----------
y : (n_samples, [n_curves]) shaped array
intervals : 1d array
The credible intervals at which to perform the test
Returns
-------
array, shape = ([n_curves], n_intervals)
"""
lower, upper = self.udist.interval(np.atleast_2d(intervals).T)
def diagnostic(data_, lower_, upper_):
indicator = (lower_ < data_) & (data_ < upper_) # 1 if in, 0 if out
return np.average(indicator, axis=1) # The diagnostic
dci = np.apply_along_axis(
diagnostic, axis=1, arr=np.atleast_2d(y).T, lower_=lower, upper_=upper)
if y.ndim == 1:
dci = np.squeeze(dci) # If y didn't have n_curves dim, then remove it now.
return dci
@staticmethod
def variogram(X, y, bin_bounds):
R"""Computes the variogram for the data y at input points X.
Parameters
----------
X
y
bin_bounds
Returns
-------
v : array
bin_locations :
gamma :
lower :
upper :
"""
v = VariogramFourthRoot(X, y, bin_bounds)
bin_locations = v.bin_locations
gamma, lower, upper = v.compute(rt_scale=False)
return v, bin_locations, gamma, lower, upper
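# A minimal usage sketch for Diagnostic (illustrative values only, not from the original docs):
# with a known mean and covariance, the squared Mahalanobis distances of simulated curves can be
# compared against a chi-squared reference with n_samples degrees of freedom.
#
#     mean = np.zeros(4)
#     cov = np.eye(4)
#     diag = Diagnostic(mean, cov, random_state=1)
#     y = diag.samples(10)          # shape (n_samples, n_curves) = (4, 10)
#     md2 = diag.md_squared(y)      # one squared distance per simulated curve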
class GraphicalDiagnostic:
R"""A class for plotting diagnostics and their reference distributions.
This class is under construction and the implementation may change in the future.
Parameters
----------
data : array, shape = (n_samples, n_curves)
The data to compute diagnostics against
mean : array
The mean for the diagnostic object
cov : array
The covariance of the diagnostic object
df : int, optional
If a Student-t distribution, then this is the degrees of freedom. If `None`, it is
treated as Gaussian
random_state : int, optional
nref : int
The number of samples to use in computing a reference distribution by simulation
colors : list
The colors to use for each curve
markers : list
The markers to use for each curve, where applicable.
Examples
--------
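    A minimal sketch of typical usage; shapes and values here are illustrative only.
    >>> import numpy as np
    >>> mean, cov = np.zeros(5), np.eye(5)
    >>> y = np.random.default_rng(0).multivariate_normal(mean, cov, size=3).T
    >>> gd = GraphicalDiagnostic(y, mean, cov, nref=100)
    >>> ax = gd.md_squared(type='box')  # doctest: +SKIP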
"""
# See: https://ianstormtaylor.com/design-tip-never-use-black/
# soft_black = '#262626'
def __init__(self, data, mean, cov, df=None, random_state=1, nref=1000, colors=None, markers=None, labels=None,
gray='lightgray', black='#262626', markeredgecolors=None, markerfillstyles=None):
self.diagnostic = Diagnostic(mean=mean, cov=cov, df=df, random_state=random_state)
if data.ndim == 1:
data = np.atleast_2d(data).T # Add n_curves dim if it doesn't exist
self.data = data
self.samples = self.diagnostic.samples(nref)
prop_list = list(mpl.rcParams['axes.prop_cycle'])
if colors is None:
# The standard Matplotlib 2.0 colors, or whatever they've been updated to be.
colors = [c['color'] for c in prop_list]
if markers is None:
markers = ['o' for c in prop_list]
if markeredgecolors is None:
markeredgecolors = [None for c in prop_list]
if markerfillstyles is None:
markerfillstyles = ['full' for c in prop_list]
if labels is None:
labels = np.array([r'$c_{{{}}}$'.format(i) for i in range(data.shape[-1])])
self.labels = labels
self.markers = markers
self.markeredgecolors = markeredgecolors
self.markerfillstyles = markerfillstyles
self.marker_cycle = cycler('marker', colors)
self.colors = colors
self.color_cycle = cycler('color', colors)
self.gray = gray
self.black = black
n = len(cov)
if df is None:
self.md_ref_dist = stats.chi2(df=n)
else:
self.md_ref_dist = stats.f(dfn=n, dfd=df, scale=(df-2)*n/df)
def error_plot(self, err, title=None, xlabel='Index', ylabel=None, ax=None):
if ax is None:
ax = plt.gca()
ax.axhline(0, 0, 1, linestyle='-', color=self.black, lw=1, zorder=0)
# The standardized 2 sigma bands since the sd has been divided out.
sd = self.diagnostic.std_udist.std()
ax.axhline(-2 * sd, 0, 1, color=self.gray, zorder=0, lw=1)
ax.axhline(2 * sd, 0, 1, color=self.gray, zorder=0, lw=1)
index = np.arange(1, self.data.shape[0]+1)
size = 8
if err.ndim == 1:
err = err[:, None]
for i, error in enumerate(err.T):
ax.plot(
index, error, ls='', color=self.colors[i],
marker=self.markers[i], markeredgecolor=self.markeredgecolors[i],
fillstyle=self.markerfillstyles[i], markersize=size, markeredgewidth=0.5
)
# ax.scatter(
# index, error, color=self.colors[i], marker=self.markers[i],
# edgecolor=self.markeredgecolors[i], linestyle=self.markerlinestyles[i]
# )
from matplotlib.ticker import MaxNLocator
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel(xlabel)
ax.margins(x=0.05)
ax.set_ylabel(ylabel)
ax.set_title(title)
return ax
def individual_errors(self, title='Individual Errors', ax=None):
err = self.diagnostic.individual_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def individual_errors_qq(self, title='Individual QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.individual_errors,
title=title, ax=ax)
def cholesky_errors(self, title='Cholesky Errors', ax=None):
err = self.diagnostic.cholesky_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def cholesky_errors_qq(self, title='Cholesky QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.cholesky_errors,
title=title, ax=ax)
def pivoted_cholesky_errors(self, title='Pivoted Cholesky Errors', ax=None):
err = self.diagnostic.pivoted_cholesky_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def pivoted_cholesky_errors_qq(self, title='Pivoted Cholesky QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.pivoted_cholesky_errors,
title=title, ax=ax)
def eigen_errors(self, title='Eigen Errors', ax=None):
err = self.diagnostic.eigen_errors(self.data)
return self.error_plot(err, title=title, ax=ax)
def eigen_errors_qq(self, title='Eigen QQ Plot', ax=None):
return self.qq(self.data, self.samples, [0.68, 0.95], self.diagnostic.eigen_errors,
title=title, ax=ax)
    def hist(self, data, ref, title=None, xlabel=None, ylabel=None, vlines=True, ax=None):
        if ax is None:
            ax = plt.gca()
if hasattr(ref, 'ppf'):
            lower_95 = ref.ppf(0.025)
            upper_95 = ref.ppf(0.975)
x = np.linspace(lower_95, upper_95, 100)
ax.plot(x, ref.pdf(x), label='ref', color=self.black)
else:
ref_stats = stats.describe(ref)
ref_sd = np.sqrt(ref_stats.variance)
ref_mean = ref_stats.mean
# This doesn't exactly match 95% intervals from distribution
lower_95 = ref_mean - 2 * ref_sd
upper_95 = ref_mean + 2 * ref_sd
ax.hist(ref, density=1, label='ref', histtype='step', color=self.black)
ax.axvline(lower_95, 0, 1, color='gray', linestyle='--', label=r'$2\sigma$')
ax.axvline(upper_95, 0, 1, color='gray', linestyle='--')
if vlines:
for c, d in zip(cycle(self.color_cycle), np.atleast_1d(data)):
ax.axvline(d, 0, 1, zorder=50, **c)
else:
ax.hist(data, density=1, label='data', histtype='step')
ax.legend()
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def violin(self, data, ref, title=None, xlabel=None, ylabel=None, ax=None):
if ax is None:
ax = plt.gca()
n = len(data)
nref = len(ref)
orders = np.arange(n)
zero = np.zeros(len(data), dtype=int)
nans = np.nan * np.ones(nref)
fake = np.hstack((np.ones(nref, dtype=bool), np.zeros(nref, dtype=bool)))
fake_ref = np.hstack((fake[:, None], np.hstack((ref, nans))[:, None]))
label = 'label_' # Placeholder
ref_df = pd.DataFrame(fake_ref, columns=['fake', label])
tidy_data = np.hstack((orders[:, None], data[:, None]))
data_df = pd.DataFrame(tidy_data, columns=['orders', label])
sns.violinplot(x=np.zeros(2 * nref, dtype=int), y=label, data=ref_df,
color=self.gray, hue='fake', split=True, inner='box', ax=ax)
with sns.color_palette(self.colors):
sns.swarmplot(x=zero, y=label, data=data_df, hue='orders', ax=ax)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.set_xlim(-0.05, 0.5)
return ax
def box(self, data, ref, title=None, xlabel=None, ylabel=None, trim=True, size=8, legend=False, ax=None):
if ax is None:
ax = plt.gca()
label = 'labelll' # Placeholder
# Plot reference dist
if hasattr(ref, 'ppf'):
gray = 'gray'
boxartist = self._dist_boxplot(
ref, ax=ax, positions=[0],
patch_artist=True,
widths=0.8)
for box in boxartist['boxes']:
box.update(dict(facecolor='lightgrey', edgecolor=gray))
for whisk in boxartist["whiskers"]:
whisk.update(dict(color=gray))
for cap in boxartist["caps"]:
cap.update(dict(color=gray))
for med in boxartist["medians"]:
med.update(dict(color=gray))
else:
nref = len(ref)
ref_df = pd.DataFrame(ref, columns=[label])
sns.boxplot(
x=np.zeros(nref, dtype=int), y=label, data=ref_df, color='lightgrey', ax=ax, fliersize=0, sym='',
whis=[2.5, 97.5], bootstrap=None,
)
# Plot data
n = len(data)
orders = np.array([r'$c_{{{}}}$'.format(i) for i in range(n)])
zero = np.zeros(len(data), dtype=int)
tidy_data = np.array([orders, data], dtype=np.object).T
data_df = pd.DataFrame(tidy_data, columns=['orders', label])
data_df[label] = data_df[label].astype(float)
# print(data_df)
from matplotlib.markers import MarkerStyle
with sns.color_palette(self.colors):
# Only use this to get the right positions
ss = sns.swarmplot(
x=zero, y=label, data=data_df,
hue='orders',
ax=ax, size=size,
linewidth=0.5,
# marker=[MarkerStyle('o', fillstyle=style) for style in self.markerfillstyles]
# marker='left'
# marker=MarkerStyle('o', fillstyle='left')
# marker=MarkerStyle('o', fillstyle='top'),
# facecolor='none',
# facecoloralt='w',
# color='none',
# alpha=0
)
# Swarmplot plots markers in an order from smallest to largest
# This rearranges the marker line styles to be in that order
positions = ss.collections[0].get_offsets() # These are ordered by swarmplot
ss.collections[0].remove() # Don't show swarmplot, we will plot below
_, idx, inv = np.unique(data_df[label].values, return_index=True, return_inverse=True)
# positions = positions[idx]
positions = positions[inv]
assert np.allclose(positions[:, -1], data_df[label].values)
for i, (x, y) in enumerate(positions):
ax.plot(
[x], [y],
marker=self.markers[i], ls='', markersize=size,
zorder=5+i,
c=self.colors[i], fillstyle=self.markerfillstyles[i],
markeredgecolor=self.markeredgecolors[i], markeredgewidth=0.5, clip_on=False
)
# linestyles_new = np.array(self.markerlinestyles)[inv]
# print(ss.collections[0].get_fill())
# # collections[0] *should* be the markers created by swarmplot... but might not always be?
# ss.collections[0].set_dashes(linestyles_new)
ax.set_ylabel(ylabel)
ax.set_xticks([])
ax.set_xlabel(xlabel)
ax.set_title(title)
if legend:
ax.legend(title=None)
else:
ax.get_legend().remove()
sns.despine(offset=0, trim=trim, bottom=True, ax=ax)
return ax
@staticmethod
def _dist_boxplot(dist, q1=0.25, q3=0.75, whislo=0.025, whishi=0.975, label=None, ax=None, other_stats=None,
**kwargs):
"""Creates a boxplot computed from a Scipy.stats-like distribution."""
if ax is None:
ax = plt.gca()
stat_dict = [{'med': dist.median(), 'q1': dist.ppf(q1), 'q3': dist.ppf(q3),
'whislo': dist.ppf(whislo), 'whishi': dist.ppf(whishi)}]
if label is not None:
stat_dict[0]['label'] = label
if other_stats is not None:
stat_dict = [*stat_dict, *other_stats]
return ax.bxp(stat_dict, showfliers=False, **kwargs)
def qq(self, data, ref, band_perc, func, title=None, ax=None):
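        # Q-Q plot: sort the transformed data and reference draws, then compare
        # empirical quantiles against theoretical quantiles of the standardized
        # distribution (std_udist). The shaded regions are percentile envelopes
        # of the reference samples at the levels given in band_perc.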
data = np.sort(func(data.copy()), axis=0)
ref = np.sort(func(ref.copy()), axis=0)
bands = np.array([np.percentile(ref, [100 * (1. - bi) / 2, 100 * (1. + bi) / 2], axis=1)
for bi in band_perc])
n = data.shape[0]
quants = (np.arange(1, n + 1) - 0.5) / n
q_theory = self.diagnostic.std_udist.ppf(quants)
if ax is None:
ax = plt.gca()
for i in range(len(band_perc) - 1, -1, -1):
ax.fill_between(q_theory, bands[i, 0], bands[i, 1], alpha=0.5, color='gray')
for i, dat in enumerate(data.T):
ax.plot(q_theory, dat, c=self.colors[i], label=self.labels[i])
yl, yu = ax.get_ylim()
xl, xu = ax.get_xlim()
ax.plot([xl, xu], [xl, xu], c=self.black)
ax.set_ylim([yl, yu])
ax.set_xlim([xl, xu])
if title is not None:
ax.set_title(title)
ax.set_xlabel('Theoretical Quantiles')
ax.set_ylabel('Empirical Quantiles')
return ax
def md_squared(self, ax=None, type='hist', title='Mahalanobis Distance', xlabel='MD', **kwargs):
if ax is None:
ax = plt.gca()
md_data = self.diagnostic.md_squared(self.data)
if type == 'hist':
return self.hist(md_data, self.md_ref_dist, title=title,
xlabel=xlabel, ax=ax, **kwargs)
elif type == 'box':
return self.box(
md_data, self.md_ref_dist, title=title,
xlabel=xlabel, ax=ax, **kwargs)
def kl(self, X, gp, predict=False, vlines=True, title='KL Divergence', xlabel='KL', ax=None):
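        # KL diagnostic: fit the GP to each reference sample and to the observed
        # data, extract the (predictive) mean and covariance in each case, and
        # compare the KL divergences of the data fits against the reference
        # distribution of KL values via a histogram.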
if ax is None:
ax = plt.gca()
ref_means = []
ref_covs = []
for i, sample in enumerate(self.samples):
gp.fit(X, sample)
if predict:
mean, cov = gp.predict(X, return_cov=True)
else:
mean, cov = gp.mean(X), gp.cov(X)
ref_means.append(mean)
ref_covs.append(cov)
data_means = []
data_covs = []
for i, data in enumerate(np.atleast_2d(self.data)):
gp.fit(X, data)
if predict:
mean, cov = gp.predict(X, return_cov=True)
else:
mean, cov = gp.mean(X), gp.cov(X)
data_means.append(mean)
data_covs.append(cov)
kl_ref = [self.diagnostic.kl(mean, cov) for mean, cov in zip(ref_means, ref_covs)]
kl_data = [self.diagnostic.kl(mean, cov) for mean, cov in zip(data_means, data_covs)]
return self.hist(kl_data, kl_ref, title=title,
xlabel=xlabel, vlines=vlines, ax=ax)
def credible_interval(self, intervals, band_perc, title='Credible Interval Diagnostic',
xlabel='Credible Interval', ylabel='Empirical Coverage', ax=None, linestyles=None):
dci_data = self.diagnostic.credible_interval(self.data, intervals)
dci_ref = self.diagnostic.credible_interval(self.samples, intervals)
bands = np.array([np.percentile(dci_ref, [100 * (1. - bi) / 2, 100 * (1. + bi) / 2], axis=0)
for bi in band_perc])
greys = mpl.cm.get_cmap('Greys')
if ax is None:
ax = plt.gca()
band_perc = np.sort(band_perc)
for i, perc in enumerate(band_perc):
ax.fill_between(intervals, bands[i, 0], bands[i, 1], alpha=1.,
color=greys((len(band_perc) - i) / (len(band_perc) + 2.5)),
zorder=-perc)
ax.plot([0, 1], [0, 1], c=self.black)
for i, data in enumerate(dci_data):
if linestyles is None:
ls = None
else:
ls = linestyles[i]
ax.plot(intervals, data, color=self.colors[i], ls=ls, label=self.labels[i])
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title)
return ax
def variogram(self, X, title='Variogram', xlabel='Lag', ax=None):
y = self.data
N = len(X)
        # Cube-root rule for the number of bins, based on the N*(N-1)/2 point pairs
        nbins = int(np.ceil((N * (N - 1) / 2.) ** (1. / 3)))
        bin_bounds = np.linspace(0, np.max(np.linalg.norm(X, axis=-1)), nbins)
v, loc, gamma, lower, upper = self.diagnostic.variogram(X, y, bin_bounds)
if ax is None:
ax = plt.gca()
ax.set_title(title)
ax.set_xlabel(xlabel)
for i in range(y.shape[0]):
ax.plot(loc, gamma[:, i], ls='', marker='o', c=self.colors[i])
ax.plot(loc, lower[:, i], lw=0.5, c=self.colors[i])
ax.plot(loc, upper[:, i], lw=0.5, c=self.colors[i])
return ax
def plotzilla(self, X, gp=None, predict=False, vlines=True):
R"""A convenience method for plotting a lot of diagnostics at once.
"""
if gp is None:
pass
fig, axes = plt.subplots(4, 3, figsize=(12, 12))
self.md_squared(vlines=vlines, ax=axes[0, 0])
if gp is not None:
self.kl(X, gp, predict, vlines=vlines, ax=axes[0, 1])
        self.credible_interval(np.linspace(0, 1, 101), [0.68, 0.95], ax=axes[0, 2])
self.individual_errors(axes[1, 0])
self.individual_errors_qq(axes[2, 0])
self.cholesky_errors(axes[1, 1])
self.cholesky_errors_qq(axes[2, 1])
self.eigen_errors(axes[1, 2])
self.eigen_errors_qq(axes[2, 2])
self.pivoted_cholesky_errors(axes[3, 0])
self.pivoted_cholesky_errors_qq(axes[3, 1])
fig.tight_layout()
return fig, axes
def essentials(self, vlines=True, bare=False):
R"""A convenience method for plotting the essential diagnostics quickly.
Parameters
----------
vlines
bare
Returns
-------
"""
if bare:
fig, axes = plt.subplots(1, 3, figsize=(7, 3))
self.md_squared(vlines=vlines, ax=axes[0])
self.pivoted_cholesky_errors(axes[1])
            self.credible_interval(np.linspace(0, 1, 101), [0.68, 0.95], ax=axes[2])
axes[0].set_title('')
axes[0].legend(title=r'$\mathrm{D}_{\mathrm{MD}}$')
axes[0].set_ylabel('')
axes[0].set_yticks([])
axes[1].set_yticks([])
axes[1].legend(title=r'$\mathrm{D}_{\mathrm{PC}}$')
axes[1].set_title('')
axes[1].set_ylabel('')
axes[2].set_title('')
axes[2].set_ylabel('')
axes[2].set_xticks([0, 0.5, 1])
axes[2].set_xticklabels(['0', '0.5', '1'])
axes[2].yaxis.tick_right()
axes[2].text(0.05, 0.94, r'$\mathrm{D}_{\mathrm{CI}}$', transform=axes[2].transAxes,
verticalalignment='top',
bbox=dict(boxstyle='round', facecolor='white', alpha=0.5, ec='grey'))
fig.tight_layout(h_pad=0.01, w_pad=0.1)
else:
fig, axes = plt.subplots(2, 3, figsize=(12, 6))
self.md_squared(vlines=vlines, ax=axes[0, 0])
            self.credible_interval(np.linspace(0, 1, 101), [0.68, 0.95], ax=axes[1, 0])
self.eigen_errors(axes[0, 1])
self.eigen_errors_qq(axes[1, 1])
self.pivoted_cholesky_errors(axes[0, 2])
self.pivoted_cholesky_errors_qq(axes[1, 2])
fig.tight_layout()
return fig, axes
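# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The credible_interval() diagnostic above reports the empirical coverage of
# central credible intervals. The standalone sketch below shows the same idea
# for standardized errors compared against a standard-normal reference (the
# role played by ``self.diagnostic.std_udist`` elsewhere in this class). The
# helper name ``empirical_coverage`` is hypothetical, not an API of this package.
if __name__ == "__main__":
    import numpy as np
    from scipy import stats

    def empirical_coverage(errors, intervals):
        """Fraction of standardized errors inside each central credible interval."""
        errors = np.atleast_2d(errors)                 # (n_curves, n_points)
        lo = stats.norm.ppf(0.5 * (1. - intervals))    # lower interval bounds
        hi = stats.norm.ppf(0.5 * (1. + intervals))    # upper interval bounds
        inside = (errors[..., None] >= lo) & (errors[..., None] <= hi)
        return inside.mean(axis=1)                     # (n_curves, n_intervals)

    rng = np.random.default_rng(0)
    levels = np.linspace(0.01, 0.99, 25)
    coverage = empirical_coverage(rng.standard_normal((3, 50)), levels)
    # Well-calibrated errors give coverage close to the nominal levels, which is
    # what the diagonal reference line in credible_interval() encodes.
    print(np.max(np.abs(coverage - levels)))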
|
jordan-melendez/buqeyemodel
|
gsum/diagnostics.py
|
Python
|
mit
| 26,203
|
[
"Gaussian"
] |
71fe4bc969e9590d3b2cc2ebb02f99106f4e4f6d7adbb25398216a87d60b2874
|
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 21817 $
# Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
from types import IntType, SliceType, StringType, UnicodeType, \
TupleType, ListType, ClassType
from UserString import UserString
# ==============================
# Functional Node Base Classes
# ==============================
class Node:
"""Abstract base class of nodes in a document tree."""
parent = None
"""Back-reference to the Node immediately containing this Node."""
document = None
"""The `document` node at the root of the tree containing this Node."""
source = None
"""Path or description of the input source which generated this Node."""
line = None
"""The line number (1-based) of the beginning of this Node in `source`."""
def __nonzero__(self):
"""
Node instances are always true, even if they're empty. A node is more
than a simple container. Its boolean "truth" does not depend on
having one or more subnodes in the doctree.
Use `len()` to check node length. Use `None` to represent a boolean
false value.
"""
return 1
def asdom(self, dom=None):
"""Return a DOM **fragment** representation of this Node."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
return self._dom_node(domroot)
def pformat(self, indent=' ', level=0):
"""
Return an indented pseudo-XML representation, for test purposes.
Override in subclasses.
"""
raise NotImplementedError
def copy(self):
"""Return a copy of self."""
raise NotImplementedError
def setup_child(self, child):
child.parent = self
if self.document:
child.document = self.document
if child.source is None:
child.source = self.document.current_source
if child.line is None:
child.line = self.document.current_line
def walk(self, visitor):
"""
Traverse a tree of `Node` objects, calling the
`dispatch_visit()` method of `visitor` when entering each
node. (The `walkabout()` method is similar, except it also
calls the `dispatch_departure()` method before exiting each
node.)
This tree traversal supports limited in-place tree
modifications. Replacing one node with one or more nodes is
OK, as is removing an element. However, if the node removed
or replaced occurs after the current node, the old node will
still be traversed, and any new nodes will not.
Within ``visit`` methods (and ``depart`` methods for
`walkabout()`), `TreePruningException` subclasses may be raised
(`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` implementation for each `Node` subclass encountered.
"""
visitor.document.reporter.debug(
'docutils.nodes.Node.walk calling dispatch_visit for %s'
% self.__class__.__name__)
try:
visitor.dispatch_visit(self)
except (SkipChildren, SkipNode):
return
except SkipDeparture: # not applicable; ignore
pass
children = self.children
try:
for child in children[:]:
child.walk(visitor)
except SkipSiblings:
pass
def walkabout(self, visitor):
"""
Perform a tree traversal similarly to `Node.walk()` (which
see), except also call the `dispatch_departure()` method
before exiting each node.
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` and ``depart`` implementation for each `Node`
subclass encountered.
"""
call_depart = 1
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
% self.__class__.__name__)
try:
try:
visitor.dispatch_visit(self)
except SkipNode:
return
except SkipDeparture:
call_depart = 0
children = self.children
try:
for child in children[:]:
child.walkabout(visitor)
except SkipSiblings:
pass
except SkipChildren:
pass
if call_depart:
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_departure '
'for %s' % self.__class__.__name__)
visitor.dispatch_departure(self)
def traverse(self, condition=None,
include_self=1, descend=1, siblings=0, ascend=0):
"""
Return an iterable containing
* self (if include_self is true)
* all descendants in tree traversal order (if descend is true)
* all siblings (if siblings is true) and their descendants (if
also descend is true)
* the siblings of the parent (if ascend is true) and their
descendants (if also descend is true), and so on
If `condition` is not None, the iterable contains only nodes
for which ``condition(node)`` is true. If `condition` is a
node class ``cls``, it is equivalent to a function consisting
of ``return isinstance(node, cls)``.
If ascend is true, assume siblings to be true as well.
For example, given the following tree::
<paragraph>
<emphasis> <--- emphasis.traverse() and
<strong> <--- strong.traverse() are called.
Foo
Bar
<reference name="Baz" refid="baz">
Baz
Then list(emphasis.traverse()) equals ::
[<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]
and list(strong.traverse(ascend=1)) equals ::
[<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
"""
r = []
if ascend:
siblings=1
if isinstance(condition, ClassType) and issubclass(condition, Node):
node_class = condition
def condition(node, node_class=node_class):
return isinstance(node, node_class)
if include_self and (condition is None or condition(self)):
r.append(self)
if descend and len(self.children):
for child in self:
r.extend(child.traverse(
include_self=1, descend=1, siblings=0, ascend=0,
condition=condition))
if siblings or ascend:
node = self
while node.parent:
index = node.parent.index(node)
for sibling in node.parent[index+1:]:
r.extend(sibling.traverse(include_self=1, descend=descend,
siblings=0, ascend=0,
condition=condition))
if not ascend:
break
else:
node = node.parent
return r
def next_node(self, condition=None,
include_self=0, descend=1, siblings=0, ascend=0):
"""
Return the first node in the iterable returned by traverse(),
or None if the iterable is empty.
Parameter list is the same as of traverse. Note that
include_self defaults to 0, though.
"""
iterable = self.traverse(condition=condition,
include_self=include_self, descend=descend,
siblings=siblings, ascend=ascend)
try:
return iterable[0]
except IndexError:
return None
class Text(Node, UserString):
"""
Instances are terminal nodes (leaves) containing text only; no child
nodes or attributes. Initialize by passing a string to the constructor.
Access the text itself with the `astext` method.
"""
tagname = '#text'
children = ()
"""Text nodes have no children, and cannot have children."""
def __init__(self, data, rawsource=''):
UserString.__init__(self, data)
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
def __repr__(self):
data = repr(self.data)
if len(data) > 70:
data = repr(self.data[:64] + ' ...')
return '<%s: %s>' % (self.tagname, data)
def __len__(self):
return len(self.data)
def shortrepr(self):
data = repr(self.data)
if len(data) > 20:
data = repr(self.data[:16] + ' ...')
return '<%s: %s>' % (self.tagname, data)
def _dom_node(self, domroot):
return domroot.createTextNode(self.data)
def astext(self):
return self.data
def copy(self):
return self.__class__(self.data)
def pformat(self, indent=' ', level=0):
result = []
indent = indent * level
for line in self.data.splitlines():
result.append(indent + line + '\n')
return ''.join(result)
class Element(Node):
"""
`Element` is the superclass to all specific elements.
Elements contain attributes and child nodes. Elements emulate
dictionaries for attributes, indexing by attribute name (a string). To
set the attribute 'att' to 'value', do::
element['att'] = 'value'
There are two special attributes: 'ids' and 'names'. Both are
lists of unique identifiers, and names serve as human interfaces
to IDs. Names are case- and whitespace-normalized (see the
fully_normalize_name() function), and IDs conform to the regular
expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
element[0]
Elements may be constructed using the ``+=`` operator. To add one new
child node to element, do::
element += node
This is equivalent to ``element.append(node)``.
To add a list of multiple child nodes at once, use the same ``+=``
operator::
element += [node1, node2]
This is equivalent to ``element.extend([node1, node2])``.
"""
attr_defaults = {'ids': [], 'classes': [], 'names': [],
'dupnames': [], 'backrefs': []}
"""Default attributes."""
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
child_text_separator = '\n\n'
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', *children, **attributes):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
self.children = []
"""List of child nodes (elements and/or `Text`)."""
self.extend(children) # maintain parent info
self.attributes = {}
"""Dictionary of attribute {name: value}."""
# Copy default values.
for att, value in self.attr_defaults.items():
# Default values are always lists (at the moment).
self.attributes[att] = value[:]
for att, value in attributes.items():
self.attributes[att.lower()] = value
if self.tagname is None:
self.tagname = self.__class__.__name__
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attlist():
if isinstance(value, ListType):
value = ' '.join(['%s' % v for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join(self['names']), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
'; '.join(self['names']))
else:
return '<%s...>' % self.tagname
def __str__(self):
return self.__unicode__().encode('raw_unicode_escape')
def __unicode__(self):
if self.children:
return u'%s%s%s' % (self.starttag(),
''.join([str(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
def starttag(self):
parts = [self.tagname]
for name, value in self.attlist():
if value is None: # boolean attribute
parts.append(name)
elif isinstance(value, ListType):
values = ['%s' % v for v in value]
parts.append('%s="%s"' % (name, ' '.join(values)))
else:
parts.append('%s="%s"' % (name, value))
return '<%s>' % ' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return u'<%s/>' % ' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
def __getitem__(self, key):
if isinstance(key, UnicodeType) or isinstance(key, StringType):
return self.attributes[key]
elif isinstance(key, IntType):
return self.children[key]
elif isinstance(key, SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
return self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __setitem__(self, key, item):
if isinstance(key, UnicodeType) or isinstance(key, StringType):
self.attributes[str(key)] = item
elif isinstance(key, IntType):
self.setup_child(item)
self.children[key] = item
elif isinstance(key, SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
for node in item:
self.setup_child(node)
self.children[key.start:key.stop] = item
else:
raise TypeError, ('element index must be an integer, a slice, or '
'an attribute name string')
def __delitem__(self, key):
if isinstance(key, UnicodeType) or isinstance(key, StringType):
del self.attributes[key]
elif isinstance(key, IntType):
del self.children[key]
elif isinstance(key, SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
del self.children[key.start:key.stop]
else:
raise TypeError, ('element index must be an integer, a simple '
'slice, or an attribute name string')
def __add__(self, other):
return self.children + other
def __radd__(self, other):
return other + self.children
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
self.append(other)
elif other is not None:
self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
for key, value in self.attributes.items():
if self.is_not_default(key):
atts[key] = value
return atts
def attlist(self):
attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
def get(self, key, failobj=None):
return self.attributes.get(key, failobj)
def hasattr(self, attr):
return self.attributes.has_key(attr)
def delattr(self, attr):
if self.attributes.has_key(attr):
del self.attributes[attr]
def setdefault(self, key, failobj=None):
return self.attributes.setdefault(key, failobj)
has_key = hasattr
def append(self, item):
self.setup_child(item)
self.children.append(item)
def extend(self, item):
for node in item:
self.append(node)
def insert(self, index, item):
if isinstance(item, Node):
self.setup_child(item)
self.children.insert(index, item)
elif item is not None:
self[index:index] = item
def pop(self, i=-1):
return self.children.pop(i)
def remove(self, item):
self.children.remove(item)
def index(self, item):
return self.children.index(item)
def is_not_default(self, key):
try:
return self[key] != self.attr_defaults[key]
except KeyError:
return 1
def clear(self):
self.children = []
def replace(self, old, new):
"""Replace one child `Node` with another child or children."""
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new
def first_child_matching_class(self, childclass, start=0, end=sys.maxint):
"""
Return the index of the first child whose class exactly matches.
Parameters:
- `childclass`: A `Node` subclass to search for, or a tuple of `Node`
classes. If a tuple, any of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, TupleType):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
return index
return None
def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxint):
"""
Return the index of the first child whose class does *not* match.
Parameters:
- `childclass`: A `Node` subclass to skip, or a tuple of `Node`
classes. If a tuple, none of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, TupleType):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self.children[index], c):
break
else:
return index
return None
def pformat(self, indent=' ', level=0):
return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
[child.pformat(indent, level+1)
for child in self.children])
def copy(self):
return self.__class__(**self.attributes)
def set_class(self, name):
"""Add a new class to the "classes" attribute."""
warnings.warn('docutils.nodes.Element.set_class deprecated; '
"append to Element['classes'] list attribute directly",
DeprecationWarning, stacklevel=2)
assert ' ' not in name
self['classes'].append(name.lower())
def note_referenced_by(self, name=None, id=None):
"""Note that this Element has been referenced by its name
`name` or id `id`."""
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target propagation.
by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
if by_name:
assert name is not None
by_name.referenced = 1
if by_id:
assert id is not None
by_id.referenced = 1
class TextElement(Element):
"""
An element which directly contains text.
Its children are all `Text` or `Inline` subclass nodes. You can
check whether an element's context is inline simply by checking whether
its immediate parent is a `TextElement` instance (including subclasses).
This is handy for nodes like `image` that can appear both inline and as
standalone body elements.
If passing children to `__init__()`, make sure to set `text` to
``''`` or some other suitable value.
"""
child_text_separator = ''
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', text='', *children, **attributes):
if text != '':
textnode = Text(text)
Element.__init__(self, rawsource, textnode, *children,
**attributes)
else:
Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):
"""An element which directly contains preformatted text."""
def __init__(self, rawsource='', text='', *children, **attributes):
TextElement.__init__(self, rawsource, text, *children, **attributes)
self.attributes['xml:space'] = 'preserve'
# ========
# Mixins
# ========
class Resolvable:
resolved = 0
class BackLinkable:
def add_backref(self, refid):
self['backrefs'].append(refid)
# ====================
# Element Categories
# ====================
class Root: pass
class Titular: pass
class PreBibliographic:
"""Category of Node which may occur before Bibliographic Nodes."""
class Bibliographic: pass
class Decorative(PreBibliographic): pass
class Structural: pass
class Body: pass
class General(Body): pass
class Sequential(Body):
"""List-like elements."""
class Admonition(Body): pass
class Special(Body):
"""Special internal body elements."""
class Invisible(PreBibliographic):
"""Internal elements that don't appear in output."""
class Part: pass
class Inline: pass
class Referential(Resolvable): pass
class Targetable(Resolvable):
referenced = 0
class Labeled:
"""Contains a `label` as its first element."""
# ==============
# Root Element
# ==============
class document(Root, Structural, Element):
"""
The document root element.
Do not instantiate this class directly; use
`docutils.utils.new_document()` instead.
"""
def __init__(self, settings, reporter, *args, **kwargs):
Element.__init__(self, *args, **kwargs)
self.current_source = None
"""Path to or description of the input source being processed."""
self.current_line = None
"""Line number (1-based) of `current_source`."""
self.settings = settings
"""Runtime settings data record."""
self.reporter = reporter
"""System message generator."""
self.indirect_targets = []
"""List of indirect target nodes."""
self.substitution_defs = {}
"""Mapping of substitution names to substitution_definition nodes."""
self.substitution_names = {}
"""Mapping of case-normalized substitution names to case-sensitive
names."""
self.refnames = {}
"""Mapping of names to lists of referencing nodes."""
self.refids = {}
"""Mapping of ids to lists of referencing nodes."""
self.nameids = {}
"""Mapping of names to unique id's."""
self.nametypes = {}
"""Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit)."""
self.ids = {}
"""Mapping of ids to nodes."""
self.substitution_refs = {}
"""Mapping of substitution names to lists of substitution_reference
nodes."""
self.footnote_refs = {}
"""Mapping of footnote labels to lists of footnote_reference nodes."""
self.citation_refs = {}
"""Mapping of citation labels to lists of citation_reference nodes."""
self.anonymous_targets = []
"""List of anonymous target nodes."""
self.anonymous_refs = []
"""List of anonymous reference nodes."""
self.autofootnotes = []
"""List of auto-numbered footnote nodes."""
self.autofootnote_refs = []
"""List of auto-numbered footnote_reference nodes."""
self.symbol_footnotes = []
"""List of symbol footnote nodes."""
self.symbol_footnote_refs = []
"""List of symbol footnote_reference nodes."""
self.footnotes = []
"""List of manually-numbered footnote nodes."""
self.citations = []
"""List of citation nodes."""
self.autofootnote_start = 1
"""Initial auto-numbered footnote number."""
self.symbol_footnote_start = 0
"""Initial symbol footnote symbol index."""
self.id_start = 1
"""Initial ID number."""
self.parse_messages = []
"""System messages generated while parsing."""
self.transform_messages = []
"""System messages generated while applying transforms."""
import docutils.transforms
self.transformer = docutils.transforms.Transformer(self)
"""Storage for transforms to be applied to this document."""
self.decoration = None
"""Document's `decoration` node."""
self.document = self
def asdom(self, dom=None):
"""Return a DOM representation of this document."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
domroot.appendChild(self._dom_node(domroot))
return domroot
def set_id(self, node, msgnode=None):
for id in node['ids']:
if self.ids.has_key(id) and self.ids[id] is not node:
msg = self.reporter.severe('Duplicate ID: "%s".' % id)
if msgnode != None:
msgnode += msg
if not node['ids']:
for name in node['names']:
id = self.settings.id_prefix + make_id(name)
if id and not self.ids.has_key(id):
break
else:
id = ''
while not id or self.ids.has_key(id):
id = (self.settings.id_prefix +
self.settings.auto_id_prefix + str(self.id_start))
self.id_start += 1
node['ids'].append(id)
self.ids[id] = node
return id
def set_name_id_map(self, node, id, msgnode=None, explicit=None):
"""
`self.nameids` maps names to IDs, while `self.nametypes` maps names to
booleans representing hyperlink type (True==explicit,
False==implicit). This method updates the mappings.
The following state transition table shows how `self.nameids` ("ids")
and `self.nametypes` ("types") change with new input (a call to this
method), and what actions are performed:
==== ===== ======== ======== ======= ==== ===== =====
Old State Input Action New State Notes
----------- -------- ----------------- ----------- -----
ids types new type sys.msg. dupname ids types
==== ===== ======== ======== ======= ==== ===== =====
-- -- explicit -- -- new True
-- -- implicit -- -- new False
None False explicit -- -- new True
old False explicit implicit old new True
None True explicit explicit new None True
old True explicit explicit new,old None True [#]_
None False implicit implicit new None False
old False implicit implicit new,old None False
None True implicit implicit new None True
old True implicit implicit new old True
==== ===== ======== ======== ======= ==== ===== =====
.. [#] Do not clear the name-to-id map or invalidate the old target if
both old and new targets are external and refer to identical URIs.
The new target is invalidated regardless.
"""
for name in node['names']:
if self.nameids.has_key(name):
self.set_duplicate_name_id(node, id, name, msgnode, explicit)
else:
self.nameids[name] = id
self.nametypes[name] = explicit
def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
old_id = self.nameids[name]
old_explicit = self.nametypes[name]
self.nametypes[name] = old_explicit or explicit
if explicit:
if old_explicit:
level = 2
if old_id is not None:
old_node = self.ids[old_id]
if node.has_key('refuri'):
refuri = node['refuri']
if old_node['names'] \
and old_node.has_key('refuri') \
and old_node['refuri'] == refuri:
level = 1 # just inform if refuri's identical
if level > 1:
dupname(old_node, name)
self.nameids[name] = None
msg = self.reporter.system_message(
level, 'Duplicate explicit target name: "%s".' % name,
backrefs=[id], base_node=node)
if msgnode != None:
msgnode += msg
dupname(node, name)
else:
self.nameids[name] = id
if old_id is not None:
old_node = self.ids[old_id]
dupname(old_node, name)
else:
if old_id is not None and not old_explicit:
self.nameids[name] = None
old_node = self.ids[old_id]
dupname(old_node, name)
dupname(node, name)
if not explicit or (not old_explicit and old_id is not None):
msg = self.reporter.info(
'Duplicate implicit target name: "%s".' % name,
backrefs=[id], base_node=node)
if msgnode != None:
msgnode += msg
def has_name(self, name):
return self.nameids.has_key(name)
# "note" here is an imperative verb: "take note of".
def note_implicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=None)
def note_explicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=1)
def note_refname(self, node):
self.refnames.setdefault(node['refname'], []).append(node)
def note_refid(self, node):
self.refids.setdefault(node['refid'], []).append(node)
def note_indirect_target(self, target):
self.indirect_targets.append(target)
if target['names']:
self.note_refname(target)
def note_anonymous_target(self, target):
self.set_id(target)
self.anonymous_targets.append(target)
def note_anonymous_ref(self, ref):
self.anonymous_refs.append(ref)
def note_autofootnote(self, footnote):
self.set_id(footnote)
self.autofootnotes.append(footnote)
def note_autofootnote_ref(self, ref):
self.set_id(ref)
self.autofootnote_refs.append(ref)
def note_symbol_footnote(self, footnote):
self.set_id(footnote)
self.symbol_footnotes.append(footnote)
def note_symbol_footnote_ref(self, ref):
self.set_id(ref)
self.symbol_footnote_refs.append(ref)
def note_footnote(self, footnote):
self.set_id(footnote)
self.footnotes.append(footnote)
def note_footnote_ref(self, ref):
self.set_id(ref)
self.footnote_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_citation(self, citation):
self.citations.append(citation)
def note_citation_ref(self, ref):
self.set_id(ref)
self.citation_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_substitution_def(self, subdef, def_name, msgnode=None):
name = whitespace_normalize_name(def_name)
subdef['names'].append(name)
if self.substitution_defs.has_key(name):
msg = self.reporter.error(
'Duplicate substitution definition name: "%s".' % name,
base_node=subdef)
if msgnode != None:
msgnode += msg
oldnode = self.substitution_defs[name]
dupname(oldnode, name)
# keep only the last definition:
self.substitution_defs[name] = subdef
# case-insensitive mapping:
self.substitution_names[fully_normalize_name(name)] = name
def note_substitution_ref(self, subref, refname):
name = subref['refname'] = whitespace_normalize_name(refname)
self.substitution_refs.setdefault(name, []).append(subref)
def note_pending(self, pending, priority=None):
self.transformer.add_pending(pending, priority)
def note_parse_message(self, message):
self.parse_messages.append(message)
def note_transform_message(self, message):
self.transform_messages.append(message)
def note_source(self, source, offset):
self.current_source = source
if offset is None:
self.current_line = offset
else:
self.current_line = offset + 1
def copy(self):
return self.__class__(self.settings, self.reporter,
**self.attributes)
def get_decoration(self):
if not self.decoration:
self.decoration = decoration()
index = self.first_child_not_matching_class(Titular)
if index is None:
self.append(self.decoration)
else:
self.insert(index, self.decoration)
return self.decoration
# ================
# Title Elements
# ================
class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
# Bibliographic Elements
# ========================
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
# Decorative Elements
# =====================
class decoration(Decorative, Element):
def get_header(self):
if not len(self.children) or not isinstance(self.children[0], header):
self.insert(0, header())
return self.children[0]
def get_footer(self):
if not len(self.children) or not isinstance(self.children[-1], footer):
self.append(footer())
return self.children[-1]
class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
# Structural Elements
# =====================
class section(Structural, Element): pass
class topic(Structural, Element):
"""
Topics are terminal, "leaf" mini-sections, like block quotes with titles,
or textual figures. A topic is just like a section, except that it has no
subsections, and it doesn't have to conform to section placement rules.
Topics are allowed wherever body elements (list, table, etc.) are allowed,
but only at the top level of a section or document. Topics cannot nest
inside topics, sidebars, or body elements; you can't have a topic inside a
table, list, block quote, etc.
"""
class sidebar(Structural, Element):
"""
Sidebars are like miniature, parallel documents that occur inside other
documents, providing related or reference material. A sidebar is
typically offset by a border and "floats" to the side of the page; the
document's main text may flow around it. Sidebars can also be likened to
super-footnotes; their content is outside of the flow of the document's
main text.
Sidebars are allowed wherever body elements (list, table, etc.) are
allowed, but only at the top level of a section or document. Sidebars
cannot nest inside sidebars, topics, or body elements; you can't have a
sidebar inside a table, list, block quote, etc.
"""
class transition(Structural, Element): pass
# ===============
# Body Elements
# ===============
class paragraph(General, TextElement): pass
class compound(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass
class option(Part, Element):
child_text_separator = ''
class option_argument(Part, TextElement):
def astext(self):
return self.get('delimiter', ' ') + TextElement.astext(self)
class option_group(Part, Element):
child_text_separator = ', '
class option_list(Sequential, Element): pass
class option_list_item(Part, Element):
child_text_separator = ' '
class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class line_block(General, Element): pass
class line(Part, TextElement):
indent = None
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):
"""
System message element.
Do not instantiate this class directly; use
``document.reporter.info/warning/error/severe()`` instead.
"""
def __init__(self, message=None, *children, **attributes):
if message:
p = paragraph('', message)
children = (p,) + children
try:
Element.__init__(self, '', *children, **attributes)
except:
print 'system_message: children=%r' % (children,)
raise
def astext(self):
line = self.get('line', '')
return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
self['level'], Element.astext(self))
class pending(Special, Invisible, Element):
"""
The "pending" element is used to encapsulate a pending operation: the
operation (transform), the point at which to apply it, and any data it
requires. Only the pending operation's location within the document is
stored in the public document tree (by the "pending" object itself); the
operation and its data are stored in the "pending" object's internal
instance attributes.
For example, say you want a table of contents in your reStructuredText
document. The easiest way to specify where to put it is from within the
document, with a directive::
.. contents::
But the "contents" directive can't do its work until the entire document
has been parsed and possibly transformed to some extent. So the directive
code leaves a placeholder behind that will trigger the second phase of its
processing, something like this::
<pending ...public attributes...> + internal attributes
Use `document.note_pending()` so that the
`docutils.transforms.Transformer` stage of processing can run all pending
transforms.
"""
def __init__(self, transform, details=None,
rawsource='', *children, **attributes):
Element.__init__(self, rawsource, *children, **attributes)
self.transform = transform
"""The `docutils.transforms.Transform` class implementing the pending
operation."""
self.details = details or {}
"""Detail data (dictionary) required by the pending operation."""
def pformat(self, indent=' ', level=0):
internals = [
'.. internal attributes:',
' .transform: %s.%s' % (self.transform.__module__,
self.transform.__name__),
' .details:']
details = self.details.items()
details.sort()
for key, value in details:
if isinstance(value, Node):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
elif value and isinstance(value, ListType) \
and isinstance(value[0], Node):
internals.append('%7s%s:' % ('', key))
for v in value:
internals.extend(['%9s%s' % ('', line)
for line in v.pformat().splitlines()])
else:
internals.append('%7s%s: %r' % ('', key, value))
return (Element.pformat(self, indent, level)
+ ''.join([(' %s%s\n' % (indent * level, line))
for line in internals]))
def copy(self):
return self.__class__(self.transform, self.details, self.rawsource,
**self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):
"""
Raw data that is to be passed untouched to the Writer.
"""
pass
# =================
# Inline Elements
# =================
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class image(General, Inline, Element):
def astext(self):
return self.get('alt', '')
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:
"""
"Visitor" pattern [GoF95]_ abstract superclass implementation for
document tree traversals.
Each node class has corresponding methods, doing nothing by
default; override individual methods for specific and useful
behaviour. The `dispatch_visit()` method is called by
`Node.walk()` upon entering a node. `Node.walkabout()` also calls
the `dispatch_departure()` method before exiting a node.
The dispatch methods call "``visit_`` + node class name" or
"``depart_`` + node class name", resp.
This is a base class for visitors whose ``visit_...`` & ``depart_...``
methods should be implemented for *all* node types encountered (such as
for `docutils.writers.Writer` subclasses). Unimplemented methods will
raise exceptions.
For sparse traversals, where only certain node types are of interest,
subclass `SparseNodeVisitor` instead. When (mostly or entirely) uniform
processing is desired, subclass `GenericNodeVisitor`.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
optional = ()
"""
Tuple containing node class names (as strings).
No exception will be raised if writers do not implement visit
or departure functions for these node classes.
Used to ensure transitional compatibility with existing 3rd-party writers.
"""
def __init__(self, document):
self.document = document
def dispatch_visit(self, node):
"""
Call self."``visit_`` + node class name" with `node` as
parameter. If the ``visit_...`` method does not exist, call
self.unknown_visit.
"""
node_name = node.__class__.__name__
method = getattr(self, 'visit_' + node_name, self.unknown_visit)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
% (method.__name__, node_name))
return method(node)
def dispatch_departure(self, node):
"""
Call self."``depart_`` + node class name" with `node` as
parameter. If the ``depart_...`` method does not exist, call
self.unknown_departure.
"""
node_name = node.__class__.__name__
method = getattr(self, 'depart_' + node_name, self.unknown_departure)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
% (method.__name__, node_name))
return method(node)
def unknown_visit(self, node):
"""
Called when entering unknown `Node` types.
Raise an exception unless overridden.
"""
if (node.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s visiting unknown node type: %s'
% (self.__class__, node.__class__.__name__))
def unknown_departure(self, node):
"""
Called before exiting unknown `Node` types.
Raise exception unless overridden.
"""
if (node.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s departing unknown node type: %s'
% (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):
"""
Base class for sparse traversals, where only certain node types are of
interest. When ``visit_...`` & ``depart_...`` methods should be
implemented for *all* node types (such as for `docutils.writers.Writer`
subclasses), subclass `NodeVisitor` instead.
"""
class GenericNodeVisitor(NodeVisitor):
"""
Generic "Visitor" abstract superclass, for simple traversals.
Unless overridden, each ``visit_...`` method calls `default_visit()`, and
each ``depart_...`` method (when using `Node.walkabout()`) calls
`default_departure()`. `default_visit()` (and `default_departure()`) must
be overridden in subclasses.
Define fully generic visitors by overriding `default_visit()` (and
`default_departure()`) only. Define semi-generic visitors by overriding
individual ``visit_...()`` (and ``depart_...()``) methods also.
`NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
be overridden for default behavior.
"""
def default_visit(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def default_departure(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def _call_default_visit(self, node):
self.default_visit(node)
def _call_default_departure(self, node):
self.default_departure(node)
def _nop(self, node):
pass
def _add_node_class_names(names):
"""Save typing with dynamic assignments:"""
for _name in names:
setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
setattr(SparseNodeVisitor, 'depart_' + _name, _nop)
_add_node_class_names(node_class_names)
class TreeCopyVisitor(GenericNodeVisitor):
"""
Make a complete copy of a tree or branch, including element attributes.
"""
def __init__(self, document):
GenericNodeVisitor.__init__(self, document)
self.parent_stack = []
self.parent = []
def get_tree_copy(self):
return self.parent[0]
def default_visit(self, node):
"""Copy the current node, and make it the new acting parent."""
newnode = node.copy()
self.parent.append(newnode)
self.parent_stack.append(self.parent)
self.parent = newnode
def default_departure(self, node):
"""Restore the previous acting parent."""
self.parent = self.parent_stack.pop()
class TreePruningException(Exception):
"""
Base class for `NodeVisitor`-related tree pruning exceptions.
Raise subclasses from within ``visit_...`` or ``depart_...`` methods
called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
the tree traversed.
"""
pass
class SkipChildren(TreePruningException):
"""
Do not visit any children of the current node. The current node's
siblings and ``depart_...`` method are not affected.
"""
pass
class SkipSiblings(TreePruningException):
"""
Do not visit any more siblings (to the right) of the current node. The
current node's children and its ``depart_...`` method are not affected.
"""
pass
class SkipNode(TreePruningException):
"""
Do not visit the current node's children, and do not call the current
node's ``depart_...`` method.
"""
pass
class SkipDeparture(TreePruningException):
"""
Do not call the current node's ``depart_...`` method. The current node's
children and siblings are not affected.
"""
pass
class NodeFound(TreePruningException):
"""
Raise to indicate that the target of a search has been found. This
exception must be caught by the client; it is not caught by the traversal
code.
"""
pass
def make_id(string):
"""
Convert `string` into an identifier and return it.
Docutils identifiers will conform to the regular expression
``[a-z](-?[a-z0-9]+)*``. For CSS compatibility, identifiers (the "class"
and "id" attributes) should have no underscores, colons, or periods.
Hyphens may be used.
- The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:
ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
followed by any number of letters, digits ([0-9]), hyphens ("-"),
underscores ("_"), colons (":"), and periods (".").
- However the `CSS1 spec`_ defines identifiers based on the "name" token,
a tighter interpretation ("flex" tokenizer notation; "latin1" and
"escape" 8-bit characters have been replaced with entities)::
unicode \\[0-9a-f]{1,4}
latin1 [¡-ÿ]
escape {unicode}|\\[ -~¡-ÿ]
nmchar [-a-z0-9]|{latin1}|{escape}
name {nmchar}+
The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
or periods ("."), therefore "class" and "id" attributes should not contain
these characters. They should be replaced with hyphens ("-"). Combined
with HTML's requirements (the first character must be a letter; no
"unicode", "latin1", or "escape" characters), this results in the
``[a-z](-?[a-z0-9]+)*`` pattern.
.. _HTML 4.01 spec: http://www.w3.org/TR/html401
.. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
"""
id = _non_id_chars.sub('-', ' '.join(string.lower().split()))
id = _non_id_at_ends.sub('', id)
return str(id)
_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
def dupname(node, name):
node['dupnames'].append(name)
node['names'].remove(name)
# Assume that this method is referenced, even though it isn't; we
# don't want to throw unnecessary system_messages.
node.referenced = 1
def fully_normalize_name(name):
"""Return a case- and whitespace-normalized name."""
return ' '.join(name.lower().split())
def whitespace_normalize_name(name):
"""Return a whitespace-normalized name."""
return ' '.join(name.split())
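# --- Illustrative sketch (editor's addition, not part of the original module;
# Python 2, matching the code above) ---
# Elements emulate dictionaries for attributes and lists for children, can be
# grown with ``+=``, and can be searched with ``traverse()``. A tiny doctree
# built directly from the classes defined above:
if __name__ == '__main__':
    para = paragraph('', 'Hello, ')        # TextElement: text becomes a Text child
    para += strong('', 'world')            # append another child with ``+=``
    para['classes'].append('greeting')     # attribute access via mapping protocol
    print para.astext()                    # -> Hello, world
    print para.pformat()                   # indented pseudo-XML representation
    print [t.astext() for t in para.traverse(Text)]  # -> ['Hello, ', 'world']
    print make_id('A Section Title!')      # -> a-section-title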
|
garinh/cs
|
docs/support/docutils/nodes.py
|
Python
|
lgpl-2.1
| 57,112
|
[
"VisIt"
] |
9d252f3d7115099da8a25e5d01521493a7c0edf0d4a7662dcd85a2d611ef969d
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
import re
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
"""Load a TRIPOS mol2 file from disk.
Parameters
----------
filename : str
Path to the prmtop file on disk.
Returns
-------
traj : md.Trajectory
The resulting topology, as an md.Topology object.
Notes
-----
This function should work on GAFF and sybyl style MOL2 files, but has
been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
The elements are guessed using GAFF atom types or via the atype string.
Examples
--------
>>> traj = md.load_mol2('mysystem.mol2')
"""
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.topology import Topology, Single, Double, Triple, Aromatic, Amide
atoms, bonds = mol2_to_dataframes(filename)
atoms_mdtraj = atoms[["name", "resName"]].copy()
atoms_mdtraj["serial"] = atoms.index
#Figure out 1 letter element names
# IF this is a GAFF mol2, this line should work without issues
atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
# If this is a sybyl mol2, there should be NAN (null) values
if atoms_mdtraj.element.isnull().any():
# If this is a sybyl mol2, I think this works generally.
atoms_mdtraj["element"] = atoms.atype.apply(lambda x: x.strip(".")[0])
atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
bond_type_map = {
'1': Single,
'2': Double,
'3': Triple,
'am': Amide,
'ar': Aromatic
}
if bonds is not None:
bonds_mdtraj = bonds[["id0", "id1"]].values
offset = bonds_mdtraj.min() # Should this just be 1???
bonds_mdtraj -= offset
# Create the bond augment information
n_bonds = bonds_mdtraj.shape[0]
bond_augment = np.zeros([n_bonds, 2], dtype=float)
# Add bond type information
bond_augment[:, 0] = [float(bond_type_map[bond_value]) for bond_value in bonds["bond_type"].values]
# Add Bond "order" information, this is not known from Mol2 files
bond_augment[:, 1] = [0.0 for _ in range(n_bonds)]
# Augment array, dtype is cast to minimal representation of float
bonds_mdtraj = np.append(bonds_mdtraj, bond_augment, axis=-1)
else:
bonds_mdtraj = None
top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
xyzlist = np.array([atoms[["x", "y", "z"]].values])
xyzlist /= 10.0 # Convert from angstrom to nanometer
traj = Trajectory(xyzlist, top)
return traj
def mol2_to_dataframes(filename):
"""Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.
Parameters
----------
filename : str
Name of mol2 filename
Returns
-------
atoms_frame : pd.DataFrame
DataFrame containing atom information
bonds_frame : pd.DataFrame
DataFrame containing bond information
Notes
-----
These dataframes may contain force field information as well as the
information necessary for constructing the coordinates and molecular
    topology. This function should work for GAFF and sybyl-style mol2
    files, but has been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
See Also
--------
If you just need the coordinates and bonds, use load_mol2(filename)
to get a Trajectory object.
"""
pd = import_('pandas')
with open(filename) as f:
data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
# Mol2 can have "status bits" at the end of the bond lines. We don't care
    # about these, but they interfere with using pd.read_table because it looks
# like one line has too many columns. So we just regex out the offending
# text.
status_bit_regex = "BACKBONE|DICT|INTERRES|\|"
data["@<TRIPOS>BOND\n"] = [re.sub(status_bit_regex, lambda _: "", s)
for s in data["@<TRIPOS>BOND\n"]]
if len(data["@<TRIPOS>BOND\n"]) > 1:
csv = StringIO()
csv.writelines(data["@<TRIPOS>BOND\n"][1:])
csv.seek(0)
bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
index_col=0, header=None, sep="\s*", engine='python')
else:
bonds_frame = None
csv = StringIO()
csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
csv.seek(0)
atoms_frame = pd.read_csv(csv, sep="\s*", engine='python', header=None)
ncols = atoms_frame.shape[1]
names=["serial", "name", "x", "y", "z", "atype", "code", "resName", "charge", "status"]
atoms_frame.columns = names[:ncols]
return atoms_frame, bonds_frame
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
gaff_elements = {
'br': 'Br',
'c': 'C',
'c1': 'C',
'c2': 'C',
'c3': 'C',
'ca': 'C',
'cc': 'C',
'cd': 'C',
'ce': 'C',
'cf': 'C',
'cg': 'C',
'ch': 'C',
'cl': 'Cl',
'cp': 'C',
'cq': 'C',
'cu': 'C',
'cv': 'C',
'cx': 'C',
'cy': 'C',
'cz': 'C',
'f': 'F',
'h1': 'H',
'h2': 'H',
'h3': 'H',
'h4': 'H',
'h5': 'H',
'ha': 'H',
'hc': 'H',
'hn': 'H',
'ho': 'H',
'hp': 'H',
'hs': 'H',
'hw': 'H',
'hx': 'H',
'i': 'I',
'n': 'N',
'n1': 'N',
'n2': 'N',
'n3': 'N',
'n4': 'N',
'na': 'N',
'nb': 'N',
'nc': 'N',
'nd': 'N',
'ne': 'N',
'nf': 'N',
'nh': 'N',
'no': 'N',
'o': 'O',
'oh': 'O',
'os': 'O',
'ow': 'O',
'p2': 'P',
'p3': 'P',
'p4': 'P',
'p5': 'P',
'pb': 'P',
'px': 'P',
'py': 'P',
's': 'S',
's2': 'S',
's4': 'S',
's6': 'S',
'sh': 'S',
'ss': 'S',
'sx': 'S',
'sy': 'S'}
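# --- Illustrative usage (added for this listing; not part of the original
# module). The path 'ligand.mol2' is a placeholder for any single-structure
# GAFF or sybyl mol2 file on disk.
if __name__ == '__main__':
    # Load the mol2 file as an md.Trajectory (coordinates are converted to nm).
    traj = load_mol2('ligand.mol2')
    print(traj)
    # Or inspect the raw atom/bond tables directly.
    atoms, bonds = mol2_to_dataframes('ligand.mol2')
    print(atoms[['name', 'atype', 'x', 'y', 'z']].head())
    if bonds is not None:
        print(bonds.head())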
|
leeping/mdtraj
|
mdtraj/formats/mol2.py
|
Python
|
lgpl-2.1
| 8,848
|
[
"MDTraj",
"OpenMM"
] |
f26024695196cce931fdf61c07edb427922cf880ae29b4732de9f94af308f89c
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Radim Blazek'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
import os
import glob
import tempfile
from osgeo import gdal
from qgis.PyQt.QtCore import QTemporaryFile, QDir
from qgis.core import (QgsContrastEnhancement,
QgsRaster,
QgsRasterLayer,
QgsRasterChecker,
QgsRasterPipe,
QgsRasterFileWriter,
QgsRasterProjector,
QgsRectangle)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
class TestQgsRasterFileWriter(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self.testDataDir = unitTestDataPath()
self.report = "<h1>Python Raster File Writer Tests</h1>\n"
def write(self, theRasterName):
print(theRasterName)
path = "%s/%s" % (self.testDataDir, theRasterName)
rasterLayer = QgsRasterLayer(path, "test")
if not rasterLayer.isValid():
return False
provider = rasterLayer.dataProvider()
tmpFile = QTemporaryFile()
tmpFile.open() # fileName is not available until open
tmpName = tmpFile.fileName()
tmpFile.close()
# do not remove when class is destroyed so that we can read
        # the file and see the difference
tmpFile.setAutoRemove(False)
fileWriter = QgsRasterFileWriter(tmpName)
pipe = QgsRasterPipe()
if not pipe.set(provider.clone()):
print("Cannot set pipe provider")
return False
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
if not pipe.insert(2, projector):
print("Cannot set pipe projector")
return False
fileWriter.writeRaster(
pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs())
checker = QgsRasterChecker()
ok = checker.runTest("gdal", tmpName, "gdal", path)
self.report += checker.report()
# All OK, we can delete the file
tmpFile.setAutoRemove(ok)
return ok
def testWrite(self):
for name in glob.glob("%s/raster/*.tif" % self.testDataDir):
baseName = os.path.basename(name)
allOk = True
ok = self.write("raster/%s" % baseName)
if not ok:
allOk = False
reportFilePath = "%s/qgistest.html" % QDir.tempPath()
reportFile = open(reportFilePath, 'a')
reportFile.write(self.report)
reportFile.close()
assert allOk, "Raster file writer test failed"
def testDriverForExtension(self):
self.assertEqual(QgsRasterFileWriter.driverForExtension('tif'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('TIF'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('tIf'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.tif'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('img'), 'HFA')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.vrt'), 'VRT')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.jpg'), 'JPEG')
self.assertEqual(QgsRasterFileWriter.driverForExtension('asc'), 'AAIGrid')
self.assertEqual(QgsRasterFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsRasterFileWriter.driverForExtension(''), '')
def testExtensionsForFormat(self):
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('not format'), [])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('GTiff'), ['tiff', 'tif'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('GPKG'), ['gpkg'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('JPEG'), ['jpg', 'jpeg'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('AAIGrid'), ['asc'])
def testSupportedFiltersAndFormat(self):
# test with formats in recommended order
formats = QgsRasterFileWriter.supportedFiltersAndFormats(QgsRasterFileWriter.SortRecommended)
self.assertEqual(formats[0].filterString, 'GeoTIFF (*.tif *.TIF *.tiff *.TIFF)')
self.assertEqual(formats[0].driverName, 'GTiff')
self.assertTrue('netCDF' in [f.driverName for f in formats])
# alphabetical sorting
formats2 = QgsRasterFileWriter.supportedFiltersAndFormats(QgsRasterFileWriter.RasterFormatOptions())
self.assertTrue(formats2[0].driverName < formats2[1].driverName)
self.assertCountEqual([f.driverName for f in formats], [f.driverName for f in formats2])
self.assertNotEqual(formats2[0].driverName, 'GTiff')
def testSupportedFormatExtensions(self):
formats = QgsRasterFileWriter.supportedFormatExtensions()
self.assertTrue('tif' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'tif')
self.assertTrue('nc' in formats)
# alphabetical sorting
formats2 = QgsRasterFileWriter.supportedFormatExtensions(QgsRasterFileWriter.RasterFormatOptions())
self.assertTrue(formats2[1] < formats2[2])
self.assertCountEqual(formats, formats2)
self.assertNotEqual(formats2[0], 'tif')
def testImportIntoGpkg(self):
# init target file
test_gpkg = tempfile.mktemp(suffix='.gpkg', dir=self.testDataDir)
gdal.GetDriverByName('GPKG').Create(test_gpkg, 1, 1, 1)
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'band3_byte_noct_epsg4326.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(test_gpkg)
fw.setOutputFormat('gpkg')
fw.setCreateOptions(['RASTER_TABLE=imported_table', 'APPEND_SUBDATASET=YES'])
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
self.assertTrue(pipe.set(projector))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), 0)
# Check that the test geopackage contains the raster layer and compare
rlayer = QgsRasterLayer('GPKG:%s:imported_table' % test_gpkg)
self.assertTrue(rlayer.isValid())
out_provider = rlayer.dataProvider()
for i in range(3):
src_data = provider.block(i + 1, provider.extent(), source.width(), source.height())
out_data = out_provider.block(i + 1, out_provider.extent(), rlayer.width(), rlayer.height())
self.assertEqual(src_data.data(), out_data.data())
# remove result file
os.unlink(test_gpkg)
def testExportToGpkgWithExtraExtent(self):
tmpName = tempfile.mktemp(suffix='.gpkg')
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'band3_byte_noct_epsg4326.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
fw.setOutputFormat('gpkg')
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize() + 4,
provider.ySize() + 4,
QgsRectangle(-3 - 2, -4 - 2, 7 + 2, 6 + 2),
provider.crs()), 0)
del fw
# Check that the test geopackage contains the raster layer and compare
rlayer = QgsRasterLayer(tmpName)
self.assertTrue(rlayer.isValid())
out_provider = rlayer.dataProvider()
for i in range(3):
src_data = provider.block(i + 1, provider.extent(), source.width(), source.height())
out_data = out_provider.block(i + 1, provider.extent(), source.width(), source.height())
self.assertEqual(src_data.data(), out_data.data())
out_data = out_provider.block(1, QgsRectangle(7, -4, 7 + 2, 6), 2, 8)
# band3_byte_noct_epsg4326 nodata is 255
self.assertEqual(out_data.data().data(), b'\xff' * 2 * 8)
del out_provider
del rlayer
# remove result file
os.unlink(tmpName)
def testExportToGpkgWithExtraExtentNoNoData(self):
tmpName = tempfile.mktemp(suffix='.gpkg')
# Remove nodata
gdal.Translate('/vsimem/src.tif', os.path.join(self.testDataDir, 'raster', 'band3_byte_noct_epsg4326.tif'), options='-a_nodata none')
source = QgsRasterLayer('/vsimem/src.tif', 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
fw.setOutputFormat('gpkg')
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize() + 4,
provider.ySize() + 4,
QgsRectangle(-3 - 2, -4 - 2, 7 + 2, 6 + 2),
provider.crs()), 0)
del fw
# Check that the test geopackage contains the raster layer and compare
rlayer = QgsRasterLayer(tmpName)
self.assertTrue(rlayer.isValid())
out_provider = rlayer.dataProvider()
for i in range(3):
src_data = provider.block(i + 1, provider.extent(), source.width(), source.height())
out_data = out_provider.block(i + 1, provider.extent(), source.width(), source.height())
self.assertEqual(src_data.data(), out_data.data())
out_data = out_provider.block(1, QgsRectangle(7, -4, 7 + 2, 6), 2, 8)
# No nodata: defaults to zero
self.assertEqual(out_data.data().data(), b'\x00' * 2 * 8)
del out_provider
del rlayer
# remove result file
gdal.Unlink('/vsimem/src.tif')
os.unlink(tmpName)
def _testGeneratePyramids(self, pyramidFormat):
tmpName = tempfile.mktemp(suffix='.tif')
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
fw.setBuildPyramidsFlag(QgsRaster.PyramidsFlagYes)
fw.setPyramidsFormat(pyramidFormat)
fw.setPyramidsList([2])
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
self.assertTrue(pipe.set(projector))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), 0)
del fw
ds = gdal.Open(tmpName)
self.assertEqual(ds.RasterCount, 1)
self.assertEqual(ds.GetRasterBand(1).Checksum(), 4672)
self.assertEqual(ds.GetRasterBand(1).GetOverviewCount(), 1)
fl = ds.GetFileList()
if pyramidFormat == QgsRaster.PyramidsGTiff:
self.assertEqual(len(fl), 2, fl)
self.assertIn('.ovr', fl[1])
elif pyramidFormat == QgsRaster.PyramidsInternal:
self.assertEqual(len(fl), 1, fl)
elif pyramidFormat == QgsRaster.PyramidsErdas:
self.assertEqual(len(fl), 2, fl)
self.assertIn('.aux', fl[1])
os.unlink(tmpName)
def testGeneratePyramidsExternal(self):
return self._testGeneratePyramids(QgsRaster.PyramidsGTiff)
def testGeneratePyramidsInternal(self):
return self._testGeneratePyramids(QgsRaster.PyramidsInternal)
def testGeneratePyramidsErdas(self):
return self._testGeneratePyramids(QgsRaster.PyramidsErdas)
def testWriteAsRawInvalidOutputFile(self):
tmpName = "/this/is/invalid/file.tif"
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), QgsRasterFileWriter.CreateDatasourceError)
del fw
def testWriteAsImage(self):
tmpName = tempfile.mktemp(suffix='.tif')
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
source.setContrastEnhancement(algorithm=QgsContrastEnhancement.NoEnhancement)
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
self.assertEqual(fw.writeRaster(source.pipe(),
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), QgsRasterFileWriter.NoError)
ds = gdal.Open(tmpName)
self.assertEqual(ds.RasterCount, 4)
self.assertEqual(ds.GetRasterBand(1).Checksum(), 4672)
self.assertEqual(ds.GetRasterBand(2).Checksum(), 4672)
self.assertEqual(ds.GetRasterBand(3).Checksum(), 4672)
self.assertEqual(ds.GetRasterBand(4).Checksum(), 4873)
ds = None
del fw
os.unlink(tmpName)
def testWriteAsImageInvalidOutputPath(self):
tmpName = "/this/is/invalid/file.tif"
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
source.setContrastEnhancement(algorithm=QgsContrastEnhancement.NoEnhancement)
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
self.assertEqual(fw.writeRaster(source.pipe(),
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), QgsRasterFileWriter.CreateDatasourceError)
del fw
def testWriteAsRawGS7BG(self):
''' Test that despite writing a Byte raster, we correctly handle GS7BG creating a Float64 '''
tmpName = tempfile.mktemp(suffix='.grd')
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
fw.setOutputFormat('GS7BG')
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), QgsRasterFileWriter.NoError)
del fw
ds = gdal.Open(tmpName)
self.assertEqual(ds.RasterCount, 1)
self.assertEqual(ds.GetRasterBand(1).Checksum(), 4672)
ds = None
os.unlink(tmpName)
if __name__ == '__main__':
unittest.main()
|
pblottiere/QGIS
|
tests/src/python/test_qgsrasterfilewriter.py
|
Python
|
gpl-2.0
| 16,469
|
[
"NetCDF"
] |
123bc24c2283588a3fc629595c2071ef9f05e6693d0ac6037dc05855a157c7f3
|
# Copyright 2015 Allen Institute for Brain Science
# This file is part of Allen SDK.
#
# Allen SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Allen SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Allen SDK. If not, see <http://www.gnu.org/licenses/>.
import json, logging
from allensdk.api.queries.rma_api import RmaApi
class GlifApi(RmaApi):
def __init__(self, base_uri=None):
super(RmaApi, self).__init__(base_uri)
self.neuronal_model = None
self.ephys_sweeps = None
self.stimulus_url = None
self.neuron_config_url = None
def list_neuronal_models(self):
''' Query the API for a list of all GLIF neuronal models.
Returns
-------
list
Meta data for all GLIF neuronal models.
'''
include = "specimen(ephys_result),neuronal_model_template[name$il'*LIF*']"
return self.model_query('NeuronalModel', include=include, num_rows='all')
def get_neuronal_model(self, neuronal_model_id):
'''Query the current RMA endpoint with a neuronal_model id
to get the corresponding well known files and meta data.
Returns
-------
dict
A dictionary containing
'''
include = ( 'neuronal_model_template(well_known_files(well_known_file_type)),' +
'specimen(ephys_sweeps,ephys_result(well_known_files(well_known_file_type))),' +
'well_known_files(well_known_file_type)' )
criteria = "[id$eq%d]" % neuronal_model_id
self.neuronal_model = self.model_query('NeuronalModel',
criteria=criteria,
include=include,
num_rows='all')[0]
self.ephys_sweeps = None
self.neuron_config_url = None
self.stimulus_url = None
# sweeps come from the specimen
try:
specimen = self.neuronal_model['specimen']
self.ephys_sweeps = specimen['ephys_sweeps']
except Exception, e:
print e.message
self.ephys_sweeps = None
if self.ephys_sweeps is None:
logging.warning("Could not find ephys_sweeps for this model (%d)" % self.neuronal_model['id'])
# neuron config file comes from the neuronal model's well known files
try:
for wkf in self.neuronal_model['well_known_files']:
if wkf['path'].endswith('neuron_config.json'):
self.neuron_config_url = wkf['download_link']
break
except Exception, e:
self.neuron_config_url = None
if self.neuron_config_url is None:
logging.warning("Could not find neuron config well_known_file for this model (%d)" % self.neuronal_model['id'])
# NWB file comes from the ephys_result's well known files
try:
ephys_result = specimen['ephys_result']
for wkf in ephys_result['well_known_files']:
if wkf['well_known_file_type']['name'] == 'NWB':
self.stimulus_url = wkf['download_link']
break
except Exception, e:
self.stimulus_url = None
if self.stimulus_url is None:
logging.warning("Could not find stimulus well_known_file for this model (%d)" % self.neuronal_model['id'])
self.metadata = {
'neuron_config_url': self.neuron_config_url,
'stimulus_url': self.stimulus_url,
'ephys_sweeps': self.ephys_sweeps,
'neuronal_model': self.neuronal_model
}
return self.metadata
def get_ephys_sweeps(self):
''' Retrieve ephys sweep information out of downloaded metadata for a neuronal model
Returns
-------
list
A list of sweeps metadata dictionaries
'''
return self.ephys_sweeps
def get_neuron_config(self, output_file_name=None):
''' Retrieve a model configuration file from the API, optionally save it to disk, and
return the contents of that file as a dictionary.
Parameters
----------
output_file_name: string
File name to store the neuron configuration (optional).
'''
if self.neuron_config_url is None:
raise Exception("URL for neuron config file is empty.")
print self.api_url + self.neuron_config_url
neuron_config = self.retrieve_parsed_json_over_http(self.api_url + self.neuron_config_url)
if output_file_name:
with open(output_file_name, 'wb') as f:
f.write(json.dumps(neuron_config, indent=2))
return neuron_config
def cache_stimulus_file(self, output_file_name):
''' Download the NWB file for the current neuronal model and save it to a file.
Parameters
----------
output_file_name: string
File name to store the NWB file.
'''
if self.stimulus_url is None:
raise Exception("URL for stimulus file is empty.")
self.retrieve_file_over_http(self.api_url + self.metadata['stimulus_url'], output_file_name)
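# --- Illustrative usage (added for this listing; not part of the original
# module). The model id below is a placeholder; real ids come from
# list_neuronal_models(). Sketch only:
#
#     api = GlifApi()
#     metadata = api.get_neuronal_model(12345)          # placeholder id
#     sweeps = api.get_ephys_sweeps()
#     config = api.get_neuron_config('neuron_config.json')
#     api.cache_stimulus_file('stimulus.nwb')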
|
wvangeit/AllenSDK
|
allensdk/api/queries/glif_api.py
|
Python
|
gpl-3.0
| 5,717
|
[
"NEURON"
] |
cde7019aff424d37c939c8c36310d8dc5c602cec6e0ee6a70e8d11ba4859a710
|
#!/usr/bin/env python2
#
# Copyright 2015 Anthony Scemama
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file can be downloaded here:
# https://raw.githubusercontent.com/scemama/ninja_ocaml/master/ninja_ocaml.py
#
"""Build OCaml projects using ninja."""
__author__ = """Anthony Scemama <scemama@irsamc.ups-tlse.fr>"""
import os
import sys
import subprocess
def _help_ ():
print """
1) Download and install ninja :
https://github.com/martine/ninja/releases/latest
2) Copy the script into your OCaml project.
3) Run the script. It will build a default build.ninja file
4) Edit the build.ninja file
5) Compile the main target using `ninja`
6) Compile all the targets using `ninja all`
"""
def create_generated_ninja():
"""Creates the generated.ninja file"""
# Header
PACKAGES=""
THREAD=""
SYNTAX=""
OCAMLC_FLAGS=""
GENERATED_NINJA="generated.ninja"
with open('build.ninja','r') as f:
for line in f:
if line.startswith("PACKAGES"):
PACKAGES=line.split('=',1)[1].strip()
elif line.startswith("THREAD"):
THREAD=line.split('=',1)[1].strip()
elif line.startswith("SYNTAX"):
SYNTAX=line.split('=',1)[1].strip()
elif line.startswith("OCAMLC_FLAGS"):
OCAMLC_FLAGS=line.split('=',1)[1].strip()
elif line.startswith("LINK_FLAGS"):
LINK_FLAGS=line.split('=',1)[1].strip()
elif line.startswith("GENERATED_NINJA"):
GENERATED_NINJA=line.split('=',1)[1].strip()
if PACKAGES != "":
LINK_FLAGS = "-linkpkg "+PACKAGES
header = [
"""
########################################################
# This file was auto-generated. #
# This file will be overwritten. Don't edit this file! #
# Changes should be done in the build.ninja file. #
########################################################
""",
"PACKAGES=%s"%(PACKAGES),
"THREAD=%s"%(THREAD),
"SYNTAX=%s"%(SYNTAX),
"OCAMLC_FLAGS=%s"%(OCAMLC_FLAGS),
"LINK_FLAGS=%s"%(LINK_FLAGS),
"GENERATED_NINJA=%s"%(GENERATED_NINJA),
]
header += """
rule ocamlc
command = ocamlfind ocamlc -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt
command = ocamlfind ocamlopt -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $o $in
description = Compiling $out (native)
rule ocamlc_link
command = ocamlfind ocamlc $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt_link
command = ocamlfind ocamlopt $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (native)
""".splitlines()
# Get the list of .ml files
all_files = os.listdir(os.getcwd())
files = [ os.path.splitext(i)[0] for i in all_files if i.endswith('.ml') ]
while "myocamlbuild" in files:
files.remove("myocamlbuild")
ml_files = ' '.join( [ '%s.ml'%i for i in files ] )
# Dependencies
result = subprocess.Popen(
("ocamlfind ocamldep {0} {1} {2}".format(PACKAGES,SYNTAX,ml_files)).split()
,stdout=subprocess.PIPE).communicate()[0]
result = result.replace('\\\n',' ')
dependencies = {}
for line in result.splitlines():
key, value = line.split(':')
dependencies[key.strip()] = value.strip()
result = header
template = """
build {0}.cmi: ocamlc {0}.mli | $GENERATED_NINJA
build {0}.cmo: ocamlc {0}.ml | $GENERATED_NINJA {1}
build {0}.cmx {0}.o: ocamlopt {0}.ml | $GENERATED_NINJA {2}
o = {0}.o
"""
template_root_byte = """
build {2}.byte: ocamlc_link {1} {0}
"""
template_root_native = """
build {2}: ocamlopt_link {1} {0}
"""
# Find roots
dep = {}
for f in dependencies:
dep[f] = [ i.strip() for i in dependencies[f].split() ]
roots = {}
for f in dependencies:
Found = False
for g,l in dep.iteritems():
if f in l:
Found = True
if not Found:
roots[f] = []
def get_deps(l):
result = []
for i in l:
if i in dep:
result += get_deps(dep[i])
result += l
newresult = []
for r in result:
if r not in newresult:
newresult.append(r)
return newresult
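    # Worked example (added for this listing; illustration only): with
    # dep = {'a.cmo': ['b.cmo'], 'b.cmo': ['c.cmo']}, get_deps(['a.cmo'])
    # returns ['c.cmo', 'b.cmo', 'a.cmo'], i.e. transitive dependencies
    # ordered before the modules that need them.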
for r in roots:
roots[r] = [ i for i in get_deps(dep[r]) if not i.endswith(".cmi") ]
# Write the $GENERATED_NINJA file
result += [ template.format(basename,
dependencies["%s.cmo"%basename],
dependencies["%s.cmx"%basename]
) for basename in files ]
result += [ template_root_byte.format(basename,
' '.join(roots[basename]),
os.path.splitext(basename)[0]
) for basename in roots if basename.endswith('.cmo')]
result += [ template_root_native.format(basename,
' '.join(roots[basename]),
os.path.splitext(basename)[0]
) for basename in roots if basename.endswith('.cmx')]
output = '\n'.join(result)
try:
with open(GENERATED_NINJA,'r') as f:
inp = f.read()
except IOError:
inp = ""
if inp != output:
with open(GENERATED_NINJA,'w') as f:
f.write(output)
def create_build_ninja ():
with open('build.ninja','w') as f:
f.write("""
MAIN=
# Main program to build
PACKAGES=
# Required opam packages, for example:
# PACKAGES=-package core
THREAD=
# If you need threading support, use:
# THREAD=-thread
SYNTAX=
# If you need pre-processing, use:
# SYNTAX=-syntax camlp4o
OCAMLC_FLAGS=
# Flags to give to ocamlc, for example:
# OCAMLC_FLAGS=-g -warn-error A
LINK_FLAGS=
# Flags to give to the linker, for example:
# LINK_FLAGS=-cclib '-Wl,-rpath=../lib,--enable-new-dtags'
GENERATED_NINJA=generated.ninja
# Name of the auto-generated ninja file
rule create_generated
command = python2 ./ninja_ocaml.py
description = Finding dependencies between modules
rule run_ninja
command = ninja -f $in $target
description = Compiling OCaml executables
pool = console
rule run_clean
command = ninja -f $GENERATED_NINJA -t clean ; rm $GENERATED_NINJA
pool = console
description = Cleaning directory
rule ocamlc
command = ocamlfind ocamlc -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $in (bytecode)
rule ocamlopt
command = ocamlfind ocamlopt -c $OCAMLC_FLAGS $THREAD $PACKAGES $SYNTAX -o $out $in
description = Compiling $in (native)
rule ocamlc_link
command = ocamlfind ocamlc $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (bytecode)
rule ocamlopt_link
command = ocamlfind ocamlopt $OCAMLC_FLAGS $THREAD $LINK_FLAGS $PACKAGES $SYNTAX -o $out $in
description = Compiling $out (native)
build clean: run_clean
build always $GENERATED_NINJA: create_generated
build $MAIN: run_ninja $GENERATED_NINJA
target = $MAIN
build all: run_ninja $GENERATED_NINJA
target =
default $MAIN
""")
def main():
for h in "help -h -help --help ?".split():
if h in sys.argv:
_help_ ()
return
if "build.ninja" in os.listdir(os.getcwd()):
create_generated_ninja ()
else:
create_build_ninja ()
print """
==========================================================
A default build.ninja file was created.
Now, edit build.ninja and compile your project using:
ninja
==========================================================
"""
if __name__ == '__main__':
main()
|
scemama/qmcchem
|
ocaml/ninja_ocaml.py
|
Python
|
gpl-2.0
| 8,180
|
[
"cclib"
] |
94576f5b52605daee284cfebd037834353add738377a9ff5c2d70e0d73333204
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Sprax Lines 2017.04.01 Written with Python 3.5
# sum_file.py (2019.06) based on test_filters.py (2017.04.01)
# To re-import a library in Python 3.4+, do:
# import importlib
# importlib.reload(nameOfModule)
'''selected text-filtering functions, e.g. for git blame output'''
import argparse
import heapq
import os.path
import re
import math
import pdb
from pdb import set_trace
import string
import sys
import text_ops
from utf_print import utf_print
import inflection
from collections import defaultdict
# from collections import Counter
###############################################################################
TRANS_NO_WHAT = str.maketrans(u"\u2018\u2019\u201c\u201d", "\'\'\"\"")
TRANS_NO_SMART = str.maketrans("\x91\x92\x93\x94", "''\"\"")
TRANS_NO_PUNCT = str.maketrans('', '', string.punctuation)
TRANS_NO_DIGITS = str.maketrans('', '', string.digits)
UNICODE_TO_ASCII = str.maketrans({
u"\u2018" : "'",
u"\u2019" : "'",
u"\u201c" : '"',
u"\u201d" : '"',
})
ISO_TO_ASCII = str.maketrans({
"`" : "'",
u"\x91" : "'",
u"\x92" : "'",
u"\x93" : '"',
u"\x94" : '"',
u"\x97" : '--',
u"\xf0" : '-',
})
def translate_smart_quotes(in_str, table=TRANS_NO_SMART):
'''Replace curly quotes with straight ones.'''
return in_str.translate(table)
def translate_iso_to_ascii(in_str):
'''Replace curly quotes with straight ones, etc.'''
return in_str.translate(ISO_TO_ASCII)
def remove_punctuation(in_str, table=TRANS_NO_PUNCT):
'''Remove all string.punctuation characters.'''
return in_str.translate(table)
def replace_quotes(instr):
'''Replace curly quotes one-by-one (slow)'''
return instr.replace("\x91", "'").replace("\x92", "'")\
.replace("\x93", '"').replace("\x94", '"')
def replace_emdashes(in_str):
'''Replace each em-dash with two hyphens (--).'''
return in_str.replace("\x97", "--")
#TODO: if really bored, implement reverse_trans for each class
class IsoToAscii:
'''Translate non-ASCII characters to ASCII or nothing'''
translation = ISO_TO_ASCII
def translate(self, in_str):
'''Translate non-ASCII characters to ASCII or nothing'''
try:
return in_str.encode('ascii')
except UnicodeEncodeError:
out = in_str.translate(self.translation)
return ''.join([asc for asc in out if ord(asc) < 128])
class NoSpaceBeforePunct:
'''Eliminate spaces before punctuation'''
regex = re.compile(r' ([!%,./:;?])')
def translate(self, in_str):
'''Eliminate spaces before punctuation'''
result = re.sub(r'\s+', ' ', in_str)
return self.regex.sub(r'\1', result)
class TwoSingleQuoteToDoubleQuote:
'''Translate two single-quotes to one double-quote marker'''
regex = re.compile(" ''([ !\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]|$)")
def translate(self, in_str):
'''Translate two single-quotes to one double-quote marker'''
return self.regex.sub(r' "\1', in_str)
class JoinContractions:
'''Rejoin tokenized contractions.'''
regex = re.compile(r"\b(.*) (n't|'s) ")
def translate(self, in_str):
'''Rejoin tokenized contractions.'''
return self.regex.sub(r"\1\2 ", in_str)
class JoinPossessive:
    '''Rejoin tokenized word and possessive apostrophe marker'''
regex = re.compile(" ' ")
def translate(self, in_str):
        '''Rejoin tokenized word and possessive apostrophe marker'''
return self.regex.sub(r"' ", in_str)
class JoinQuoted:
    '''Rejoin quotation marks with the text they quote'''
regex = re.compile(r"([\"']) ((?:\\\1|.)*?) \1")
def translate(self, in_str):
        '''Rejoin quotation marks with the text they quote'''
return self.regex.sub(r"\1\2\1", in_str)
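# Worked examples for the translators above (added for this listing;
# illustration only):
#   NoSpaceBeforePunct().translate("Hello , world !")  -> "Hello, world!"
#   JoinContractions().translate("I do n't know ")     -> "I don't know "
#   JoinPossessive().translate("James ' hat")          -> "James' hat"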
def filter_non_ascii(in_str):
    '''Deprecated because it relies on the 'filter' builtin'''
return "".join(filter(lambda x: ord(x) < 128, in_str))
def remove_non_ascii(in_str):
'''filter out non-ASCII characters'''
return "".join(i for i in in_str if ord(i) < 128)
def translate_to_ascii(in_str):
'''try to translate any text to ASCII'''
try:
return translate_iso_to_ascii(in_str)
except UnicodeDecodeError:
return in_str
###############################################################################
def read_lines_to_ascii(file_spec, charset='utf-8'):
'''read and return all lines of a text file as a list of ASCII str'''
    with open(file_spec, 'r', encoding=charset, errors='ignore') as text:
        for line in text:
            # in Python 3 the text-mode lines are already decoded str objects
            yield line.rstrip()
def utf_print_words(fspec):
'''print each word in an ASCII or UTF-8 encoded text'''
with open(fspec, 'r', encoding="utf8") as text:
for line in text:
words = re.split(r'\W+', line.rstrip())
for word in words:
if len(word) > 0:
utf_print(word)
utf_print(words)
def rank_dict_by_value(summary_count, ranking):
'''Return the highest ranked N dictionary entries.'''
return heapq.nlargest(summary_count, ranking, key=ranking.get)
def resolve_count(sub_count, percent, total_count):
'''returns reconciled sub-count and percentage of total, where count trumps percentage'''
if not sub_count:
sub_count = int(math.ceil(percent * total_count / 100.0))
if sub_count > total_count:
sub_count = total_count
if sub_count < 1:
sub_count = 1
percent = sub_count * 100.0 / total_count
return sub_count, percent
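# Worked examples (added for this listing; illustration only):
#   resolve_count(0, 25, 8)  -> (2, 25.0)    # 25% of 8 lines, rounded up
#   resolve_count(12, 25, 8) -> (8, 100.0)   # explicit count is capped at total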
def map_file(function, in_path, out_path, charset='utf8'):
'''Apply function to every line in the input file'''
with open(in_path, 'r', encoding=charset) as text:
with open(out_path, 'w') as out_file:
for line in text:
output = function(line.rstrip())
if output:
print(output, file=out_file)
def translate_para_file(para_filter, in_path, out_path, charset='utf8'):
'''Generator yielding filtered paragraphs from a text file'''
with open(in_path, 'r', encoding=charset) as text:
with open(out_path, 'w') as out_file:
for para in text_ops.paragraph_iter(text):
output = para_filter.filter_line(para)
print(output if output else ' ', file=out_file)
def translate_lines_in_file(line_translators, in_path, out_path, charset='utf8'):
'''
Translate input line by line to output file.
Usage: translate_lines_in_file(line_translators, in_path, out_path, charset='utf8')
'''
with open(in_path, 'r', encoding=charset) as text:
with (sys.stdout if out_path == '-' else open(out_path, 'w')) as out_file:
for line in text:
for translator in line_translators:
line = translator.translate(line)
if line:
print(line, file=out_file)
########################################################
def translate_file(in_path, out_path, opt):
"""Rewrite a text file."""
if opt.git_sum:
return add_git_sums_to_file(in_path, out_path, opt.charset)
# Announce output:
print(in_path, '====>', '<stdout>' if out_path == '-' else out_path)
print('-------------------------------------------------------------------')
translators = [IsoToAscii(),
JoinContractions(),
NoSpaceBeforePunct(),
TwoSingleQuoteToDoubleQuote(),
JoinPossessive(),
JoinQuoted()]
translate_lines_in_file(translators, in_path, out_path, opt.charset)
###############################################################################
def pluralize(word):
'''
Return the plural form of the given word.
TODO: Check that word is a noun (or an adjective or at any rate can
be sensibly used as a noun) before calling inflection.pluralize?
If not, return (word, false)
FIXME BUGS: inflection is often wrong, e.g. (safe <-> saves)
'''
if word.lower()[-3:] == 'afe':
return word + 's'
return inflection.pluralize(word)
def singularize(word):
'''
Return the singular form of the given word.
TODO: Check that word is a noun (or an adjective or at any rate can
be sensibly used as a noun) before calling inflection.singularize?
FIXME BUGS: inflection returns many wrong answers by pattern:
*aves -> *afe
It uses incomplete special case matching (octopus),
and does not recognize many other pairs such as:
(locus, loci)
NB: pattern3.en is not yet functional (2017.07.10)
'''
if word.lower()[-4:] == 'aves':
return word.rstrip('sS')
return inflection.singularize(word)
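# Worked examples for the special cases above (added for this listing;
# illustration only):
#   pluralize('safe')    -> 'safes'  (bypasses inflection, which maps safe <-> saves)
#   singularize('waves') -> 'wave'   (bypasses inflection's '*aves -> *afe' pattern)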
def plural_if_diff(word):
'''return the plural form of word if different from the singular, else None'''
plur = pluralize(word)
sing = singularize(word)
return plur if plur != sing else None
def singular_if_diff(word):
'''return the singular form of word if different from the plural, else None'''
plur = pluralize(word)
sing = singularize(word)
return sing if plur != sing else None
###############################################################################
def abs_path(dir_spec, file_spec):
'''Returns an absolute path based on a dir_spec and a (relative) file_spec'''
if os.path.isabs(file_spec):
return file_spec
return os.path.join(dir_spec, file_spec)
########################################################
AUTHOR_NAME = {
'dav' : 'DMSJ',
'dms' : 'DMSJ',
'spr' : 'Sprax',
'syl' : 'Syler',
'5yl' : 'Syler',
'dun' : 'Duncan',
'alb' : 'Albert',
'jay' : 'JayMW',
'mit' : 'MHebert',
'ter' : 'T2',
'dex' : 'Dexai',
}
AUTHOR_NAME = defaultdict(lambda:"OTHER", AUTHOR_NAME)
def print_author_count(ddct, out_file=sys.stdout, prefix="git_blames"):
'''Print the author_count dict to stdout (default) or a file'''
total = 0.0
for _, val in ddct.items():
total += val
for key, val in sorted(ddct.items(), key=lambda dit: dit[1], reverse=True):
# author = AUTHOR_NAME[key]
author = key
percent = val * 100.0 / total
print("%s: %14s: %6d %7.2f%%" % (prefix, author, val, percent), file=out_file)
def add_git_sums_to_file(in_path, out_path, charset='utf8'):
'''
    Copy the input file line by line to the output file, inserting a git blame
    author-count summary after each blame block and overall totals at the end.
    Usage: add_git_sums_to_file(in_path, out_path, charset='utf8')
'''
print("GIT SUM: in/out: %s -> %s" % (in_path, out_path))
out_of_sum = True
with open(in_path, 'r', encoding=charset) as text:
with (sys.stdout if out_path == '-' else open(out_path, 'w')) as out_file:
total_counts = defaultdict(int)
for line in text:
# set_trace()
line = line.rstrip()
toks = re.split(r'\W+', line.lstrip())
if len(toks) > 2 and toks[1] == 'author':
author = toks[2]
count = int(toks[0]) # TODO: check if parseint succeeds
# print("================= %s => %d" % (author, count))
if out_of_sum:
out_of_sum = False
author_count = defaultdict(int)
author_count[author] += count
total_counts[author] += count
else:
if not out_of_sum:
print_author_count(author_count, out_file=out_file)
out_of_sum = True
elif line:
print(line, file=out_file)
print_author_count(total_counts, out_file=out_file, prefix="GIT_TOTALS")
def sum_text_file():
'''Filter lines or sentences in a text file.'''
parser = argparse.ArgumentParser(
# usage='%(prog)s [options]',
description="test text_filters")
parser.add_argument('in_path', type=str, nargs='?', default='train_1000.label',
help='file containing text to filter')
parser.add_argument('-dir', dest='text_dir', type=str, default='/Users/sprax/Text',
help='directory to search for in_path')
parser.add_argument('-charset', dest='charset', type=str, default='iso-8859-1',
help='charset encoding of input text')
parser.add_argument('-git_sum', action='store_true',
help='output original file with added git blame summaries')
parser.add_argument('-list_numbers', action='store_true',
help='output list number for each filtered sentence')
parser.add_argument('-map_file', action='store_true',
help='test map_file')
parser.add_argument('-number', dest='max_lines', type=int, nargs='?', const=1, default=0,
help='number of sentences to keep (default: 5), overrides -percent')
parser.add_argument('-out_path', type=str, nargs='?', default='-',
help='output path for filtered text (default: - <stdout>)')
parser.add_argument('-truncate', dest='max_words', type=int, nargs='?',
const=8, default=0,
help='truncate sentences after MAX words (default: INT_MAX)')
parser.add_argument('-verbose', type=int, nargs='?', const=1, default=1,
help='verbosity of output (default: 1)')
args = parser.parse_args()
if args.map_file:
# map_file(singular_if_diff, args.in_path, args.out_path)
map_file(singularize, args.in_path, args.out_path)
exit(0)
if args.verbose > 7:
print("out_path: <{}>".format(args.out_path))
print("args:", args)
print(__doc__)
exit(0)
in_path = abs_path(args.text_dir, args.in_path)
out_path = args.out_path
if out_path != '-':
out_path = abs_path(args.text_dir, args.out_path)
translate_file(in_path, out_path, args)
if __name__ == '__main__':
sum_text_file()
|
sprax/python
|
txt/sum_file.py
|
Python
|
lgpl-3.0
| 14,334
|
[
"Octopus"
] |
6bce79a0982a4897af1ba7b0de1299627f6df03029dea62e9f738c147419195e
|
# Copyright (C) 2012,2013, 2017
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.interaction.VSphereSelf
**********************************
This class provides methods to compute forces and energies of
the VSphereSelf potential.
.. math::
U = e_1\left(\frac{4}{3}\pi \sigma^2\right)^{\frac{3}{2}}
+ \frac{a_1 {N_b}^3}{\sigma^6}
+ \frac{a_2}{N_b} \sigma^2
Reference: Fluctuating soft-sphere approach to coarse-graining of polymer melts, Soft Matter, 2010, 6, 2282
.. function:: espressopp.interaction.VSphereSelf(e1, a1, a2, Nb, cutoff, shift)
:param e1: (default: 0.0)
:param a1: (default: 1.0)
:param a2: (default: 0.0)
:param Nb: (default: 1)
:param cutoff: (default: infinity)
:param shift: (default: 0.0)
:type e1: real
:type a1: real
:type a2: real
:type Nb: int
:type cutoff:
:type shift: real
.. function:: espressopp.interaction.SelfVSphere(system, potential)
:param system:
:param potential:
:type system:
:type potential:
.. function:: espressopp.interaction.SelfVSphere.getPotential()
:rtype:
.. function:: espressopp.interaction.SelfVSphere.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_VSphereSelf, interaction_SelfVSphere
class VSphereSelfLocal(PotentialLocal, interaction_VSphereSelf):
def __init__(self, e1=0.0, a1=1.0, a2=0.0, Nb=1,
cutoff=infinity, shift=0.0):
"""Initialize the local VSphere object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift == "auto":
cxxinit(self, interaction_VSphereSelf, e1, a1, a2, Nb, cutoff)
else:
cxxinit(self, interaction_VSphereSelf, e1, a1, a2, Nb, cutoff, shift)
class SelfVSphereLocal(InteractionLocal, interaction_SelfVSphere):
def __init__(self, system, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_SelfVSphere, system, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getPotential(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self)
if pmi.isController:
class VSphereSelf(Potential):
'The VSphereSelf potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.VSphereSelfLocal',
pmiproperty = ['e1', 'a1', 'a2', 'Nb']
)
class SelfVSphere(Interaction, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.interaction.SelfVSphereLocal',
pmicall = ['setPotential','getPotential']
)
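# --- Illustrative usage (added for this listing; not part of the original
# module). 'system' is assumed to be an already configured espressopp.System,
# and the parameter values are placeholders, not recommendations. Sketch only:
#
#     pot = espressopp.interaction.VSphereSelf(e1=1.0, a1=1.0, a2=1.0,
#                                              Nb=64, cutoff=2.5)
#     inter = espressopp.interaction.SelfVSphere(system, pot)
#     system.addInteraction(inter)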
|
espressopp/espressopp
|
src/interaction/VSphereSelf.py
|
Python
|
gpl-3.0
| 4,263
|
[
"ESPResSo"
] |
9914bd3204ab19b3b845a2112027235036b7f6f742b3f24aec67032e12ab4a94
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Firefly documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 21 11:32:55 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Firefly'
copyright = '2017, rorodata'
author = 'rorodata'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fireflydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Firefly.tex', 'Firefly Documentation',
'rorodata', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'firefly', 'Firefly Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Firefly', 'Firefly Documentation',
author, 'Firefly', 'One line description of project.',
'Miscellaneous'),
]
|
rorodata/firefly
|
docs/conf.py
|
Python
|
apache-2.0
| 4,698
|
[
"Firefly"
] |
1762dea7e4185690ae4c0431df69a1cfa4f289e3bed8201f4449896033b5a3f6
|
"""
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
|
RPGOne/Skynet
|
scikit-learn-0.18.1/examples/gaussian_process/plot_gpr_co2.py
|
Python
|
bsd-3-clause
| 5,705
|
[
"Gaussian"
] |
c1a1b82efa8e6142b669a3b5c40b9032ea0c76598fd3514dc70c81a3a633d0f3
|
'''
Utilities for running the external Pairwise Homogenization Algorithm (PHA) software:
Menne, M.J., and C.N. Williams, Jr., 2009: Homogenization of temperature series
via pairwise comparisons. J. Climate, 22, 1700-1717.
Copyright 2014, Jared Oyler.
This file is part of TopoWx.
TopoWx is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TopoWx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TopoWx. If not, see <http://www.gnu.org/licenses/>.
'''
__all__ = ['setup_pha', 'run_pha', 'HomogDaily', 'InsertHomog',
'load_snotel_sensor_hist', 'get_pha_adj_df']
from datetime import datetime
from dateutil.relativedelta import relativedelta
from twx.db import LON, LAT, STN_ID, ELEV, STATE, STN_NAME, MISSING, DTYPE_STNOBS
from twx.utils import get_mth_metadata, YEAR, MONTH, YMD, DATE, DAY
import glob
import numpy as np
import os
import pandas as pd
import subprocess
import twx
INCL_LINE_BEGIN_YR = ' parameter (begyr = 1895)\n'
INCL_LINE_END_YR = ' parameter (endyr = 2015)\n'
INCL_LINE_N_STNS = ' parameter (maxstns = 7720)\n'
CONF_LINE_END_YR = 'endyr=1999\n'
CONF_LINE_MAX_YRS = 'maxyrs=500000\n'
CONF_LINE_ELEMS = 'elems="tavg"\n'
DTYPE_PHA_ADJ = [(STN_ID, "<S50"), ('ymd_start',np.int),('ymd_end',np.int),('adj', np.float64)]
def setup_pha(fpath_pha_tar, path_out_src, path_out_run, yr_begin, yr_end, stns,
tair, varname, stnhist=None):
'''
Perform setup for running external Pairwise Homogenization Algorithm (PHA) software
from NCDC. Setup has been tested against PHA v52i downloaded from
ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/software/. Reference:
Menne, M.J., and C.N. Williams, Jr., 2009: Homogenization of temperature series
via pairwise comparisons. J. Climate, 22, 1700-1717.
Parameters
----------
fpath_pha_tar : str
File path to the main PHA tar.gz file (eg phav52i.tar.gz) downloaded from NCDC
path_out_src : str
File path to where PHA source code should be written.
path_out_run : str
File path where PHA will be executed
yr_begin : int
The start year for the PHA run
yr_end : int
The end year for the PHA run
stns : structured ndarray
Stations for which to run PHA. Structured station array must contain at
least the following fields: STN_ID, LAT, LON and can be obtained from
twx.db.StationDataDb
tair : MaskedArray
A 2-D numpy MaskedArray of monthly temperature observations of shape P*N
where P is the number of months between yr_begin and yr_end and N is the
number of stations. Each column is a station's time series and must be in
same order as stns.
varname : str
Temperature variable name (tmin or tmax)
stnhist : pandas.DataFrame, optional
DataFrame of station history metadata: station_id as index and
change_date column specifying the date of a station change.
'''
n_stns = stns.size
n_stnyrs = (yr_end - yr_begin + 1) * n_stns
yrs = np.arange(yr_begin, yr_end + 1)
print "Uncompressing PHA..."
subprocess.call(["tar", "-xzvf", fpath_pha_tar, '-C', path_out_src])
fpath_incl = os.path.join(path_out_src, 'phav52i', 'source_expand',
'parm_includes', 'inhomog.parm.MTHLY.TEST.incl')
f_incl = open(fpath_incl, 'r')
incl_lines = f_incl.readlines()
f_incl.close()
for x in np.arange(len(incl_lines)):
a_line = incl_lines[x]
if a_line == INCL_LINE_BEGIN_YR:
a_line = a_line.replace('1895', "%d" % (yr_begin,))
incl_lines[x] = a_line
elif a_line == INCL_LINE_END_YR:
a_line = a_line.replace('2015', "%d" % (yr_end,))
incl_lines[x] = a_line
elif a_line == INCL_LINE_N_STNS:
a_line = a_line.replace('7720', "%d" % (n_stns,))
incl_lines[x] = a_line
f_incl = open(fpath_incl, 'w')
f_incl.writelines(incl_lines)
f_incl.close()
path_src = os.path.join(path_out_src, 'phav52i')
os.chdir(path_src)
print "Compiling PHA..."
subprocess.call(['make', 'install', 'INSTALLDIR=%s' % (path_out_run,)])
_write_conf(os.path.join(path_out_run, "world1.conf"), yr_end, n_stnyrs, varname)
_write_conf(os.path.join(path_out_run, "data", "world1.conf"), yr_end, n_stnyrs, varname)
print "Writing input station data ASCII files..."
_write_input_station_data(path_out_run, varname, stns, tair, yrs, stnhist)
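# --- Editor's hedged usage sketch (not part of TopoWx) ----------------------
# Rough illustration of how setup_pha() and run_pha() are meant to be chained.
# All paths, years, and inputs below are hypothetical placeholders.
def _example_pha_workflow(stns, tair_mthly):
    path_run = '/tmp/pha_run_tmin'  # hypothetical PHA run directory
    setup_pha('/tmp/phav52i.tar.gz', '/tmp/pha_src', path_run,
              yr_begin=1948, yr_end=2015, stns=stns, tair=tair_mthly,
              varname='tmin')
    run_pha(path_run, 'tmin')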
def run_pha(path_run, varname):
'''
Run a PHA instance
Parameters
----------
path_run : str
The PHA run path from setup_pha
varname : str
Temperature variable name (tmin or tmax)
'''
pha_cmd = 'bash ' + os.path.join(path_run, 'testv52i-pha.sh') + " world1 %s raw 0 0 P" % (varname,)
print "Running PHA for %s..." % (varname,)
subprocess.call(pha_cmd, shell=True)
path_log = os.path.join(path_run,'data','benchmark','world1','output','PHAv52i.FAST.MLY.TEST.*.%s.world1.r00.out.gz'%(varname,))
path_out_log = os.path.join(path_run,'data','benchmark','world1','output','pha_adj_%s.log'%(varname,))
print "Writing log of PHA adjustments: "+path_out_log
cmd = " ".join(["zgrep 'Adj write'",path_log,">",path_out_log])
subprocess.call(cmd,shell=True)
class HomogDaily():
'''
Class for homogenizing daily station data based on monthly homogenization
results from a PHA run.
'''
def __init__(self,stnda,path_pha_run,varname):
'''
Parameters
----------
stnda : twx.db.StationDataDb
A StationDataDb object pointing to the daily netCDF database
that was used as input to the PHA run
path_run : str
The PHA run path from setup_pha
varname : str
Temperature variable name (tmin or tmax)
'''
self.stnda = stnda
self.varname = varname
self.mthly_data = np.ma.masked_invalid(self.stnda.xrds['_'.join([varname, 'mth'])][:].values)
self.miss_data = self.stnda.ds.variables['_'.join([varname,'mthmiss'])][:]
path_adj_log = os.path.join(path_pha_run,'data','benchmark','world1','output','pha_adj_%s.log'%(varname,))
self.pha_adjs = _parse_pha_adj(path_adj_log)
self.path_FLs_data = os.path.join(path_pha_run,'data','benchmark','world1','monthly','FLs.r00')
self.mths = get_mth_metadata(self.stnda.days[YEAR][0], self.stnda.days[YEAR][-1])
self.dly_yrmth_masks = []
yrs = np.unique(self.stnda.days[YEAR])
for yr in yrs:
for mth in np.arange(1,13):
self.dly_yrmth_masks.append(np.logical_and(stnda.days[YEAR]==yr,stnda.days[MONTH]==mth))
self.ndays_per_mth = np.zeros(len(self.dly_yrmth_masks))
for x in np.arange(self.ndays_per_mth.size):
self.ndays_per_mth[x] = np.sum(self.dly_yrmth_masks[x])
self.mthly_yr_masks = {}
for yr in yrs:
self.mthly_yr_masks[yr] = self.mths[YEAR] == yr
def homog_stn(self,stn_id):
'''
Build time series of homogenized daily temperature observations for a station
Parameters
----------
stn_id : str
The ID of the station for which to build the homogenized time series
Returns
-------
dly_vals_homog : ndarray
The homogenized time series of daily temperature observations for the station.
'''
fstn_id = _format_stnid(stn_id)
file_homog_mth = open(os.path.join(self.path_FLs_data,"%s.FLs.r00.%s"%(fstn_id,self.varname)))
mthvals_homog = np.ones(self.mths.size,dtype=np.float)*-9999
for aline in file_homog_mth.readlines():
yr = int(aline[12:17])
yrmthvals = np.array([aline[17:17+5],aline[26:26+5],aline[35:35+5],aline[44:44+5],
aline[53:53+5],aline[62:62+5],aline[71:71+5],aline[80:80+5],
aline[89:89+5],aline[98:98+5],aline[107:107+5],aline[116:116+5]],dtype=np.float)
mthvals_homog[self.mthly_yr_masks[yr]] = yrmthvals
mthvals_homog = np.ma.masked_array(mthvals_homog,mthvals_homog==-9999)
mthvals_homog = mthvals_homog/100.0
mthvals_homog = np.round(mthvals_homog,2)
dly_vals = self.stnda.load_all_stn_obs_var(stn_id,self.varname)[0].astype(np.float64)
dly_vals = np.ma.masked_array(dly_vals,np.isnan(dly_vals))
mth_vals = np.ma.round(self.mthly_data[:,self.stnda.stn_idxs[stn_id]].astype(np.float64),2)
miss_cnts = self.miss_data[:,self.stnda.stn_idxs[stn_id]]
dly_vals_homog = dly_vals.copy()
stn_pha_adj = self.pha_adjs[self.pha_adjs[STN_ID]==fstn_id]
stn_pha_adj = stn_pha_adj[np.argsort(stn_pha_adj['ymd_start'])]
dif_cnt = 0
for x in np.arange(mth_vals.size):
if not np.ma.is_masked(mth_vals[x]) and not np.ma.is_masked(mthvals_homog[x]):
if mth_vals[x] != mthvals_homog[x]:
delta = mthvals_homog[x] - mth_vals[x]
dly_vals_homog[self.dly_yrmth_masks[x]] = dly_vals_homog[self.dly_yrmth_masks[x]] + delta
dif_cnt+=1
elif miss_cnts[x] < self.ndays_per_mth[x] and not np.ma.is_masked(mthvals_homog[x]):
ymd = self.mths[YMD][x]
if ymd < stn_pha_adj['ymd_start'][0]:
#before all change points. assume it falls under the earliest change point
delta = -stn_pha_adj['adj'][0]
else:
mask_adj = np.logical_and(stn_pha_adj['ymd_start'] <= ymd, stn_pha_adj['ymd_end'] >= ymd)
sum_mask = np.sum(mask_adj)
if sum_mask == 0:
#don't do anything. past the last change point which is theoretically 0
delta = 0
elif sum_mask == 1:
delta = -stn_pha_adj[mask_adj]['adj'][0]
else:
raise Exception("Falls within more than one change point")
dly_vals_homog[self.dly_yrmth_masks[x]] = dly_vals_homog[self.dly_yrmth_masks[x]] + np.round(delta,2)
return dly_vals_homog
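    # Editor's hedged note (not part of TopoWx): the adjustment logic above is
    # a per-month shift applied uniformly to every day in that month, i.e.
    #   dly_homog[days in month m] = dly_raw[days in month m]
    #                                + (mthly_homog[m] - mthly_raw[m])
    # Toy example with made-up values: mth_raw = 10.00 C, mth_homog = 9.60 C
    # lowers every daily value in that month by 0.40 C.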
class InsertHomog(twx.db.Insert):
'''
Class for inserting stations and observations that have been homogenized.
Loads stations and observations from current netCDF
station dataset and performs homogenization adjustments.
'''
def __init__(self,stnda,homog_dly_tmin,homog_dly_tmax,path_pha_run_tmin,path_pha_run_tmax):
'''
Parameters
----------
stnda : twx.db.StationDataDb
A StationDataDb object pointing to the daily netCDF database
that was used as input to the PHA run
homog_dly_tmin : HomogDaily
A HomogDaily object for homogenizing daily Tmin observations
homog_dly_tmax : HomogDaily
A HomogDaily object for homogenizing daily Tmax observations
path_pha_run_tmin : str
The PHA run path from setup_pha for Tmin
path_pha_run_tmax : str
The PHA run path from setup_pha for Tmax
'''
twx.db.Insert.__init__(self,stnda.days[DATE][0],stnda.days[DATE][-1])
self.homog_tmin = homog_dly_tmin
self.homog_tmax = homog_dly_tmax
self.stnda = stnda
#Get stn_ids for which homogenization could not be conducted
unuse_tmin_ids = self.__load_input_not_stnlist(path_pha_run_tmin)
unuse_tmax_ids = self.__load_input_not_stnlist(path_pha_run_tmax)
fmt_ids = np.array([_format_stnid(stnid) for stnid in stnda.stn_ids])
mask_stns_tmin = ~np.in1d(fmt_ids, unuse_tmin_ids, True)
mask_stns_tmax = ~np.in1d(fmt_ids, unuse_tmax_ids, True)
self.stns_tmin = stnda.stns[mask_stns_tmin]
self.stns_tmax = stnda.stns[mask_stns_tmax]
uniq_ids = np.unique(np.concatenate((self.stns_tmin[STN_ID],self.stns_tmax[STN_ID])))
self.stns_all = stnda.stns[np.in1d(stnda.stn_ids, uniq_ids, True)]
self.stn_list = [(stn[STN_ID],stn[LAT],stn[LON],stn[ELEV],stn[STATE],stn[STN_NAME]) for stn in self.stns_all]
self.empty_obs = np.ones(stnda.days.size)*MISSING
self.empty_qa = np.zeros(stnda.days.size,dtype=np.str)
def __load_input_not_stnlist(self,run_path):
fpath_corr = os.path.join(run_path,'data','benchmark','world1','corr')
fnames = np.array(os.listdir(fpath_corr))
fnames_input_not_stnlist = fnames[np.char.endswith(fnames,'input_not_stnlist')]
stnids_all = []
for a_fname in fnames_input_not_stnlist:
fname_input_not_stnlist = os.path.join(fpath_corr,a_fname)
stn_ids = np.atleast_1d(np.loadtxt(fname_input_not_stnlist, dtype=np.str, usecols=[0]))
stnids_all.extend(stn_ids)
stnids_all = np.sort(np.array(stnids_all))
return stnids_all
def get_stns(self):
return self.stn_list
def parse_stn_obs(self,stn_id):
if stn_id in self.stns_tmin[STN_ID]:
tmin_homog = self.homog_tmin.homog_stn(stn_id)
tmin_homog = np.ma.filled(tmin_homog, MISSING)
else:
tmin_homog = self.empty_obs
if stn_id in self.stns_tmax[STN_ID]:
tmax_homog = self.homog_tmax.homog_stn(stn_id)
tmax_homog = np.ma.filled(tmax_homog, MISSING)
else:
tmax_homog = self.empty_obs
obs = np.empty(self.stnda.days.size, dtype=DTYPE_STNOBS)
obs['year'] = self.stnda.days[YEAR]
obs['month'] = self.stnda.days[MONTH]
obs['day'] = self.stnda.days[DAY]
obs['ymd'] = self.stnda.days[YMD]
obs['tmin'] = tmin_homog
obs['tmax'] = tmax_homog
obs['prcp'] = self.empty_obs
obs['swe'] = self.empty_obs
obs['qflag_tmin'] = self.empty_qa
obs['qflag_tmax'] = self.empty_qa
obs['qflag_prcp'] = self.empty_qa
return obs
def _write_conf(fpath_conf, endyr, n_stnyrs, varname):
'''
Write an updated PHA configuration file
Menne, M.J., and C.N. Williams, Jr., 2009: Homogenization of temperature series
via pairwise comparisons. J. Climate, 22, 1700-1717.
Parameters
----------
fpath_conf : str
File path to the existing PHA config file (eg world1.conf)
endyr : int
Last year to run PHA
n_stnyrs : int
Number of station years (# of stations * # of years)
varname : str
Temperature variable name (tmin or tmax)
'''
f_conf = open(fpath_conf, 'r')
conf_lines = f_conf.readlines()
f_conf.close()
for x in np.arange(len(conf_lines)):
a_line = conf_lines[x]
if a_line == CONF_LINE_END_YR:
a_line = a_line.replace('1999', "%d" % (endyr,))
conf_lines[x] = a_line
elif a_line == CONF_LINE_MAX_YRS:
a_line = a_line.replace('500000', "%d" % (n_stnyrs,))
conf_lines[x] = a_line
elif a_line == CONF_LINE_ELEMS:
a_line = a_line.replace('tavg', varname)
conf_lines[x] = a_line
f_conf = open(fpath_conf, 'w')
f_conf.writelines(conf_lines)
f_conf.close()
def _write_input_station_data(path_pha_run, varname, stns, tair, yrs,
stnhist=None):
'''
Write station data to GHCN format for input to PHA
'''
_write_stn_list(stns, os.path.join(path_pha_run, 'data', 'benchmark',
'world1', 'meta',
'world1_stnlist.%s' % (varname,)))
os.remove(os.path.join(path_pha_run, 'data', 'benchmark', 'world1', 'meta',
'world1_stnlist.tavg'))
fpath_metafile = os.path.join(path_pha_run, 'data', 'benchmark', 'world1',
'meta', 'world1_metadata_file.txt')
with open(fpath_metafile, 'w') as f:
if stnhist is not None:
for stn_id, a_date in zip(stnhist.station_id,
pd.DatetimeIndex(stnhist.change_date).strftime('%Y%m')):
stn_id = _format_stnid(stn_id)
f.write(" %s %s 1\n"%(stn_id, a_date))
path_stn_obs = os.path.join(path_pha_run, 'data', 'benchmark', 'world1',
'monthly', 'raw')
rm_stn_files = glob.glob(os.path.join(path_stn_obs, "*.tavg"))
for a_fpath in rm_stn_files:
os.remove(a_fpath)
_write_stn_obs_files(stns, tair, yrs, varname, path_stn_obs)
def _write_stn_list(stns, fpath_out):
'''
Write GHCN format station list ASCII file
'''
fout = open(fpath_out, "w")
for stn in stns:
outid = _format_stnid(stn[STN_ID])
if stn[LAT] < 0 or stn[LON] >= 0:
raise Exception("Only handles formating of positive Lats and negative Lons.")
outLat = "{0:0<8.5F}".format(stn[LAT])
if np.abs(stn[LON]) < 100:
fmtLon = "{0:0<9.5F}"
else:
fmtLon = "{0:0<9.4F}"
outLon = fmtLon.format(stn[LON])
fout.write(" ".join([outid, outLat, outLon, "\n"]))
def _format_stnid(stnid):
'''
Format station id for PHA
'''
if stnid.startswith("GHCND_"):
outid = stnid.split("_")[1]
elif stnid.startswith("NRCS_"):
outid = stnid.split("_")[1]
# Check for triplet-style NRCS ID
if ":" in outid:
outid = outid.replace(":", "")[0:8]
outid = "".join(["SNT", "{0:0>8}".format(outid)])
elif stnid.startswith("RAWS_"):
outid = stnid.split("_")[1]
outid = "".join(["WRC", "{0:0>8}".format(outid)])
elif stnid.startswith("USH"):
outid = stnid
else:
raise Exception("Do not recognize stn id prefix for stnid: " + stnid)
if len(outid) != 11:
raise ValueError("Formatted station id for PHA was not 11 characters: %s"%outid)
return outid
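# Editor's hedged examples for _format_stnid() (station IDs are made up):
#   'GHCND_USC00244558' -> 'USC00244558'
#   'NRCS_13C01S'       -> 'SNT0013C01S'   (SNT prefix, ID zero-padded to 8)
#   'RAWS_MT123'        -> 'WRC000MT123'   (WRC prefix, ID zero-padded to 8)
# Each result must be exactly 11 characters, otherwise a ValueError is raised.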
def _write_stn_obs_files(stns, data, yrs, varname, path_out):
'''
Write individual GHCN format station observation ASCII files
'''
for stn, x in zip(stns, np.arange(stns.size)):
outId = _format_stnid(stn[STN_ID])
fout = open(os.path.join(path_out, "".join([outId, ".raw.", varname])), 'w')
tair = data[:, x]
for yr, i in zip(yrs, np.arange(0, yrs.size * 12, 12)):
outLine = " ".join([outId, str(yr)])
tairYr = tair[i:i + 12]
if np.ma.isMA(tairYr):
ttairYr = tairYr.data
validMask = np.logical_not(tairYr.mask)
ttairYr[validMask] = ttairYr[validMask] * 100.0
ttairYr[tairYr.mask] = -9999
tairYr = ttairYr
else:
tairYr = tairYr * 100.0
for aVal in tairYr:
outLine = "".join([outLine, " {0:>5.0f}".format(aVal), " "])
outLine = "".join([outLine, "\n"])
fout.write(outLine)
fout.close()
def _parse_pha_adj(path_adj_log):
'''
Parse a log file of PHA adjustments and return as a structured array
with dtype DTYPE_PHA_ADJ
'''
f = open(path_adj_log)
vals_adj = []
for aline in f.readlines():
stnid = aline[10:21]
yrmth_start = aline[25:31]
yrmth_end = aline[45:51]
val_adj = np.float(aline[75:81])
date_start = datetime(np.int(yrmth_start[0:4]),np.int(yrmth_start[-2:]),1)
ymd_start = np.int("%d%02d%02d"%(date_start.year,date_start.month,date_start.day))
date_end = datetime(np.int(yrmth_end[0:4]),np.int(yrmth_end[-2:]),1)
ymd_end = np.int("%d%02d%02d"%(date_end.year,date_end.month,date_end.day))
#if val_adj != 0.0:
vals_adj.append((stnid,ymd_start,ymd_end,val_adj))
vals_adj = np.array(vals_adj,dtype=DTYPE_PHA_ADJ)
return vals_adj
def load_snotel_sensor_hist(station_ids=None):
'''Load SNOTEL metadata history for YSI extended range sensor installs.
Metadata extracted from NRCS station sensor history pages:
e.g.: http://wcc.sc.egov.usda.gov/nwcc/sensorhistory?sitenum=542
Parameters
----------
station_ids : list-like, optional
Station IDs for which sensor history should be loaded.
Returns
-------
stnhist : pandas.DataFrame
DataFrame with station_id as index and change_date column specifying
the date that the YSI extended range sensor was installed
'''
path_root = os.path.dirname(__file__)
fpath_stnhist = os.path.join(path_root, 'data', 'snotel_sensor_installs.csv')
stnhist = pd.read_csv(fpath_stnhist, index_col='station_id')
stnhist.index = "NRCS_"+stnhist.index
stnhist['date_new_sensor'] = pd.to_datetime(stnhist['date_new_sensor'])
if station_ids is not None:
stnhist = stnhist.loc[station_ids].dropna()
stnhist = stnhist.reset_index().rename(columns={'index':'station_id',
'date_new_sensor':'change_date'})
return stnhist
def get_pha_adj_df(fpath_pha_adj_log, stns, elem):
'''Build DataFrame of PHA adjustments.
Parameters
----------
fpath_pha_adj_log : str
File path to PHA adjustment log file generated by a run_pha.
e.g.: [pha_run_path]/run/data/benchmark/world1/output/pha_adj_tmin.log
stns : structured ndarray
Stations for which PHA was run. Structured station array must contain at
least the following fields: STN_ID, STN_NAME, LAT, LON and can be obtained from
twx.db.StationDataDb
elem : str
Element for which PHA was run (e.g.: tmin, tmax)
Returns
-------
stnhist : pandas.DataFrame
DataFrame with following columns:
YEAR_MONTH_START
YEAR_MONTH_END
ADJ(C)
VARIABLE
STN_ID
NAME
LON
LAT
ELEV(m)
'''
stnids = np.array([_format_stnid(stnid) for stnid in stns[STN_ID]])
stn_meta = {}
for i in np.arange(stnids.size):
stn_meta[stnids[i]] = stns[i]
pha_adj = _parse_pha_adj(fpath_pha_adj_log)
pha_adj = pha_adj[np.in1d(pha_adj[STN_ID], stnids, assume_unique=False)]
pha_adj = pha_adj[np.abs(pha_adj['adj']) > 0]
pha_adj['adj'] = -pha_adj['adj']
a_month = relativedelta(months=1)
dates_start_chgpt = [datetime.strptime(str(a_ymd),"%Y%m%d") + a_month
for a_ymd in pha_adj['ymd_start']]
dates_end_chgpt = [datetime.strptime(str(a_ymd),"%Y%m%d") + a_month
for a_ymd in pha_adj['ymd_end']]
mthyr_start_chgpt = ['%d%02d'%(a_date.year,a_date.month)
for a_date in dates_start_chgpt]
mthyr_end_chgpt = ['%d%02d'%(a_date.year,a_date.month)
for a_date in dates_end_chgpt]
stnelev_chgpt = np.round([stn_meta[a_stnid][ELEV]
for a_stnid in pha_adj[STN_ID]]).astype(np.int)
stnlon_chgpt = [stn_meta[a_stnid][LON] for a_stnid in pha_adj[STN_ID]]
stnlat_chgpt = [stn_meta[a_stnid][LAT] for a_stnid in pha_adj[STN_ID]]
stnname_chgpt = [stn_meta[a_stnid][STN_NAME] for a_stnid in pha_adj[STN_ID]]
stnid_chgpt = [stn_meta[a_stnid][STN_ID] for a_stnid in pha_adj[STN_ID]]
varnames = [elem]*pha_adj.size
df = pd.DataFrame({'YEAR_MONTH_START':mthyr_start_chgpt,
'YEAR_MONTH_END':mthyr_end_chgpt,
'ADJ(C)':pha_adj['adj'], 'VARIABLE':varnames,
'STN_ID':stnid_chgpt, 'NAME':stnname_chgpt,
'LON':stnlon_chgpt, 'LAT':stnlat_chgpt,
'ELEV(m)':stnelev_chgpt})
df = df[['YEAR_MONTH_START', 'YEAR_MONTH_END', 'ADJ(C)', 'VARIABLE',
'STN_ID','NAME','LON','LAT','ELEV(m)']]
df = df.set_index('STN_ID')
return df
|
jaredwo/topowx
|
twx/homog/pha.py
|
Python
|
gpl-3.0
| 25,403
|
[
"NetCDF"
] |
1a6697fba6310385ce4e4021a22eba1b3843294b406fb1c124d16d85c68acb95
|
# -*- coding: utf-8 -*-
#
# pyrubberband documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 19 10:40:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'numpydoc',
]
from glob import glob
autosummary_generate = glob('*.rst')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrubberband'
copyright = u'2015, Brian McFee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import sys
from mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'librosa']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
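# Editor's note (hedged): the Mock shim above is the usual recipe for letting
# Sphinx autodoc import the package on a docs build host where heavy runtime
# dependencies (here numpy and librosa) are not installed; attribute access on
# the mocks just returns further mocks instead of raising ImportError.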
import imp
PYRB = imp.load_source('pyrubberband.version', '../pyrubberband/version.py')
# The short X.Y version.
version = PYRB.version
# The full version, including alpha/beta/rc tags.
release = PYRB.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrubberbanddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyrubberband.tex', u'pyrubberband Documentation',
u'Brian McFee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyrubberband', u'pyrubberband Documentation',
[u'Brian McFee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyrubberband', u'pyrubberband Documentation',
u'Brian McFee', 'pyrubberband', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
faroit/pyrubberband
|
docs/conf.py
|
Python
|
isc
| 8,720
|
[
"Brian"
] |
96b02be1dc2d5a011a7416cdb35dc62f1d3f57b37bed1ed79ef1314099137407
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# this is temporary as long as netifaces does not work on android
import socket
from kivy.logger import Logger
from ORCA.utils.LogError import LogError
__all__ = ['GetIPAddressV6']
def GetIPAddressV6() -> str:
# Under construction
uIP:str = u''
# Fast but not safe
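    # Editor's note (hedged, not part of ORCA): connect() on a UDP socket does
    # not send any packet; it only asks the OS to pick the outgoing interface
    # for the given destination, so getsockname() then returns the local IPv6
    # address that would be used to reach the outside world.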
try:
s:socket.socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        # Not necessarily successful
s.connect(('2001:0db8:85a3:0000:0000:8a2e:0370:7334', 1))
uIP = s.getsockname()[0]
except Exception as e:
LogError(uMsg="Failure on GetIPAddressV6", oException=e)
return uIP
s.close()
Logger.debug("Found IPv6 Address:"+uIP)
return uIP
|
thica/ORCA-Remote
|
src/ORCA/utils/Platform/android/android_GetIPAddressV6.py
|
Python
|
gpl-3.0
| 1,584
|
[
"ORCA"
] |
fadbf5ef63837cc579976e8f1073a864847b45ce54231637e4de020072a58229
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007-2013 Async Open Source <http://www.async.com.br>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
"""
Kiwi integration for Stoq/Storm
"""
import glib
import gobject
from kiwi.python import Settable
from kiwi.utils import gsignal
from storm import Undef
from storm.database import Connection, convert_param_marks
from storm.expr import compile, And, Or, Like, Not, Alias, State, Lower
from storm.tracer import trace
import psycopg2
import psycopg2.extensions
import re
from stoqlib.database.expr import Date, StoqNormalizeString
from stoqlib.database.viewable import Viewable
from stoqlib.database.interfaces import ISearchFilter
class QueryState(object):
def __init__(self, search_filter):
"""
Create a new QueryState object.
:param search_filter: search filter this query state is associated with
:type search_filter: :class:`SearchFilter`
"""
self.filter = search_filter
class NumberQueryState(QueryState):
"""
Create a new NumberQueryState object.
:cvar value: number
"""
(EQUALS,
DIFFERENT) = range(2)
def __init__(self, filter, value, mode=EQUALS):
QueryState.__init__(self, filter)
self.mode = mode
self.value = value
def __repr__(self):
return '<NumberQueryState value=%r>' % (self.value,)
class NumberIntervalQueryState(QueryState):
"""
Create a new NumberIntervalQueryState object.
:cvar start: number
:cvar end: number
"""
def __init__(self, filter, start, end):
QueryState.__init__(self, filter)
self.start = start
self.end = end
def __repr__(self):
return '<NumberIntervalQueryState start=%r end=%r>' % (self.start, self.end)
class StringQueryState(QueryState):
"""
Create a new StringQueryState object.
:cvar text: string
"""
(CONTAINS_EXACTLY,
IDENTICAL_TO,
NOT_CONTAINS,
CONTAINS_ALL) = range(4)
def __init__(self, filter, text, mode=CONTAINS_ALL):
QueryState.__init__(self, filter)
self.mode = mode
self.text = text
def __repr__(self):
return '<StringQueryState text=%r>' % (self.text,)
class DateQueryState(QueryState):
"""
Create a new DateQueryState object.
:cvar date: date
"""
def __init__(self, filter, date):
QueryState.__init__(self, filter)
self.date = date
def __repr__(self):
return '<DateQueryState date=%r>' % (self.date,)
class DateIntervalQueryState(QueryState):
"""
Create a new DateIntervalQueryState object.
:cvar start: start of interval
:cvar end: end of interval
"""
def __init__(self, filter, start, end):
QueryState.__init__(self, filter)
self.start = start
self.end = end
def __repr__(self):
return '<DateIntervalQueryState start=%r, end=%r>' % (
self.start, self.end)
class BoolQueryState(QueryState):
"""
Create a new BoolQueryState object.
:cvar value: value of the query state
"""
def __init__(self, filter, value):
QueryState.__init__(self, filter)
self.value = value
def __repr__(self):
return '<BoolQueryState value=%r>' % (self.value)
class AsyncQueryOperation(gobject.GObject):
(GET_ALL,
GET_ONE) = range(2)
gsignal('finish')
def __init__(self, operation_type, store, resultset, expr):
"""
:param operation_type: kind of operation this is
:param store: database store
:param resultset: resultset that will be used to construct
the result from.
:param expr: query expression to execute
"""
gobject.GObject.__init__(self)
self.operation_type = operation_type
self.resultset = resultset
self.expr = expr
self._conn = store._connection
self._async_cursor = None
self._async_conn = None
self._statement = None
self._parameters = None
def execute(self, async_conn):
"""Executes a query within an asyncronous psycopg2 connection
"""
# Async variant of Connection.execute() in storm/database.py
state = State()
statement = compile(self.expr, state)
stmt = convert_param_marks(statement, "?", "%s")
self._async_cursor = async_conn.cursor()
self._async_conn = async_conn
# This is postgres specific, see storm/databases/postgres.py
self._statement = stmt.encode('utf-8')
self._parameters = tuple(Connection.to_database(state.parameters))
trace("connection_raw_execute", self._conn,
self._async_cursor, self._statement, self._parameters)
self._async_cursor.execute(self._statement,
self._parameters)
def finish(self):
"""This can only be called when the ``finish``` signal has
been emitted.
:returns: the result, which might be an object or a list depending
on self.operation_type
"""
trace("connection_raw_execute_success", self._conn,
self._async_cursor, self._statement, self._parameters)
result = self._conn.result_factory(self._conn,
self._async_cursor)
if self.operation_type == AsyncQueryOperation.GET_ALL:
# ResultSet.__iter__()
retval = []
for values in result:
obj = self.resultset._load_objects(result, values)
retval.append(obj)
elif self.operation_type == AsyncQueryOperation.GET_ONE:
# ResultSet.one()
values = result.get_one()
retval = self.resultset._load_objects(result, values)
else:
raise NotImplementedError(self.operation_type)
return retval
gobject.type_register(AsyncQueryOperation)
class QueryExecuter(object):
"""
A QueryExecuter is responsible for taking the state (as in QueryState)
objects from search filters and construct a query.
The query is constructed using storm.
:cvar default_search_limit: The default search limit.
"""
def __init__(self, store=None):
self._columns = {}
self._limit = -1
self.store = store
self.search_spec = None
self._query_callbacks = []
self._filter_query_callbacks = {}
self._query = self._default_query
self.post_result = None
self._async_conn = None
self._operations = []
# Public API
def search(self, states=None, resultset=None, limit=None):
"""
Execute a search.
:param resultset: resultset to use, if ``None`` we will
just execute a normal store.find() on the search_spec set in
.set_search_spec()
:param states:
:param limit: use this limit instead of the one defined by set_limit()
"""
if resultset is None:
resultset = self._query(self.store)
resultset = self._parse_states(resultset, states)
limit = limit or self._limit
if limit > 0:
resultset.config(limit=limit)
return resultset
def search_async(self, states=None, resultset=None):
"""
Execute a search asynchronously.
This uses a separate psycopg2 connection which is lazily
created just before executing the first async query.
This method returns an operation for which a signal **finish** is
emitted when the query has finished executing. In that callback,
:meth:`.AsyncQueryOperation.finish` should be called, eg:
>>> from stoqlib.api import api
>>> from stoqlib.domain.person import Person
>>> default_store = api.get_default_store()
>>> resultset = default_store.find(Person)
>>> qe = QueryExecuter(store=default_store)
>>> operation = qe.search_async(resultset=resultset)
>>> def finished(operation, loop):
... operation.finish()
... # use result
... loop.quit()
Create a loop for testing
>>> loop = glib.MainLoop()
>>> sig_id = operation.connect('finish', finished, loop)
>>> loop.run()
:param states:
:param resultset: a resultset or ``None``
:returns: a query operation
"""
if resultset is None:
resultset = self._query(self.store)
resultset = self._parse_states(resultset, states)
operation = AsyncQueryOperation(AsyncQueryOperation.GET_ALL,
self.store,
resultset,
resultset._get_select())
self._schedule_operation(operation)
return operation
def set_limit(self, limit):
"""
Set the maximum number of result items to return in a search query.
:param limit:
"""
self._limit = limit
def get_limit(self):
return self._limit
def set_filter_columns(self, search_filter, columns, use_having=False):
"""Set what columns should be filtered for the search_filter
:param columns: Should be a list of column names or properties to be
used in the query. If they are column names (strings), we will call
getattr on the search_spec to get the property for the query construction.
"""
if not ISearchFilter.providedBy(search_filter):
pass
#raise TypeError("search_filter must implement ISearchFilter")
assert not search_filter in self._columns
self._columns[search_filter] = (columns, use_having)
def set_search_spec(self, search_spec):
"""
Sets the Storm search_spec for this executer
:param search_spec: a Storm search_spec
"""
self.search_spec = search_spec
def add_query_callback(self, callback):
"""
Adds a generic query callback
:param callback: a callable
"""
if not callable(callback):
raise TypeError
self._query_callbacks.append(callback)
def add_filter_query_callback(self, search_filter, callback,
use_having=False):
"""
Adds a query callback for the filter search_filter
:param search_filter: a search filter
:param callback: a callable
"""
if not ISearchFilter.providedBy(search_filter):
raise TypeError
if not callable(callback):
raise TypeError
l = self._filter_query_callbacks.setdefault(search_filter, [])
l.append((callback, use_having))
def set_query(self, callback):
"""
Overrides the default query mechanism.
        :param callback: a callable which will take two arguments (query, store)
"""
if callback is None:
callback = self._default_query
elif not callable(callback):
raise TypeError
self._query = callback
def get_post_result(self, result):
descs, query = self.search_spec.post_search_callback(result)
# This should not be present in the query, since post_search_callback
# should only use aggregate functions.
query.order_by = Undef
query.group_by = Undef
store = self.store
values = store.execute(query).get_one()
assert len(descs) == len(values), (descs, values)
data = {}
for desc, value in zip(descs, list(values)):
data[desc] = value
return Settable(**data)
def get_ordered_result(self, result, attribute):
if issubclass(self.search_spec, Viewable):
            # Sorting viewables by a plain string is not supported, since
            # viewables can query more than one search_spec at once, and each
            # search_spec may have columns with the same name.
if isinstance(attribute, str):
attribute = getattr(self.search_spec, attribute)
return result.order_by(attribute)
# Private API
def _schedule_operation(self, operation):
if self._async_conn is None:
store_conn = self.store._connection
self._async_conn = psycopg2.connect(
store_conn._raw_connection.dsn, async=1)
self._operations.append(operation)
def wait():
if self._async_conn.poll() == psycopg2.extensions.POLL_OK:
self._dispatch_operations()
return False
return True
glib.timeout_add(0, wait)
def _dispatch_operations(self):
def wait(operation):
if self._async_conn.poll() == psycopg2.extensions.POLL_OK:
operation.emit('finish')
return False
return True
while self._operations:
operation = self._operations.pop()
operation.execute(self._async_conn)
glib.timeout_add(0, wait, operation)
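    # Editor's note (hedged): _schedule_operation/_dispatch_operations above
    # implement a cooperative polling loop. glib.timeout_add(0, wait) keeps
    # re-invoking wait() on the GLib main loop until conn.poll() reports
    # POLL_OK; the queued operations are then executed and each one emits
    # 'finish' once its own cursor is ready to be read.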
def _default_query(self, store):
return store.find(self.search_spec)
def parse_states(self, states):
"""Parses the state given and return a tuple where the first element is
the queries that should be used, and the second is a 'having' that
should be used with the query.
"""
if states is None:
return None, None
search_spec = self.search_spec
if search_spec is None:
raise ValueError("search_spec cannot be None")
queries = []
having = []
for state in states:
search_filter = state.filter
assert state.filter
# Column query
if search_filter in self._columns:
columns, use_having = self._columns[search_filter]
query = self._construct_state_query(search_spec, state, columns)
if query and use_having:
having.append(query)
elif query:
queries.append(query)
# Custom per filter/state query.
elif search_filter in self._filter_query_callbacks:
for callback, use_having in self._filter_query_callbacks[search_filter]:
query = callback(state)
if query and use_having:
having.append(query)
elif query:
queries.append(query)
else:
if (self._query == self._default_query and
not self._query_callbacks):
raise ValueError(
"You need to add a search column or a query callback "
"for filter %s" % (search_filter))
for callback in self._query_callbacks:
query = callback(states)
if query:
queries.append(query)
return queries, having
def _parse_states(self, resultset, states):
queries, having = self.parse_states(states)
if queries:
resultset = resultset.find(And(*queries))
if having:
resultset = resultset.having(And(*having))
return resultset
def _construct_state_query(self, search_spec, state, columns):
queries = []
for column in columns:
query = None
if isinstance(column, str):
table_field = getattr(search_spec, column)
else:
table_field = column
if isinstance(table_field, Alias):
table_field = table_field.expr
if isinstance(state, NumberQueryState):
query = self._parse_number_state(state, table_field)
elif isinstance(state, NumberIntervalQueryState):
query = self._parse_number_interval_state(state, table_field)
elif isinstance(state, StringQueryState):
query = self._parse_string_state(state, table_field)
elif isinstance(state, DateQueryState):
query = self._parse_date_state(state, table_field)
elif isinstance(state, DateIntervalQueryState):
query = self._parse_date_interval_state(state, table_field)
elif isinstance(state, BoolQueryState):
query = self._parse_bool_state(state, table_field)
else:
raise NotImplementedError(state.__class__.__name__)
if query:
queries.append(query)
if queries:
return Or(*queries)
def _parse_number_state(self, state, table_field):
if state.value is None:
return
if state.mode == NumberQueryState.EQUALS:
return table_field == state.value
elif state.mode == NumberQueryState.DIFFERENT:
return table_field != state.value
else:
raise AssertionError
def _parse_number_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(table_field >= state.start)
if state.end:
queries.append(table_field <= state.end)
if queries:
return And(*queries)
def _parse_string_state(self, state, table_field):
if not state.text.strip():
return
def _like(value):
return Like(StoqNormalizeString(table_field),
StoqNormalizeString(u'%%%s%%' % value.lower()),
case_sensitive=False)
if state.mode == StringQueryState.CONTAINS_ALL:
queries = [_like(word) for word in re.split('[ \n\r]', state.text) if word]
retval = And(*queries)
elif state.mode == StringQueryState.IDENTICAL_TO:
retval = Lower(table_field) == state.text.lower()
elif state.mode == StringQueryState.CONTAINS_EXACTLY:
retval = (_like(state.text.lower()))
elif state.mode == StringQueryState.NOT_CONTAINS:
queries = [Not(_like(word)) for word in state.text.split(' ') if word]
retval = And(*queries)
else: # pragma nocoverage
raise AssertionError
return retval
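    # Editor's hedged illustration (not part of Stoq): for a filter over
    # Person.name with text "joao silva", CONTAINS_ALL builds roughly
    #   And(Like(norm(name), '%joao%'), Like(norm(name), '%silva%'))
    # where norm() stands for the accent/case normalization applied by
    # StoqNormalizeString; NOT_CONTAINS wraps each Like in Not(), and
    # IDENTICAL_TO compares Lower(name) against the lowercased text.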
def _parse_date_state(self, state, table_field):
if state.date:
return Date(table_field) == Date(state.date)
def _parse_date_interval_state(self, state, table_field):
queries = []
if state.start:
queries.append(Date(table_field) >= Date(state.start))
if state.end:
queries.append(Date(table_field) <= Date(state.end))
if queries:
return And(*queries)
def _parse_bool_state(self, state, table_field):
return table_field == state.value
|
tiagocardosos/stoq
|
stoqlib/database/queryexecuter.py
|
Python
|
gpl-2.0
| 19,534
|
[
"VisIt"
] |
cc0858087bdf29203d4717663941feaaf843347df9b08d656dfb61a7924a4b4b
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DP Logistic Regression on MNIST.
DP Logistic Regression on MNIST with support for privacy-by-iteration analysis.
Vitaly Feldman, Ilya Mironov, Kunal Talwar, and Abhradeep Thakurta.
"Privacy amplification by iteration."
In 2018 IEEE 59th Annual Symposium on Foundations of Computer Science (FOCS),
pp. 521-532. IEEE, 2018.
https://arxiv.org/abs/1808.06651.
"""
import math
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers import dp_optimizer
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
'dpsgd', True, 'If True, train with DP-SGD. If False, '
'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 0.05,
'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_integer('batch_size', 5, 'Batch size')
flags.DEFINE_integer('epochs', 5, 'Number of epochs')
flags.DEFINE_float('regularizer', 0, 'L2 regularizer coefficient')
flags.DEFINE_string('model_dir', None, 'Model directory')
flags.DEFINE_float('data_l2_norm', 8, 'Bound on the L2 norm of normalized data')
def lr_model_fn(features, labels, mode, nclasses, dim):
"""Model function for logistic regression."""
input_layer = tf.reshape(features['x'], tuple([-1]) + dim)
logits = tf.keras.layers.Dense(
units=nclasses,
kernel_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer),
bias_regularizer=tf.keras.regularizers.L2(
l2=FLAGS.regularizer)).apply(input_layer)
# Calculate loss as a vector (to support microbatches in DP-SGD).
vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits) + tf.losses.get_regularization_loss()
# Define mean of loss across minibatch (for reporting through tf.Estimator).
scalar_loss = tf.reduce_mean(vector_loss)
# Configure the training op (for TRAIN mode).
if mode == tf.estimator.ModeKeys.TRAIN:
if FLAGS.dpsgd:
# The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
# ||x|| is the norm of the data.
# We don't use microbatches (thus speeding up computation), since no
# clipping is necessary due to data normalization.
optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
l2_norm_clip=math.sqrt(2 * (FLAGS.data_l2_norm**2 + 1)),
noise_multiplier=FLAGS.noise_multiplier,
num_microbatches=1,
learning_rate=FLAGS.learning_rate)
opt_loss = vector_loss
else:
optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
opt_loss = scalar_loss
global_step = tf.compat.v1.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
# used for evaluation and debugging by tf.estimator. The actual loss being
# minimized is opt_loss defined above and passed to optimizer.minimize().
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode).
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy':
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(input=logits, axis=1))
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
def normalize_data(data, data_l2_norm):
"""Normalizes data such that each samples has bounded L2 norm.
Args:
    data: the dataset. Each row represents one sample.
data_l2_norm: the target upper bound on the L2 norm.
"""
for i in range(data.shape[0]):
norm = np.linalg.norm(data[i])
if norm > data_l2_norm:
data[i] = data[i] / norm * data_l2_norm
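# Editor's hedged note (not part of the original tutorial): an equivalent
# vectorized form of normalize_data, assuming `data` is a 2-D float array,
# would be
#   norms = np.linalg.norm(data, axis=1, keepdims=True)
#   np.divide(data, norms / data_l2_norm, out=data, where=norms > data_l2_norm)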
def load_mnist(data_l2_norm=float('inf')):
"""Loads MNIST and preprocesses to combine training and validation data."""
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.array(train_data, dtype=np.float32) / 255
test_data = np.array(test_data, dtype=np.float32) / 255
train_data = train_data.reshape(train_data.shape[0], -1)
test_data = test_data.reshape(test_data.shape[0], -1)
idx = np.random.permutation(len(train_data)) # shuffle data once
train_data = train_data[idx]
train_labels = train_labels[idx]
normalize_data(train_data, data_l2_norm)
normalize_data(test_data, data_l2_norm)
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
return train_data, train_labels, test_data, test_labels
def print_privacy_guarantees(epochs, batch_size, samples, noise_multiplier):
"""Tabulating position-dependent privacy guarantees."""
if noise_multiplier == 0:
print('No differential privacy (additive noise is 0).')
return
  print('Under the conditions of Theorem 34 (https://arxiv.org/abs/1808.06651) '
'the training procedure results in the following privacy guarantees.')
print('Out of the total of {} samples:'.format(samples))
steps_per_epoch = samples // batch_size
orders = np.concatenate(
[np.linspace(2, 20, num=181),
np.linspace(20, 100, num=81)])
delta = 1e-5
for p in (.5, .9, .99):
steps = math.ceil(steps_per_epoch * p) # Steps in the last epoch.
coef = 2 * (noise_multiplier * batch_size)**-2 * (
# Accounting for privacy loss
(epochs - 1) / steps_per_epoch + # ... from all-but-last epochs
1 / (steps_per_epoch - steps + 1)) # ... due to the last epoch
# Using RDP accountant to compute eps. Doing computation analytically is
# an option.
rdp = [order * coef for order in orders]
eps, _, _ = get_privacy_spent(orders, rdp, target_delta=delta)
print('\t{:g}% enjoy at least ({:.2f}, {})-DP'.format(p * 100, eps, delta))
# Compute privacy guarantees for the Sampled Gaussian Mechanism.
rdp_sgm = compute_rdp(batch_size / samples, noise_multiplier,
epochs * steps_per_epoch, orders)
eps_sgm, _, _ = get_privacy_spent(orders, rdp_sgm, target_delta=delta)
print('By comparison, DP-SGD analysis for training done with the same '
'parameters and random shuffling in each epoch guarantees '
'({:.2f}, {})-DP for all samples.'.format(eps_sgm, delta))
def main(unused_argv):
logger = tf.get_logger()
logger.set_level(logging.INFO)
if FLAGS.data_l2_norm <= 0:
raise ValueError('data_l2_norm must be positive.')
if FLAGS.dpsgd and FLAGS.learning_rate > 8 / FLAGS.data_l2_norm**2:
    raise ValueError('The amplification-by-iteration analysis requires '
                     'learning_rate <= 2 / beta, where beta is the smoothness '
                     'of the loss function and is upper bounded by ||x||^2 / 4 '
                     'with ||x|| being the largest L2 norm of the samples.')
# Load training and test data.
# Smoothness = ||x||^2 / 4 where ||x|| is the largest L2 norm of the samples.
# To get bounded smoothness, we normalize the data such that each sample has a
# bounded L2 norm.
train_data, train_labels, test_data, test_labels = load_mnist(
data_l2_norm=FLAGS.data_l2_norm)
# Instantiate tf.Estimator.
# pylint: disable=g-long-lambda
model_fn = lambda features, labels, mode: lr_model_fn(
features, labels, mode, nclasses=10, dim=train_data.shape[1:])
mnist_classifier = tf.estimator.Estimator(
model_fn=model_fn, model_dir=FLAGS.model_dir)
# Create tf.Estimator input functions for the training and test data.
# To analyze the per-user privacy loss, we keep the same orders of samples in
# each epoch by setting shuffle=False.
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=FLAGS.batch_size,
num_epochs=FLAGS.epochs,
shuffle=False)
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={'x': test_data}, y=test_labels, num_epochs=1, shuffle=False)
# Train the model.
num_samples = train_data.shape[0]
steps_per_epoch = num_samples // FLAGS.batch_size
mnist_classifier.train(
input_fn=train_input_fn, steps=steps_per_epoch * FLAGS.epochs)
# Evaluate the model and print results.
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print('Test accuracy after {} epochs is: {:.2f}'.format(
FLAGS.epochs, eval_results['accuracy']))
if FLAGS.dpsgd:
print_privacy_guarantees(
epochs=FLAGS.epochs,
batch_size=FLAGS.batch_size,
samples=num_samples,
noise_multiplier=FLAGS.noise_multiplier,
)
if __name__ == '__main__':
app.run(main)
|
tensorflow/privacy
|
tutorials/mnist_lr_tutorial.py
|
Python
|
apache-2.0
| 9,703
|
[
"Gaussian"
] |
934793c2acc93c2ca4a7dc397e987f2b30483ac218cb63a7db25d9a2254217bd
|
# TODO(SK): module Missing docstring
import logging
import bpy
import numpy
import mathutils
from . import pam
from . import model
from . import colormaps
from . import constants
from . import mesh
from . import connection_mapping
logger = logging.getLogger(__package__)
vis_objects = 0
def setCursor(loc):
"""Just a more convenient way to set the location of the cursor"""
bpy.data.screens['Default'].scene.cursor_location = loc
def getCursor():
"""Just return the cursor location. A bit shorter to type ;)"""
return bpy.data.screens['Default'].scene.cursor_location
def visualizePostNeurons(no_connection, pre_neuron):
"""Visualize the post-synaptic neurons that are connected with a given
neuron from the presynaptic layer
:param int no_connection: connection index
:param int pre_neuron: index of pre-synaptic neuron
"""
global vis_objects
layer = pam.pam_connections[no_connection][0][-1] # get last layer of connection
neuronset = pam.pam_connections[no_connection][2] # neuronset 2
connectivity = pam.pam_connection_results[no_connection]['c'][pre_neuron]
for i in connectivity:
if (i >= 0):
bpy.ops.mesh.primitive_uv_sphere_add(size=1, view_align=False, enter_editmode=False, location=layer.particle_systems[neuronset].particles[i].location, layers=(True, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False))
bpy.ops.transform.resize(value=(0.05, 0.05, 0.05))
bpy.context.selected_objects[0].name = "visualization.%03d" % vis_objects
vis_objects = vis_objects + 1
def generateLayerNeurons(layer, particle_system, obj, object_color=[],
indices=-1):
"""Generate for each particle (neuron) a cone with appropriate naming"""
# generate first mesh
i = 0
p = layer.particle_systems[particle_system].particles[0]
if indices == -1:
particles = layer.particle_systems[particle_system].particles
else:
particles = layer.particle_systems[particle_system].particles[indices[0]:indices[1]]
# generates linked duplicates of this mesh
for i, p in enumerate(particles):
bpy.ops.object.select_all(action='DESELECT')
bpy.context.scene.objects.active = obj
bpy.context.object.select = True
bpy.ops.object.duplicate(linked=True, mode='INIT')
dupli = bpy.context.active_object
dupli.name = 'n' + '_' + layer.name + '_' + '%05d' % (i + 1)
dupli.location = p.location
if object_color:
dupli.color = object_color[i]
def getColors(colormap, v, interval=[], alpha=True, zeroBlack=False, offset=0.0):
"""Based on a colormaps, values in the vector are converted to colors
from the colormap
:param list colormap: colormap to be used
:param list v: list of values
:param list interval: min and maximal range to be used, if empty these
values are computed based on v
:param list alpha: default true, usually not to be changed
:param list zeroBlack: if true, zero values are colored in black with 0 alpha independent on the chosen colormap
:param list offset: shifts the entire colormap. range between -1 and 1.
"""
if not interval:
interval = [min(v), max(v)]
l = len(colormap) - 1
span = float(interval[1] - interval[0])
colors = []
for i in v:
if i == 0:
if alpha:
colors.append([0.,0.,0.,0.])
else:
colors.append([0.,0.,0.])
continue
ind = int(numpy.floor((((i - interval[0]) / span) + offset) * l))
ind = max(min(l, ind), 0)
if alpha:
colors.append(colormap[ind])
else:
colors.append(colormap[ind][:3])
return colors
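# A minimal usage sketch for getColors (the values and interval below are made
# up for illustration and are not taken from the original code):
#
#   values = [0.1, 0.5, 0.9]
#   colors = getColors(colormaps.standard, values, interval=[0.0, 1.0])
#   # colors[i] is an RGBA list taken from colormaps.standard; a value of 0
#   # would be mapped to transparent black.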
def visualizeNeuronProjLength(no_connection, obj):
"""Visualizes the connection-length of the pre-synaptic neurons for a given
mapping-index
:param int no_connection: connection index (mapping index)
"""
global vis_objects
layers = model.MODEL.connections[no_connection].pre_layer.obj # get first layer
neuronset1 = model.MODEL.connections[no_connection].pre_layer.neuronset_name
ds = numpy.mean(model.CONNECTION_RESULTS[no_connection]['d'], 1)
colors = getColors(colormaps.standard, ds)
generateLayerNeurons(layers, neuronset1, obj, colors)
def visualizePoint(point, obj=None):
"""Visualize a point in 3d by creating a small sphere
    Providing an object as the obj argument duplicates that object instead of creating a sphere."""
global vis_objects
if not obj:
bpy.ops.mesh.primitive_uv_sphere_add(size=1, view_align=False, enter_editmode=False, location=point, layers=(True, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False))
bpy.ops.transform.resize(value=(0.05, 0.05, 0.05))
else:
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
bpy.ops.object.duplicate(linked=True, mode='INIT')
bpy.context.selected_objects[0].location = point
bpy.context.selected_objects[0].name = "visualization.%03d" % vis_objects
vis_objects = vis_objects + 1
def visualizePath(pointlist, smoothing=0, material=None, bevel_resolution = 0):
"""Create path for a given point list
:param list pointlist: 3d-vectors that are converted to a path
    :param int smoothing: number of smoothing steps that should be applied afterwards
This code is taken and modified from the bTrace-Addon for Blender
http://blenderartists.org/forum/showthread.php?214872
"""
global vis_objects
# trace the origins
tracer = bpy.data.curves.new('tracer', 'CURVE')
tracer.dimensions = '3D'
spline = tracer.splines.new('BEZIER')
spline.bezier_points.add(len(pointlist) - 1)
curve = bpy.data.objects.new('curve', tracer)
bpy.context.scene.objects.link(curve)
# render ready curve
tracer.resolution_u = 1
tracer.bevel_resolution = bevel_resolution # Set bevel resolution from Panel options
tracer.fill_mode = 'FULL'
tracer.bevel_depth = bpy.context.scene.pam_visualize.bevel_depth # Set bevel depth from Panel options
# move nodes to objects
for i in range(0, len(pointlist)):
p = spline.bezier_points[i]
p.co = pointlist[i]
p.handle_right_type = 'VECTOR'
p.handle_left_type = 'VECTOR'
# bpy.context.scene.objects.active = curve
# bpy.ops.object.mode_set()
curve.name = "visualization.%03d" % vis_objects
vis_objects = vis_objects + 1
# apply material if given
if material is not None:
curve.active_material = material
# apply smoothing if requested
if smoothing > 0:
bpy.context.scene.objects.active = curve
bpy.ops.object.mode_set()
bpy.ops.object.editmode_toggle()
bpy.ops.curve.select_all(action='SELECT')
for i in range(0, smoothing):
bpy.ops.curve.smooth()
bpy.ops.object.editmode_toggle()
return curve
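# Hedged sketch of a direct call to visualizePath (the coordinates below are
# arbitrary example values): build a short curve through three points, smooth
# it and measure its length.
#
#   points = [mathutils.Vector((0.0, 0.0, 0.0)),
#             mathutils.Vector((1.0, 0.0, 0.5)),
#             mathutils.Vector((2.0, 0.0, 0.0))]
#   curve = visualizePath(points, smoothing=2)
#   print(calculatePathLength(curve))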
def calculatePathLength(curveObject):
"""Calculates the length of the path of a curve.
Does not take bezier interpolation into account, only distance between points
:param bpy.types.Object curveObject: The curve
:return float: The length of the curve"""
if type(curveObject) == bpy.types.Object:
data = curveObject.data
elif type(curveObject) == bpy.types.Curve:
data = curveObject
else:
raise ValueError("curveObject needs to be an Object or a Curve")
length = 0.0
for spline in data.splines:
for i in range(len(spline.bezier_points) - 1):
dist = spline.bezier_points[i+1].co - spline.bezier_points[i].co
length += dist.length
return length
def visualizeForwardMapping(no_connection, pre_index):
"""This is a debugging routine. The procedure tries to visualize the maximal
amount of mappings to determine, where the mapping fails
no_connection : connection/mapping-index
pre_index : index of pre-synaptic neuron
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
for s in range(2, (slayer + 2)):
pre_p3d, pre_p2d, pre_d = pam.computeMapping(
layers[0:s],
connections[0:(s - 1)],
distances[0:(s - 2)] + [constants.DIS_euclidUV],
con.pre_layer.getNeuronPosition(pre_index),
debug=True
)
logger.debug(s)
logger.debug(pre_p3d)
logger.debug(pre_p2d)
logger.debug(pre_d)
if pre_p3d:
visualizePath(pre_p3d, material = material)
def visualizeBackwardMapping(no_connection, post_index):
""" This is a debugging routine. The procedure tries to visualize the maximal
amount of mappings to determine, where the mapping fails
no_connection : connection/mapping-index
post_index : index of post-synaptic neuron
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
for s in range(len(layers)-3, slayer-2, -1):
post_p3d, post_p2d, post_d = pam.computeMapping(layers[-1:s:-1],
connections[-1:s:-1],
distances[-1:s:-1],
con.post_layer.getNeuronPosition(post_index))
logger.debug(s)
logger.debug(post_p3d)
if post_p3d:
visualizePath(post_p3d, material = material)
def visualizeConnectionsForNeuron(no_connection, pre_index, smoothing=0, print_statistics = False):
""" Visualizes all connections between a given pre-synaptic neuron and its connections
to all post-synaptic neurons
layers : list of layers connecting a pre- with a post-synaptic layer
neuronset1,
neuronset2 : name of the neuronset (particle system) of the pre- and post-synaptic layer
slayer : index in layers for the synaptic layer
connections : list of values determining the type of layer-mapping
distances : list of values determining the calculation of the distances between layers
pre_index : index of pre-synaptic neuron
post_indices : index-list of post-synaptic neurons
synapses : optional list of coordinates for synapses
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
post_indices = model.CONNECTION_RESULTS[no_connection]['c'][pre_index]
synapses = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
if print_statistics:
print("Visualizing connections for neuron", pre_index, "from", " -> ".join([l.name for l in layers]))
# path of the presynaptic neuron to the synaptic layer
pre_p3d, pre_p2d, pre_d = pam.computeMapping(layers[0:(slayer + 1)],
connections[0:slayer],
distances[0:slayer],
con.pre_layer.getNeuronPosition(pre_index))
first_item = True
first_item_distance = 0.0
path_lengthes = []
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
layers_post = layers[:(slayer - 1):-1]
connections_post = connections[:(slayer - 1):-1]
distances_post = distances[:(slayer - 1):-1]
mapping_post = connection_mapping.Mapping(layers_post, connections_post, distances_post)
for i in range(0, len(post_indices)):
if post_indices[i] == -1:
continue
post_p3d, post_p2d, post_d = mapping_post.computeMapping(con.post_layer.getNeuronPosition(int(post_indices[i])))
if synapses is None:
curve = visualizePath(pre_p3d + post_p3d[::-1], material = material)
distance = calculatePathLength(curve)
path_lengthes.append(distance)
else:
if (len(synapses[i]) > 0):
distances_pre, pre_path = pam.computeDistanceToSynapse(
layers[slayer - 1], layers[slayer], pre_p3d[-1], mathutils.Vector(synapses[i]), distances[slayer - 1])
if distances_pre >= 0:
distances_post, post_path = pam.computeDistanceToSynapse(
layers[slayer + 1], layers[slayer], post_p3d[-1], mathutils.Vector(synapses[i]), distances[slayer])
if (distances_post >= 0):
if first_item:
curve = visualizePath(pre_p3d, smoothing, material = material)
first_item_distance = calculatePathLength(curve)
curve = visualizePath([pre_p3d[-1]] + pre_path + post_path[::-1] + post_p3d[::-1], smoothing, material = material)
first_item_distance += calculatePathLength(curve)
first_item = False
else:
curve = visualizePath([pre_p3d[-1]] + pre_path + post_path[::-1] + post_p3d[::-1], smoothing, material = material)
distance = calculatePathLength(curve) + first_item_distance
path_lengthes.append(distance)
if print_statistics:
path_lengthes = numpy.array(path_lengthes)
delay = layers[0].obj.particle_systems[layers[0].neuronset_name].settings.get('delay', 1.0)
print("Using a delay modifier of ", delay)
path_lengthes *= delay
average_path_length = numpy.mean(path_lengthes)
standard_deviation = numpy.std(path_lengthes)
print("Average connection length:", average_path_length)
print("Standard deviation: ", standard_deviation)
print("Maximum connection length:", numpy.amax(path_lengthes))
print("Minimum connection length:", numpy.amin(path_lengthes))
if not first_item:
return [pre_p3d[-1]] + pre_path + post_path[::-1] + post_p3d[::-1]
else:
return []
def visualizeOneConnection(no_connection, pre_index, post_index, smoothing=0):
""" Visualizes all connections between a given pre-synaptic and a given post-synaptic
no_connection : connection/mapping-id
pre_index : index of pre-synaptic neuron
post_index : index of post-synaptic neuron
post_list_index : index to be used in c[pre_index][post_list_index] to address post_index
synapses : optional list of coordinates for synapses
"""
where_list = numpy.where(model.CONNECTION_RESULTS[no_connection]['c'][pre_index] == post_index)[0]
if len(where_list)==0:
return None
post_list_index = where_list[0]
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
synapses = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
# path of the presynaptic neuron to the synaptic layer
pre_p3d, pre_p2d, pre_d = pam.computeMapping(layers[0:(slayer + 1)],
connections[0:slayer],
distances[0:slayer],
con.pre_layer.getNeuronPosition(pre_index))
post_p3d, post_p2d, post_d = pam.computeMapping(layers[:(slayer - 1):-1],
connections[:(slayer - 1):-1],
distances[:(slayer - 1):-1],
con.post_layer.getNeuronPosition(post_index))
if synapses is None:
return visualizePath(pre_p3d + post_p3d[::-1], smoothing, material = material)
else:
distances_pre, pre_path = pam.computeDistanceToSynapse(
layers[slayer - 1], layers[slayer], pre_p3d[-1], mathutils.Vector(synapses[post_list_index]), distances[slayer - 1])
if distances_pre >= 0:
distances_post, post_path = pam.computeDistanceToSynapse(
layers[slayer + 1], layers[slayer], post_p3d[-1], mathutils.Vector(synapses[post_list_index]), distances[slayer])
if distances_post >= 0:
return visualizePath(pre_p3d + pre_path + post_path[::-1] + post_p3d[::-1], smoothing, material = material)
def visualizeOneConnectionPre(no_connection, pre_index, smoothing=0):
""" Visualizes the connection up to the forking just before the synapse
:param no_connection: connection/mapping-id
:type no_connection: int
:param pre_index: index of pre-synaptic neuron
:type pre_index: int
:return: The created curve object
:rtype: bpy.types.Object
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
synapses = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
# path of the presynaptic neuron to the synaptic layer
pre_p3d, pre_p2d, pre_d = pam.computeMapping(layers[0:(slayer + 1)],
connections[0:slayer],
distances[0:slayer],
con.pre_layer.getNeuronPosition(pre_index))
return visualizePath(pre_p3d, smoothing, material = material)
def visualizeOneConnectionPost(no_connection, pre_index, post_index, smoothing=0):
""" Visualizes only the part of a connection, where the connection starts to fork to the given post-neuron
:param no_connection: connection/mapping-id
:type no_connection: int
:param pre_index: index of pre-synaptic neuron
:type pre_index: int
:param post_index: index of post-synaptic neuron
:type post_index: int
:return: The created curve object
:rtype: bpy.types.Object
"""
where_list = numpy.where(model.CONNECTION_RESULTS[no_connection]['c'][pre_index] == post_index)[0]
if len(where_list)==0:
return None
post_list_index = where_list[0]
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
synapses = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
material = bpy.data.materials.get(bpy.context.scene.pam_visualize.connection_material, None)
# path of the presynaptic neuron to the synaptic layer
pre_p3d, pre_p2d, pre_d = pam.computeMapping(layers[0:(slayer + 1)],
connections[0:slayer],
distances[0:slayer],
con.pre_layer.getNeuronPosition(pre_index))
post_p3d, post_p2d, post_d = pam.computeMapping(layers[:(slayer - 1):-1],
connections[:(slayer - 1):-1],
distances[:(slayer - 1):-1],
con.post_layer.getNeuronPosition(post_index))
if synapses is None:
return visualizePath(post_p3d[::-1], smoothing, material = material)
else:
distances_pre, pre_path = pam.computeDistanceToSynapse(
layers[slayer - 1], layers[slayer], pre_p3d[-1], mathutils.Vector(synapses[post_list_index]), distances[slayer - 1])
if distances_pre >= 0:
distances_post, post_path = pam.computeDistanceToSynapse(
layers[slayer + 1], layers[slayer], post_p3d[-1], mathutils.Vector(synapses[post_list_index]), distances[slayer])
if distances_post >= 0:
return visualizePath(pre_path + post_path[::-1] + post_p3d[::-1], smoothing, material = material)
def visualizeNeuronSpread(connections, neuron):
"""Visualize for a collection of connections, the post-synaptic targets
of a given neuron number of the first layer in the first connection and
iteratively uses the post-synaptic targets as pre-synaptic neurons for
the following connections
:param list connections: list of connection-ids
:param int neuron: neuron number for the pre-synaptic layer of the first
connection
"""
visualizeConnectionsForNeuron(connections[0], neuron)
if (len(connections) > 1):
post_indices = model.CONNECTION_RESULTS[connections[0]]['c'][neuron]
for post_index in post_indices[0:1]:
if post_index >= 0:
visualizeNeuronSpread(connections[1:], post_index)
def visualizeUnconnectedNeurons(no_connection):
""" Visualizes unconnected neurons for a given connection_index """
c = numpy.array(model.CONNECTION_RESULTS[no_connection]['c'])
sums = numpy.array([sum(row) for row in c])
indices = numpy.where(sums == -model.MODEL.connections[no_connection].synaptic_layer.no_synapses)[0]
logger.info(indices)
neuron_count = len(c)
unconnected_count = len(indices)
logger.info(str(unconnected_count) + "/" + str(neuron_count) + ", " + str(round((unconnected_count / neuron_count) * 10000) / 100) + "%")
layer = model.MODEL.connections[no_connection].pre_layer
for index in indices:
visualizePoint(layer.getNeuronPosition(index))
def visualizeUnconnectedPostNeurons(no_connection):
""" Visualizes unconnected neurons for a given connection_index """
c = model.CONNECTION_RESULTS[no_connection]['c']
layer = model.MODEL.connections[no_connection].post_layer #last layer of connection
indices = []
neuron_count = len(c)
    # collect the post-indices that occur anywhere in the connectivity matrix
    connected_indices = set(numpy.array(c).flatten().tolist())
    for index in range(neuron_count):
        if index not in connected_indices:
visualizePoint(layer.getNeuronPosition(index))
indices.append(index)
logger.info(indices)
unconnected_count = len(indices)
logger.info(str(unconnected_count) + "/" + str(neuron_count) + ", " + str(round((unconnected_count / neuron_count) * 10000) / 100) + "%")
def visualizePartlyConnectedNeurons(no_connection):
""" Visualizes neurons which are only partly connected """
c = numpy.array(model.CONNECTION_RESULTS[no_connection]['c'])
sums = numpy.array([sum(row) for row in c])
indices = numpy.where(sums < model.MODEL.connections[no_connection].synaptic_layer.no_synapses)[0]
logger.info(indices)
layer = model.MODEL.connections[no_connection].pre_layer
for index in indices:
visualizePoint(layer.getNeuronPosition(index))
def visualizeClean():
"""delete all visualization objects"""
# delete all previous spheres
global vis_objects
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.select_pattern(pattern="visualization*")
bpy.ops.object.delete(use_global=False)
vis_objects = 0
def polygons_coordinate(obj):
r = []
for p in obj.data.polygons:
co = []
for v in p.vertices:
co.append(obj.data.vertices[v].co)
r.append(co)
return r
def color_polygons(obj, colors):
if len(obj.data.polygons) != len(colors):
raise Exception("number of colors given does not match polgyons")
if not obj.data.vertex_colors:
obj.data.vertex_colors.new()
vc = obj.data.vertex_colors.active
for c, p in zip(colors, obj.data.polygons):
for v in p.loop_indices:
vc.data[v].color = c
def vertices_coordinate(obj):
return [v.co for v in obj.data.vertices]
def color_vertices(obj, colors):
if len(obj.data.vertices) != len(colors):
raise Exception("number of colors given does not match vertices")
if not obj.data.vertex_colors:
obj.data.vertex_colors.new()
vc = obj.data.vertex_colors.active
vc_index = [v for p in obj.data.polygons for v in p.vertices]
for i, n in enumerate(vc_index):
vc.data[i].color = colors[n]
# TODO(SK): Parameter types
def colorize_vertices(obj, v, interval=[]):
"""Colorize vertices of an object based on values in v and a
given interval
    :param bpy.types.Object obj: object whose vertices should be colorized
    :param list v: list of values; its length must correspond to the number of vertices
    :param list interval: minimal and maximal range. If empty, it will be computed
        based on v
"""
colors = getColors(colormaps.standard, v, interval, alpha=False)
color_vertices(obj, colors)
def visualizeMappingDistance(no_mapping):
""" visualizes the mapping distance for a pre-synaptic layer and a given
mapping. The mapping distance is visualized by colorizing the vertices
of the layer """
layers = model.MODEL.connections[no_mapping].layers
distances = []
for ds in model.CONNECTION_RESULTS[no_mapping]['d']:
distances.append(numpy.mean(ds))
colorize_vertices(layers[0].obj, distances)
def computeAxonLengths(no_connection, pre_index, visualize=False):
""" Computes the axon length to each synapse for each post-synaptic neuron the pre-
synaptic neuron is connected with
no_connection : connection/mapping-id
pre_index : index of pre-synaptic neuron
"""
con = model.MODEL.connections[no_connection]
layers = con.layers
slayer = con.synaptic_layer_index
connections = con.mapping_connections
distances = con.mapping_distances
post_indices = model.CONNECTION_RESULTS[no_connection]['c'][pre_index]
synapses = model.CONNECTION_RESULTS[no_connection]['s'][pre_index]
# path of the presynaptic neuron to the synaptic layer
pre_p3d, pre_p2d, pre_d = pam.computeMapping(layers[0:(slayer + 1)],
connections[0:slayer],
distances[0:slayer],
con.pre_layer.getNeuronPosition(pre_index))
first_item = True
result = []
for i in range(0, len(post_indices)):
if post_indices[i] == -1:
continue
if synapses is None:
result.append(mesh.compute_path_length(pre_p3d))
else:
if (len(synapses[i]) > 0):
distances_pre, pre_path = pam.computeDistanceToSynapse(
layers[slayer - 1], layers[slayer], pre_p3d[-1], synapses[i], distances[slayer - 1])
result.append(mesh.compute_path_length(pre_p3d + pre_path))
if visualize:
visualizePath(pre_p3d + pre_path)
return result
def hideAllLayers():
""" Hide all layers involved in all mappings. If a layer occurs multiple times
    it is simply hidden multiple times """
for m in model.MODEL.connections:
for layer in m.layers:
layer.obj.hide = True
def showMappingLayers(index):
""" shows for a given mapping all layers involved in but hides everything else """
hideAllLayers()
for layer in model.MODEL.connections[index].layers:
layer.obj.hide = False
def showPrePostLayers():
""" shows for all mappings all the pre- and post-layers and hides everything else """
hideAllLayers()
for m in model.MODEL.connections:
m.pre_layer.obj.hide = False
m.post_layer.obj.hide = False
|
MartinPyka/Parametric-Anatomical-Modeling
|
pam/pam_vis.py
|
Python
|
gpl-2.0
| 28,349
|
[
"NEURON"
] |
f200c92101255d38cfadbdaa88053509edcd70fb8da3a776fa50eec91e49823a
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter estimation by iterated filtering."""
import collections
import contextlib
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import invert
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import independent
from tensorflow_probability.python.distributions import joint_distribution_named
from tensorflow_probability.python.distributions import joint_distribution_sequential
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.experimental.mcmc import infer_trajectories
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'geometric_cooling_schedule',
'IteratedFilter'
]
JAX_MODE = False # Overwritten by rewrite script.
NUMPY_MODE = False
# Utility to avoid breakage when passed-in structures are mutated externally.
_copy_structure = lambda struct: tf.nest.map_structure(lambda x: x, struct)
ParametersAndState = collections.namedtuple('ParametersAndState',
['unconstrained_parameters',
'state'])
def geometric_cooling_schedule(cooling_fraction_per_k_iterations, k=1.):
"""Defines a cooling schedule following a geometric sequence.
This returns a function `f` such that
```python
f(iteration) = cooling_fraction_per_k_iterations**(iteration / k)
```
Args:
cooling_fraction_per_k_iterations: float `Tensor` ratio by which the
original value should be scaled once `k` iterations have been completed.
k: int `Tensor` number of iterations used to define the schedule.
Returns:
f: Python `callable` representing the cooling schedule.
"""
cooling_fraction_per_k_iterations = tf.convert_to_tensor(
cooling_fraction_per_k_iterations,
dtype_hint=tf.float32,
name='cooling_fraction_per_k_iterations')
dtype = cooling_fraction_per_k_iterations.dtype
k = tf.cast(k, dtype=dtype, name='k')
def f(iteration):
iteration = tf.cast(iteration, dtype=dtype, name='iteration')
return cooling_fraction_per_k_iterations ** (iteration / k)
return f
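# Illustration of the schedule defined above (numbers chosen only as an
# example): with cooling_fraction_per_k_iterations=0.5 and k=10, the returned
# callable halves the perturbation scale every 10 iterations.
#
#   schedule = geometric_cooling_schedule(0.5, k=10)
#   schedule(0)   # -> 1.0
#   schedule(10)  # -> 0.5
#   schedule(20)  # -> 0.25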
class DeterministicEmpirical(distribution.Distribution):
"""Dummy 'proposal' distribution that just returns samples we pass in."""
def __init__(self, values_with_sample_dim, batch_ndims=0, validate_args=False,
name=None):
"""Initializes an empirical distribution with a list of samples.
Args:
values_with_sample_dim: nested structure of `Tensor`s, each of shape
prefixed by `[num_samples, B1, ..., Bn]`, where `num_samples` as well as
`B1, ..., Bn` are batch dimensions shared across all `Tensor`s.
batch_ndims: optional scalar int `Tensor`, or structure matching
`values_with_sample_dim` of scalar int `Tensor`s, specifying the number
of batch dimensions. Used to determine the batch and event shapes of the
distribution.
Default value: `0`.
validate_args: Python `bool` indicating whether to perform runtime checks
that may have performance cost.
Default value: `False`.
name: Python `str` name for ops created by this distribution.
"""
parameters = dict(locals())
with tf.name_scope(name or 'DeterministicEmpirical') as name:
# Ensure we don't break if the passed-in structures are externally
# mutated.
values_with_sample_dim = _copy_structure(values_with_sample_dim)
batch_ndims = _copy_structure(batch_ndims)
# Prevent tf.Module from wrapping passed-in values, because the
# wrapper breaks JointDistributionNamed (and maybe other JDs). Instead, we
# save a separate ref to the input that is used only by tf.Module
# tracking.
self._values_for_tracking = values_with_sample_dim
self._values_with_sample_dim = self._no_dependency(values_with_sample_dim)
if not tf.nest.is_nested(batch_ndims):
batch_ndims = tf.nest.map_structure(
lambda _: batch_ndims, values_with_sample_dim)
self._batch_ndims = batch_ndims
self._max_num_samples = ps.reduce_min(
[ps.size0(x) for x in tf.nest.flatten(values_with_sample_dim)])
super(DeterministicEmpirical, self).__init__(
dtype=tf.nest.map_structure(
lambda x: x.dtype, self.values_with_sample_dim),
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=True,
name=name)
self._parameters = self._no_dependency(parameters)
@property
def batch_ndims(self):
return _copy_structure(self._batch_ndims)
@property
def max_num_samples(self):
return self._max_num_samples
@property
def values_with_sample_dim(self):
return _copy_structure(self._values_with_sample_dim)
def _event_shape(self):
return tf.nest.map_structure(
lambda x, nd: tf.TensorShape(x.shape[1 + nd:]),
self.values_with_sample_dim,
self.batch_ndims)
def _event_shape_tensor(self):
return tf.nest.map_structure(
lambda x, nd: tf.shape(x)[1 + nd:],
self.values_with_sample_dim,
self.batch_ndims)
def _batch_shape(self):
return tf.nest.map_structure(
lambda x, nd: tf.TensorShape(x.shape[1 : 1 + nd]),
self.values_with_sample_dim,
self.batch_ndims)
def _batch_shape_tensor(self):
return tf.nest.map_structure(
lambda x, nd: tf.shape(x)[1 : 1 + nd],
self.values_with_sample_dim,
self.batch_ndims)
# TODO(b/152797117): Override _sample_n, once it supports joint distributions.
def sample(self, sample_shape=(), seed=None, name=None):
with tf.name_scope(name or 'sample'):
# Grab the required number of values from the provided tensors.
sample_shape = dist_util.expand_to_vector(sample_shape)
n = ps.cast(ps.reduce_prod(sample_shape), dtype=tf.int32)
# Check that we're not trying to draw too many samples.
assertions = []
will_overflow_ = tf.get_static_value(n > self.max_num_samples)
if will_overflow_:
raise ValueError('Trying to draw {} samples from a '
'`DeterministicEmpirical` instance for which only {} '
'samples were provided.'.format(
tf.get_static_value(n),
tf.get_static_value(self.max_num_samples)))
elif (will_overflow_ is None # Couldn't determine statically.
and self.validate_args):
assertions.append(
tf.debugging.assert_less_equal(
n, self.max_num_samples, message='Number of samples to draw '
'from a `DeterministicEmpirical` instance must not exceed the '
'number provided at construction.'))
# Extract the appropriate number of sampled values.
with tf.control_dependencies(assertions):
sampled = tf.nest.map_structure(
lambda x: x[:n, ...], self.values_with_sample_dim)
# Reshape the values to the appropriate sample shape.
return tf.nest.map_structure(
lambda x: tf.reshape(x, # pylint: disable=g-long-lambda
ps.concat([ps.cast(sample_shape, tf.int32),
ps.cast(ps.shape(x)[1:], tf.int32)],
axis=0)),
sampled)
def _prob(self, x):
flat_values = tf.nest.flatten(self.values_with_sample_dim)
return tf.cast(
tf.reduce_all([
tf.equal(a, b[:ps.size0(a)])
for (a, b) in zip(tf.nest.flatten(x), flat_values)]),
dtype=flat_values[0].dtype)
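# Rough usage sketch for DeterministicEmpirical (shapes below are
# illustrative): wrapping a tensor of pre-drawn values so that `sample(n)`
# simply returns the first `n` of them.
#
#   values = tf.reshape(tf.range(12., dtype=tf.float32), [4, 3])  # 4 "samples"
#   dist = DeterministicEmpirical(values)
#   dist.sample(2)  # returns values[:2], i.e. a Tensor of shape [2, 3]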
def _maybe_build_joint_distribution(structure_of_distributions):
"""Turns a (potentially nested) structure of dists into a single dist."""
# Base case: if we already have a Distribution, return it.
if dist_util.is_distribution_instance(structure_of_distributions):
return structure_of_distributions
# Otherwise, recursively convert all interior nested structures into JDs.
outer_structure = tf.nest.map_structure(
_maybe_build_joint_distribution,
structure_of_distributions)
if (hasattr(outer_structure, '_asdict') or
isinstance(outer_structure, collections.abc.Mapping)):
return joint_distribution_named.JointDistributionNamed(outer_structure)
else:
return joint_distribution_sequential.JointDistributionSequential(
outer_structure)
def augment_transition_fn_with_parameters(parameter_prior,
parameterized_transition_fn,
parameter_constraining_bijector):
"""Wraps a transition fn on states to act on `ParametersAndState` tuples."""
def params_and_state_transition_fn(step,
params_and_state,
perturbation_scale,
**kwargs):
"""Transition function operating on a `ParamsAndState` namedtuple."""
# Extract the state, to pass through to the observation fn.
unconstrained_params, state = params_and_state
if 'state_history' in kwargs:
kwargs['state_history'] = kwargs['state_history'].state
# Perturb each (unconstrained) parameter with normally-distributed noise.
if not tf.nest.is_nested(perturbation_scale):
perturbation_scale = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(perturbation_scale, # pylint: disable=g-long-lambda
name='perturbation_scale',
dtype=x.dtype),
unconstrained_params)
perturbed_unconstrained_parameter_dists = tf.nest.map_structure(
lambda x, p, s: independent.Independent( # pylint: disable=g-long-lambda
normal.Normal(loc=x, scale=p),
reinterpreted_batch_ndims=ps.rank_from_shape(s)),
unconstrained_params,
perturbation_scale,
parameter_prior.event_shape_tensor())
# For the joint transition, pass the perturbed parameters
# into the original transition fn (after pushing them into constrained
# space).
return joint_distribution_named.JointDistributionNamed(
ParametersAndState(
unconstrained_parameters=_maybe_build_joint_distribution(
perturbed_unconstrained_parameter_dists),
state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda
parameterized_transition_fn(
step,
state,
parameters=parameter_constraining_bijector.forward(
unconstrained_parameters),
**kwargs))))
return params_and_state_transition_fn
def augment_observation_fn_with_parameters(parameterized_observation_fn,
parameter_constraining_bijector):
"""Augments an observation fn to take `ParametersAndState` namedtuples."""
def observation_from_params_and_state_fn(step,
params_and_state,
**kwargs):
# Extract the state, to pass through to the observation fn.
unconstrained_parameters, state = params_and_state
if 'state_history' in kwargs:
_, kwargs['state_history'] = kwargs['state_history']
return parameterized_observation_fn(
step,
state,
parameters=parameter_constraining_bijector.forward(
unconstrained_parameters),
**kwargs)
return observation_from_params_and_state_fn
def joint_prior_on_parameters_and_state(parameter_prior,
parameterized_initial_state_prior_fn,
parameter_constraining_bijector,
prior_is_constrained=True):
"""Constructs a joint dist. from p(parameters) and p(state | parameters)."""
if prior_is_constrained:
parameter_prior = transformed_distribution.TransformedDistribution(
parameter_prior,
invert.Invert(parameter_constraining_bijector),
name='unconstrained_parameter_prior')
return joint_distribution_named.JointDistributionNamed(
ParametersAndState(
unconstrained_parameters=parameter_prior,
state=lambda unconstrained_parameters: ( # pylint: disable=g-long-lambda
parameterized_initial_state_prior_fn(
parameter_constraining_bijector.forward(
unconstrained_parameters)))))
class IteratedFilter(object):
"""A model augmented with parameter perturbations for iterated filtering."""
def __init__(self,
parameter_prior,
parameterized_initial_state_prior_fn,
parameterized_transition_fn,
parameterized_observation_fn,
parameterized_initial_state_proposal_fn=None,
parameterized_proposal_fn=None,
parameter_constraining_bijector=None,
name=None):
"""Builds an iterated filter for parameter estimation in sequential models.
Iterated filtering is a parameter estimation method in which parameters
are included in an augmented state space, with dynamics that introduce
parameter perturbations, and a filtering
algorithm such as particle filtering is run several times with perturbations
of decreasing size. This class implements the IF2 algorithm of
[Ionides et al., 2015][1], for which, under appropriate conditions
(including a uniform prior) the final parameter distribution approaches a
point mass at the maximum likelihood estimate. If a non-uniform prior is
provided, the final parameter distribution will (under appropriate
conditions) approach a point mass at the maximum a posteriori (MAP) value.
This class augments the state space of a sequential model to include
parameter perturbations, and provides utilities to run particle filtering
on that augmented model. Alternately, the augmented components may be passed
directly into a filtering algorithm of the user's choice.
Args:
parameter_prior: prior `tfd.Distribution` over parameters (may be a joint
distribution).
parameterized_initial_state_prior_fn: `callable` with signature
`initial_state_prior = parameterized_initial_state_prior_fn(parameters)`
where `parameters` has the form of a sample from `parameter_prior`,
and `initial_state_prior` is a distribution over the initial state.
parameterized_transition_fn: `callable` with signature
`next_state_dist = parameterized_transition_fn(
step, state, parameters, **kwargs)`.
parameterized_observation_fn: `callable` with signature
`observation_dist = parameterized_observation_fn(
step, state, parameters, **kwargs)`.
parameterized_initial_state_proposal_fn: optional `callable` with
signature `initial_state_proposal =
parameterized_initial_state_proposal_fn(parameters)` where `parameters`
has the form of a sample from `parameter_prior`, and
`initial_state_proposal` is a distribution over the initial state.
parameterized_proposal_fn: optional `callable` with signature
        `next_state_dist = parameterized_proposal_fn(
step, state, parameters, **kwargs)`.
Default value: `None`.
parameter_constraining_bijector: optional `tfb.Bijector` instance
such that `parameter_constraining_bijector.forward(x)` returns valid
parameters for any real-valued `x` of the same structure and shape
as `parameters`. If `None`, the default bijector of the provided
`parameter_prior` will be used.
Default value: `None`.
name: `str` name for ops constructed by this object.
Default value: `iterated_filter`.
#### Example
We'll walk through applying iterated filtering to a toy
Susceptible-Infected-Recovered (SIR) model, a [compartmental model](
https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SIR_model)
of infectious disease. Note that the model we use here is extremely
simplified and is intended as a pedagogical example; it should not be
interpreted to describe disease spread in the real world.
We begin by specifying a prior distribution over the parameters to be
inferred, thus defining the structure of the parameter space and the support
of the parameters (which will imply a default constraining bijector). Here
we'll use uniform priors over ranges that we expect to contain the
parameters:
```python
parameter_prior = tfd.JointDistributionNamed({
'infection_rate': tfd.Uniform(low=0., high=3.),
'recovery_rate': tfd.Uniform(low=0., high=3.),
})
```
The model specification itself is identical to that used by
`tfp.experimental.mcmc.infer_trajectories`, except that each component
accepts an additional `parameters` keyword argument. We start by specifying
a parameterized prior on initial states. In this case, our state
includes the current number of susceptible and infected individuals
(the third compartment, recovered individuals, is implicitly defined
to include the remaining population). We'll also include, as auxiliary
variables, the daily counts of new infections and new recoveries; these
will help ensure that people shift consistently across compartments.
```python
population_size = 1000
initial_state_prior_fn = lambda parameters: tfd.JointDistributionNamed({
'new_infections': tfd.Poisson(parameters['infection_rate']),
'new_recoveries': tfd.Deterministic(
tf.broadcast_to(0., tf.shape(parameters['recovery_rate']))),
'susceptible': (lambda new_infections:
tfd.Deterministic(population_size - new_infections)),
'infected': (lambda new_infections:
tfd.Deterministic(new_infections))})
```
**Note**: the state prior must have the same batch shape as the
passed-in parameters; equivalently, it must sample a full state for each
parameter particle. If any part of the state prior does not depend
on the parameters, you must manually ensure that it has the appropriate
batch shape. For example, in the definition of `new_recoveries` above,
applying `broadcast_to` with the shape of a parameter ensures that
the batch shape is maintained.
Next, we specify a transition model. This takes the state at the
previous day, along with parameters, and returns a distribution
over the state for the current day.
```python
def parameterized_infection_dynamics(_, previous_state, parameters):
new_infections = tfd.Poisson(
parameters['infection_rate'] * previous_state['infected'] *
previous_state['susceptible'] / population_size)
new_recoveries = tfd.Poisson(
previous_state['infected'] * parameters['recovery_rate'])
return tfd.JointDistributionNamed({
'new_infections': new_infections,
'new_recoveries': new_recoveries,
'susceptible': lambda new_infections: tfd.Deterministic(
tf.maximum(0., previous_state['susceptible'] - new_infections)),
'infected': lambda new_infections, new_recoveries: tfd.Deterministic(
tf.maximum(0.,
(previous_state['infected'] +
new_infections - new_recoveries)))})
```
Finally, assume that every day we get to observe noisy counts of new
infections and recoveries.
```python
def parameterized_infection_observations(_, state, parameters):
del parameters # Not used.
return tfd.JointDistributionNamed({
'new_infections': tfd.Poisson(state['new_infections'] + 0.1),
'new_recoveries': tfd.Poisson(state['new_recoveries'] + 0.1)})
```
Combining these components, an `IteratedFilter` augments
the state space to include parameters that may change over time.
```python
iterated_filter = tfp.experimental.sequential.IteratedFilter(
parameter_prior=parameter_prior,
parameterized_initial_state_prior_fn=initial_state_prior_fn,
parameterized_transition_fn=parameterized_infection_dynamics,
parameterized_observation_fn=parameterized_infection_observations)
```
We may then run the filter to estimate parameters from a series
of observations:
```python
# Simulated with `infection_rate=1.2` and `recovery_rate=0.1`.
observed_values = {
'new_infections': tf.convert_to_tensor([
2., 7., 14., 24., 45., 93., 160., 228., 252., 158., 17.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
'new_recoveries': tf.convert_to_tensor([
0., 0., 3., 4., 3., 8., 12., 31., 49., 73., 85., 65., 71.,
58., 42., 65., 36., 31., 32., 27., 31., 20., 19., 19., 14., 27.])
}
parameter_particles = iterated_filter.estimate_parameters(
observations=observed_values,
num_iterations=20,
num_particles=4096,
initial_perturbation_scale=1.0,
cooling_schedule=(
tfp.experimental.sequential.geometric_cooling_schedule(
0.001, k=20)),
seed=test_util.test_seed())
print('Mean of parameter particles from final iteration: {}'.format(
tf.nest.map_structure(lambda x: tf.reduce_mean(x[-1], axis=0),
parameter_particles)))
print('Standard deviation of parameter particles from '
'final iteration: {}'.format(
tf.nest.map_structure(lambda x: tf.math.reduce_std(x[-1], axis=0),
parameter_particles)))
```
For more control, we could alternately choose to run filtering iterations
on the augmented model manually, using the filter of our choice.
For example, manually invoking `infer_trajectories` would allow us
to inspect the parameter and state values at all timesteps, and their
corresponding log-probabilities:
```python
trajectories, lps = tfp.experimental.mcmc.infer_trajectories(
observations=observations,
initial_state_prior=iterated_filter.joint_initial_state_prior,
transition_fn=functools.partial(
iterated_filter.joint_transition_fn,
perturbation_scale=perturbation_scale),
observation_fn=iterated_filter.joint_observation_fn,
proposal_fn=iterated_filter.joint_proposal_fn,
initial_state_proposal=iterated_filter.joint_initial_state_proposal(
initial_unconstrained_parameters),
num_particles=4096)
```
#### References:
[1] Edward L. Ionides, Dao Nguyen, Yves Atchade, Stilian Stoev, and Aaron A.
King. Inference for dynamic and latent variable models via iterated,
perturbed Bayes maps. _Proceedings of the National Academy of Sciences_
112, no. 3: 719-724, 2015.
https://www.pnas.org/content/pnas/112/3/719.full.pdf
"""
name = name or 'IteratedFilter'
with tf.name_scope(name):
self._parameter_prior = parameter_prior
self._parameterized_initial_state_prior_fn = (
parameterized_initial_state_prior_fn)
if parameter_constraining_bijector is None:
parameter_constraining_bijector = (
parameter_prior.experimental_default_event_space_bijector())
self._parameter_constraining_bijector = parameter_constraining_bijector
# Augment the prior to include both parameters and states.
self._joint_initial_state_prior = joint_prior_on_parameters_and_state(
parameter_prior,
parameterized_initial_state_prior_fn,
parameter_constraining_bijector,
prior_is_constrained=True)
# Check that prior samples have a consistent number of particles.
# TODO(davmre): remove the need for dummy shape dependencies,
# and this check, by using `JointDistributionNamedAutoBatched` with
# auto-vectorization enabled in `joint_prior_on_parameters_and_state`.
num_particles_canary = 13
canary_seed = samplers.sanitize_seed([0, 1])
def _get_shape_1(x):
if hasattr(x, 'state'):
x = x.state
return tf.TensorShape(x.shape[1:2])
prior_static_sample_shapes = tf.nest.map_structure(
# Sample shape [0, num_particles_canary] particles (size will be zero)
# then trim off the leading 0 and (possibly) any event shape.
# We expect shape [num_particles_canary] to remain.
_get_shape_1,
self._joint_initial_state_prior.sample([0, num_particles_canary],
seed=canary_seed))
if not all([
tensorshape_util.is_compatible_with(s[:1], [num_particles_canary])
for s in tf.nest.flatten(prior_static_sample_shapes)
]):
raise ValueError(
'The specified prior does not generate consistent '
'shapes when sampled. Please verify that all parts of '
'`initial_state_prior_fn` have batch shape matching '
'that of the parameters. This may require creating '
'"dummy" dependencies on parameters; for example: '
'`tf.broadcast_to(value, tf.shape(parameter))`. (in a '
f'test sample with {num_particles_canary} particles, we expected '
            'all values to have shape compatible with '
f'[{num_particles_canary}, ...]; '
f'saw shapes {prior_static_sample_shapes})')
# Augment the transition and observation fns to cover both
# parameters and states.
self._joint_transition_fn = augment_transition_fn_with_parameters(
parameter_prior,
parameterized_transition_fn,
parameter_constraining_bijector)
self._joint_observation_fn = augment_observation_fn_with_parameters(
parameterized_observation_fn,
parameter_constraining_bijector)
# If given a proposal for the initial state, augment it into a joint
# proposal over parameters and states.
joint_initial_state_proposal = None
if parameterized_initial_state_proposal_fn:
joint_initial_state_proposal = joint_prior_on_parameters_and_state(
parameter_prior,
parameterized_initial_state_proposal_fn,
parameter_constraining_bijector)
else:
parameterized_initial_state_proposal_fn = (
parameterized_initial_state_prior_fn)
self._joint_initial_state_proposal = joint_initial_state_proposal
self._parameterized_initial_state_proposal_fn = (
parameterized_initial_state_proposal_fn)
# If given a conditional proposal fn (for non-initial states), augment
# it to be joint over states and parameters.
self._joint_proposal_fn = None
if parameterized_proposal_fn:
self._joint_proposal_fn = augment_transition_fn_with_parameters(
parameter_prior,
parameterized_proposal_fn,
parameter_constraining_bijector)
self._batch_ndims = tf.nest.map_structure(
ps.rank_from_shape,
parameter_prior.batch_shape_tensor())
self._name = name
@property
def batch_ndims(self):
return _copy_structure(self._batch_ndims)
@property
def joint_initial_state_prior(self):
"""Initial state prior for the joint (augmented) model."""
return self._joint_initial_state_prior
def joint_initial_state_proposal(self, initial_unconstrained_parameters=None):
"""Proposal to initialize the model with given parameter particles."""
if initial_unconstrained_parameters is None:
joint_initial_state_proposal = self._joint_initial_state_proposal
else:
# Hack: DeterministicEmpirical is a fake distribution whose `sample`
# just proposes *exactly* the parameters we pass in.
unconstrained_parameter_proposal = DeterministicEmpirical(
initial_unconstrained_parameters,
batch_ndims=self.batch_ndims)
# Propose initial state conditioned on the parameters.
joint_initial_state_proposal = joint_prior_on_parameters_and_state(
unconstrained_parameter_proposal,
self.parameterized_initial_state_proposal_fn,
parameter_constraining_bijector=(
self.parameter_constraining_bijector),
prior_is_constrained=False)
# May return `None` if no initial proposal or params were specified.
return joint_initial_state_proposal
@property
def joint_transition_fn(self):
"""Transition function for the joint (augmented) model."""
return self._joint_transition_fn
@property
def joint_observation_fn(self):
"""Observation function for the joint (augmented) model."""
return self._joint_observation_fn
@property
def joint_proposal_fn(self):
"""Proposal function for the joint (augmented) model."""
return self._joint_proposal_fn
@property
def name(self):
return self._name
@property
def parameter_constraining_bijector(self):
"""Bijector mapping unconstrained real values into the parameter space."""
return self._parameter_constraining_bijector
@property
def parameterized_initial_state_prior_fn(self):
"""Prior function that was passed in at construction."""
return self._parameterized_initial_state_prior_fn
@property
def parameterized_initial_state_proposal_fn(self):
"""Initial proposal function passed in at construction."""
return self._parameterized_initial_state_proposal_fn
@property
def parameter_prior(self):
"""Prior distribution on parameters passed in at construction."""
return self._parameter_prior
def one_step(self,
observations,
perturbation_scale,
num_particles,
initial_unconstrained_parameters=None,
seed=None,
name=None,
**kwargs):
"""Runs one step of filtering to sharpen parameter estimates.
Args:
observations: observed `Tensor` value(s) on which to condition the
parameter estimate.
perturbation_scale: scalar float `Tensor`, or any structure of float
`Tensor`s broadcasting to the same shape as the unconstrained
parameters, specifying the scale (standard deviation) of Gaussian
perturbations to each parameter at each timestep.
num_particles: scalar int `Tensor` number of particles to use. Must match
the batch dimension of `initial_unconstrained_parameters`, if specified.
initial_unconstrained_parameters: optional structure of `Tensor`s, of
shape matching
`self.joint_initial_state_prior.sample([
num_particles]).unconstrained_parameters`,
used to initialize the filter.
Default value: `None`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: `str` name for ops constructed by this method.
**kwargs: additional keyword arguments passed to
`tfp.experimental.mcmc.infer_trajectories`.
Returns:
final_unconstrained_parameters: structure of `Tensor`s matching
`initial_unconstrained_parameters`, containing samples of
unconstrained parameters at the final timestep, as computed by
`self.filter_fn`.
"""
with self._name_scope(name or 'one_step'):
# Run the particle filter.
(unconstrained_parameter_trajectories, _), _ = (
infer_trajectories(
observations=observations,
initial_state_prior=self.joint_initial_state_prior,
transition_fn=functools.partial(
self.joint_transition_fn,
perturbation_scale=perturbation_scale),
observation_fn=self.joint_observation_fn,
proposal_fn=self.joint_proposal_fn,
initial_state_proposal=self.joint_initial_state_proposal(
initial_unconstrained_parameters),
num_particles=num_particles,
seed=seed,
**kwargs))
# Return the parameter estimates from the final step of the trajectory.
return tf.nest.map_structure(
lambda part: part[-1],
unconstrained_parameter_trajectories)
def estimate_parameters(self,
observations,
num_iterations,
num_particles,
initial_perturbation_scale,
cooling_schedule,
seed=None,
name=None,
**kwargs):
"""Runs multiple iterations of filtering following a cooling schedule.
Args:
observations: observed `Tensor` value(s) on which to condition the
parameter estimate.
num_iterations: int `Tensor` number of filtering iterations to run.
num_particles: scalar int `Tensor` number of particles to use.
initial_perturbation_scale: scalar float `Tensor`, or any structure of
float `Tensor`s broadcasting to the same shape as the (unconstrained)
parameters, specifying the scale (standard deviation) of Gaussian
perturbations to each parameter at the first timestep.
cooling_schedule: callable with signature
`cooling_factor = cooling_schedule(iteration)` for `iteration` in
`[0, ..., num_iterations - 1]`. The filter is
invoked with perturbations of scale
`initial_perturbation_scale * cooling_schedule(iteration)`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: `str` name for ops constructed by this method.
**kwargs: additional keyword arguments passed to
`tfp.experimental.mcmc.infer_trajectories`.
Returns:
final_parameter_particles: structure of `Tensor`s matching
`self.parameter_prior`, each with batch shape
`[num_iterations, num_particles]`. These are the populations
of particles representing the parameter estimate after each iteration
of filtering.
"""
with self._name_scope(name or 'estimate_parameters'):
step_seed, initial_seed = samplers.split_seed(seed)
initial_perturbation_scale = tf.convert_to_tensor(
initial_perturbation_scale, name='initial_perturbation_scale')
# Get initial parameter particles from the first filtering iteration.
initial_unconstrained_parameters = self.one_step(
observations=observations,
num_particles=num_particles,
perturbation_scale=initial_perturbation_scale,
seed=step_seed,
**kwargs)
# Run the remaining iterations and accumulate the results.
@tf.function(autograph=False)
def loop_body(unconstrained_parameters_seed, cooling_fraction):
unconstrained_parameters, seed = unconstrained_parameters_seed
step_seed, seed = samplers.split_seed(seed)
return (self.one_step(
observations=observations,
num_particles=num_particles,
perturbation_scale=tf.nest.map_structure(
lambda s: cooling_fraction * s, initial_perturbation_scale),
initial_unconstrained_parameters=unconstrained_parameters,
seed=step_seed,
**kwargs), seed)
estimated_unconstrained_parameters, _ = tf.scan(
fn=loop_body,
elems=cooling_schedule(ps.range(1, num_iterations)),
initializer=(initial_unconstrained_parameters, initial_seed))
return self.parameter_constraining_bijector.forward(
estimated_unconstrained_parameters)
@contextlib.contextmanager
def _name_scope(self, name):
with tf.name_scope(self.name):
with tf.name_scope(name) as name_scope:
yield name_scope
|
tensorflow/probability
|
tensorflow_probability/python/experimental/sequential/iterated_filter.py
|
Python
|
apache-2.0
| 37,064
|
[
"Gaussian"
] |
bf5bc1ac138bd9588523e383e54ba625fc6f20629d2d079b083bb344bdf4ac88
|
import numpy as np
import h5py
def writeH5(pressure,u,v,w,velmag,filename):
"""
    Write the h5 file that stores the pressure and velocity fields in the structure
    expected by the accompanying XDMF description.
    pressure = numpy array with pressure values
    u,v,w = numpy arrays with the velocity components
    velmag = numpy array with the velocity magnitude
    filename = string with desired filename
"""
f = h5py.File(filename,'w')
# Store velocity data into the velo_group of h5 file
velo_group = f.create_group("velo_group")
x_velo = velo_group.create_dataset("x_velo",data=u)
y_velo = velo_group.create_dataset("y_velo",data=v)
z_velo = velo_group.create_dataset("z_velo",data=w)
velmag = velo_group.create_dataset("velmag",data=velmag)
    # Store pressure data into the pres_group of the h5 file
pres_group = f.create_group("pres_group")
presmag = pres_group.create_dataset("presmag",data=pressure)
f.close()
def writeXdmf(dims,dx,filename,h5_file):
"""
    Write the xmf file that describes the hdf5 data, to be read by ParaView.
    dims = 3-tuple with the number of grid points in each dimension (z,y,x)
    dx = uniform grid spacing
    filename = string with the desired filename for the xmf file
    h5_file = name of the hdf5 file that the xmf file references
"""
f = open(filename,'w')
f.write('<?xml version="1.0" ?>\n')
f.write('<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>\n')
f.write('<Xdmf xmlns:xi="http://www.w3.org/2003/XInclude" Version="2.1">\n')
f.write('<Domain>\n')
f.write('<Grid Name="my_Grid" GridType="Uniform">\n')
f.write('<Topology TopologyType="3DCoRectMesh" Dimensions="%d %d %d">\n'%(dims[0],dims[1],dims[2]))
f.write('</Topology>\n')
f.write('<Geometry GeometryType="Origin_DxDyDz">\n')
f.write('<DataItem Dimensions="3" NumberType="Integer" Format="XML">\n')
f.write('0 0 0\n')
f.write('</DataItem>\n')
f.write('<DataItem Dimensions="3" NumberType="Integer" Format="XML">\n')
f.write('%g %g %g\n'%(dx,dx,dx))
f.write('</DataItem>\n')
f.write('</Geometry>\n')
f.write('<Attribute Name="velocity" AttributeType="Vector" Center="Node">\n')
f.write('<DataItem ItemType="Function" Function="JOIN($0, $1, $2)" Dimensions="%d %d %d 3">\n'%(dims[0],dims[1],dims[2]))
f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2]))
#f.write('out'+str(i)+'.h5:/velo_group/x_velo\n')
f.write('%s:/velo_group/x_velo\n'%h5_file)
f.write('</DataItem>\n')
f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2]))
#f.write('out'+str(i)+'.h5:/velo_group/y_velo\n')
f.write('%s:/velo_group/y_velo\n'%h5_file)
f.write('</DataItem>\n')
f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2]))
#f.write('out'+str(i)+'.h5:/velo_group/z_velo\n')
f.write('%s:/velo_group/z_velo\n'%h5_file)
f.write('</DataItem>\n')
f.write('</DataItem>\n')
f.write('</Attribute>\n')
f.write('<Attribute Name="pressure" AttributeType="Scalar" Center="Node">\n')
f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2]))
#f.write('out'+str(i)+'.h5:/pres_group/presmag\n')
f.write('%s:/pres_group/presmag\n'%h5_file)
f.write('</DataItem>\n')
f.write('</Attribute>\n')
f.write('<Attribute Name="velocityMagnitude" AttributeType="Scalar" Center="Node">\n')
f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2]))
#f.write('out'+str(i)+'.h5:/velo_group/velmag\n')
f.write('%s:/velo_group/velmag\n'%h5_file)
f.write('</DataItem>\n')
f.write('</Attribute>\n')
f.write('</Grid>\n')
f.write('</Domain>\n')
f.write('</Xdmf>\n')
f.close()
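# Hedged end-to-end sketch (array shapes, spacing and filenames are made up):
# write a small 3D field to HDF5 and produce the matching XDMF description so
# that the result can be opened in ParaView.
#
#   nz, ny, nx = 16, 16, 16
#   dx = 0.1
#   u = np.zeros((nz, ny, nx)); v = np.zeros_like(u); w = np.zeros_like(u)
#   p = np.zeros_like(u)
#   velmag = np.sqrt(u**2 + v**2 + w**2)
#   writeH5(p, u, v, w, velmag, 'out0.h5')
#   writeXdmf((nz, ny, nx), dx, 'out0.xmf', 'out0.h5')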
|
stu314159/pyNFC
|
hdf5Helper.py
|
Python
|
mit
| 3,600
|
[
"ParaView"
] |
69c9c7af7a55c063afa8c90fc1ea83f46369c878640d557d78e8441f2a920730
|
from .vpfit import *
from .galaxy import *
|
cwfinn/igmtools
|
igmtools/modeling/__init__.py
|
Python
|
bsd-3-clause
| 42
|
[
"Galaxy"
] |
f66d60f86e142446e97236b59afa439bb8389f5d27c928dad2fc2edf9b67ba8f
|
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
if __name__ == "__main__":
print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
sys.exit(2)
# We require Python 2.6 for the json package.
if sys.version < '2.6':
print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
print >>sys.stderr, "You are running Python " + sys.version
sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
defaultcc = None
codereview_disabled = None
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"
server_url_base = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
n = len(cwd)
if path.startswith(cwd) and path[n] == '/':
return path[n+1:]
return path
def Sub(l1, l2):
return [l for l in l1 if l not in l2]
def Add(l1, l2):
l = l1 + Sub(l2, l1)
l.sort()
return l
def Intersect(l1, l2):
return [l for l in l1 if l in l2]
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
if type(s) != t:
raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
typecheck(s, str)
return s.decode("utf-8")
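# Illustrative sketch (not from the original file): the str-in, str-out
# convention described above. The sample text is made up; the escaped bytes
# are the UTF-8 encoding of "世界".
def _example_unicode_roundtrip():
    s = "hello, \xe4\xb8\x96\xe7\x95\x8c"   # str holding UTF-8 bytes
    u = ustr(s)                             # unicode, for unicode-only APIs
    return u.encode("utf-8")                # back to str as soon as possible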
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
from mercurial import encoding
encoding.encoding = 'utf-8'
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
import sys
stdout, __stdout__ = sys.stdout, sys.__stdout__
reload(sys) # site.py deleted setdefaultencoding; get it back
sys.stdout, sys.__stdout__ = stdout, __stdout__
sys.setdefaultencoding('utf-8')
default_to_utf8()
#######################################################################
# Status printer for long-running commands
global_status = None
def set_status(s):
# print >>sys.stderr, "\t", time.asctime(), s
global global_status
global_status = s
class StatusThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
# pause a reasonable amount of time before
# starting to display status messages, so that
# most hg commands won't ever see them.
time.sleep(30)
# now show status every 15 seconds
while True:
time.sleep(15 - time.time() % 15)
s = global_status
if s is None:
continue
if s == "":
s = "(unknown status)"
print >>sys.stderr, time.asctime(), s
def start_status_thread():
t = StatusThread()
t.setDaemon(True) # allowed to exit if t is still running
t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
def __init__(self, name):
typecheck(name, str)
self.name = name
self.desc = ''
self.files = []
self.reviewer = []
self.cc = []
self.url = ''
self.local = False
self.web = False
self.copied_from = None # None means current user
self.mailed = False
self.private = False
self.lgtm = []
def DiskText(self):
cl = self
s = ""
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n\n"
if cl.private:
s += "Private: " + str(self.private) + "\n"
s += "Mailed: " + str(self.mailed) + "\n"
s += "Description:\n"
s += Indent(cl.desc, "\t")
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
typecheck(s, str)
return s
def EditorText(self):
cl = self
s = _change_prolog
s += "\n"
if cl.copied_from:
s += "Author: " + cl.copied_from + "\n"
if cl.url != '':
s += 'URL: ' + cl.url + ' # cannot edit\n\n'
if cl.private:
s += "Private: True\n"
s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
s += "CC: " + JoinComma(cl.cc) + "\n"
s += "\n"
s += "Description:\n"
if cl.desc == '':
s += "\t<enter description here>\n"
else:
s += Indent(cl.desc, "\t")
s += "\n"
if cl.local or cl.name == "new":
s += "Files:\n"
for f in cl.files:
s += "\t" + f + "\n"
s += "\n"
typecheck(s, str)
return s
def PendingText(self, quick=False):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
if not quick:
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
for (who, line) in cl.lgtm:
s += "\t\t" + who + ": " + line + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
typecheck(s, str)
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
private=self.private)
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
typecheck(s, str)
return s
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
if not self.files and not creating:
ui.warn("no files in change list\n")
if ui.configbool("codereview", "force_gofmt", True) and gofmt:
CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
set_status("uploading CL metadata + diffs")
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
]
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = None
# We do not include files when creating the issue,
# because we want the patch sets to record the repository
# and base revision they are diffs against. We use the patch
# set message for that purpose, but there is no message with
# the first patch set. Instead the message gets used as the
# new CL's overall subject. So omit the diffs when creating
# and then we'll run an immediate upload.
# This has the effect that every CL begins with an empty "Patch set 1".
if self.files and not creating:
vcs = MercurialVCS(upload_options, ui, repo)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
else:
uploaded_diff_file = [("data", "data.diff", emptydiff)]
if vcs and self.name != "new":
form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
else:
# First upload sets the subject for the CL itself.
form_fields.append(("subject", self.Subject()))
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
if response_body.startswith("Issue updated.") and quiet:
pass
else:
ui.status(msg + "\n")
set_status("uploaded CL metadata + diffs")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
raise hg_util.Abort("failed to update issue: " + response_body)
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
set_status("uploading patches")
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
if vcs:
set_status("uploading base files")
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if send_mail:
set_status("sending mail")
MySend("/" + issue + "/mail", payload="")
self.web = True
set_status("flushing changes to disk")
self.Flush(ui, repo)
return
def Mail(self, ui, repo):
pmsg = "Hello " + JoinComma(self.reviewer)
if self.cc:
pmsg += " (cc: %s)" % (', '.join(self.cc),)
pmsg += ",\n"
pmsg += "\n"
repourl = ui.expandpath("default")
if not self.mailed:
pmsg += "I'd like you to review this change to\n" + repourl + "\n"
else:
pmsg += "Please take another look.\n"
typecheck(pmsg, str)
PostMessage(ui, self.name, pmsg, subject=self.Subject())
self.mailed = True
self.Flush(ui, repo)
def GoodCLName(name):
typecheck(name, str)
return re.match("^[0-9]+$", name)
def ParseCL(text, name):
typecheck(text, str)
typecheck(name, str)
sname = None
lineno = 0
sections = {
'Author': '',
'Description': '',
'Files': '',
'URL': '',
'Reviewer': '',
'CC': '',
'Mailed': '',
'Private': '',
}
for line in text.split('\n'):
lineno += 1
line = line.rstrip()
if line != '' and line[0] == '#':
continue
if line == '' or line[0] == ' ' or line[0] == '\t':
if sname == None and line != '':
return None, lineno, 'text outside section'
if sname != None:
sections[sname] += line + '\n'
continue
p = line.find(':')
if p >= 0:
s, val = line[:p].strip(), line[p+1:].strip()
if s in sections:
sname = s
if val != '':
sections[sname] += val + '\n'
continue
return None, lineno, 'malformed section header'
for k in sections:
sections[k] = StripCommon(sections[k]).rstrip()
cl = CL(name)
if sections['Author']:
cl.copied_from = sections['Author']
cl.desc = sections['Description']
for line in sections['Files'].split('\n'):
i = line.find('#')
if i >= 0:
line = line[0:i].rstrip()
line = line.strip()
if line == '':
continue
cl.files.append(line)
cl.reviewer = SplitCommaSpace(sections['Reviewer'])
cl.cc = SplitCommaSpace(sections['CC'])
cl.url = sections['URL']
if sections['Mailed'] != 'False':
# Odd default, but avoids spurious mailings when
# reading old CLs that do not have a Mailed: line.
# CLs created with this update will always have
# Mailed: False on disk.
cl.mailed = True
if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
cl.private = True
if cl.desc == '<enter description here>':
cl.desc = ''
return cl, 0, ''
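# Illustrative sketch (not from the original file): a minimal change-list text
# in the format ParseCL accepts. The reviewer, CC, and file names are made up.
def _example_parse_cl():
    text = (
        "Reviewer: alice@example.com\n"
        "CC: bob@example.com\n"
        "\n"
        "Description:\n"
        "\tmath: add IsInf, IsNaN\n"
        "\n"
        "Files:\n"
        "\tsrc/pkg/math/bits.go\n"
    )
    cl, lineno, err = ParseCL(text, "new")
    # On success err == '' and cl.reviewer, cl.cc, cl.desc, and cl.files hold
    # the parsed values, with the common leading whitespace stripped.
    return cl, err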
def SplitCommaSpace(s):
typecheck(s, str)
s = s.strip()
if s == "":
return []
return re.split(", *", s)
def CutDomain(s):
typecheck(s, str)
i = s.find('@')
if i >= 0:
s = s[0:i]
return s
def JoinComma(l):
for s in l:
typecheck(s, str)
return ", ".join(l)
def ExceptionDetail():
s = str(sys.exc_info()[0])
if s.startswith("<type '") and s.endswith("'>"):
s = s[7:-2]
elif s.startswith("<class '") and s.endswith("'>"):
s = s[8:-2]
arg = str(sys.exc_info()[1])
if len(arg) > 0:
s += ": " + arg
return s
def IsLocalCL(ui, repo, name):
return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
typecheck(name, str)
set_status("loading CL " + name)
if not GoodCLName(name):
return None, "invalid CL name"
dir = CodeReviewDir(ui, repo)
path = dir + "cl." + name
if os.access(path, 0):
ff = open(path)
text = ff.read()
ff.close()
cl, lineno, err = ParseCL(text, name)
if err != "":
return None, "malformed CL data: "+err
cl.local = True
else:
cl = CL(name)
if web:
set_status("getting issue metadata from web")
d = JSONGet(ui, "/api/" + name + "?messages=true")
set_status(None)
if d is None:
return None, "cannot load CL %s from server" % (name,)
if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
return None, "malformed response loading CL data from code review server"
cl.dict = d
cl.reviewer = d.get('reviewers', [])
cl.cc = d.get('cc', [])
if cl.local and cl.copied_from and cl.desc:
# local copy of CL written by someone else
# and we saved a description. use that one,
# so that committers can edit the description
# before doing hg submit.
pass
else:
cl.desc = d.get('description', "")
cl.url = server_url_base + name
cl.web = True
cl.private = d.get('private', False) != False
cl.lgtm = []
for m in d.get('messages', []):
if m.get('approval', False) == True or m.get('disapproval', False) == True:
who = re.sub('@.*', '', m.get('sender', ''))
text = re.sub("\n(.|\n)*", '', m.get('text', ''))
cl.lgtm.append((who, text))
set_status("loaded CL " + name)
return cl, ''
class LoadCLThread(threading.Thread):
def __init__(self, ui, repo, dir, f, web):
threading.Thread.__init__(self)
self.ui = ui
self.repo = repo
self.dir = dir
self.f = f
self.web = web
self.cl = None
def run(self):
cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
if err != '':
self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
return
self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
dir = CodeReviewDir(ui, repo)
m = {}
files = [f for f in os.listdir(dir) if f.startswith('cl.')]
if not files:
return m
active = []
first = True
for f in files:
t = LoadCLThread(ui, repo, dir, f, web)
t.start()
if web and first:
# first request: wait in case it needs to authenticate
# otherwise we get lots of user/password prompts
# running in parallel.
t.join()
if t.cl:
m[t.cl.name] = t.cl
first = False
else:
active.append(t)
for t in active:
t.join()
if t.cl:
m[t.cl.name] = t.cl
return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
url = repo.url();
if not url.startswith('file:'):
ui.warn("repository %s is not in local file system\n" % (url,))
return None
url = url[5:]
if url.endswith('/'):
url = url[:-1]
typecheck(url, str)
return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
dir = RepoDir(ui, repo)
if dir == None:
return None
dir += '/.hg/codereview/'
if not os.path.isdir(dir):
try:
os.mkdir(dir, 0700)
except:
ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
return None
typecheck(dir, str)
return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
i = 0
while i < len(line) and line[i] == '\t':
i += 1
return ' '*(8*i) + line[i:]
# Strip maximal common leading white space prefix from text
def StripCommon(text):
typecheck(text, str)
ws = None
for line in text.split('\n'):
line = line.rstrip()
if line == '':
continue
line = TabsToSpaces(line)
white = line[:len(line)-len(line.lstrip())]
if ws == None:
ws = white
else:
common = ''
for i in range(min(len(white), len(ws))+1):
if white[0:i] == ws[0:i]:
common = white[0:i]
ws = common
if ws == '':
break
if ws == None:
return text
t = ''
for line in text.split('\n'):
line = line.rstrip()
line = TabsToSpaces(line)
if line.startswith(ws):
line = line[len(ws):]
if line == '' and t == '':
continue
t += line + '\n'
while len(t) >= 2 and t[-2:] == '\n\n':
t = t[:-1]
typecheck(t, str)
return t
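# Illustrative sketch (not from the original file): how the whitespace helpers
# above behave on made-up input. Leading tabs are expanded to 8 spaces by
# TabsToSpaces before the common prefix is computed.
def _example_strip_common():
    text = "\tfirst line\n        second line\n"
    # Both lines share an 8-space prefix after tab expansion, so StripCommon
    # returns "first line\nsecond line\n".
    return StripCommon(text)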
# Indent text with indent.
def Indent(text, indent):
typecheck(text, str)
typecheck(indent, str)
t = ''
for line in text.split('\n'):
t += indent + line + '\n'
typecheck(t, str)
return t
# Return the first line of l
def line1(text):
typecheck(text, str)
return text.split('\n')[0]
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
def promptremove(ui, repo, f):
if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
if hg_commands.remove(ui, repo, 'path:'+f) != 0:
ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
if hg_commands.add(ui, repo, 'path:'+f) != 0:
ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
set_status(None) # do not show status
s = cl.EditorText()
while True:
s = ui.edit(s, ui.username())
# We can't trust Mercurial + Python not to die before making the change,
# so, by popular demand, just scribble the most recent CL edit into
# $(hg root)/last-change so that if Mercurial does die, people
# can look there for their work.
try:
f = open(repo.root+"/last-change", "w")
f.write(s)
f.close()
except:
pass
clx, line, err = ParseCL(s, cl.name)
if err != '':
if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
return "change list not modified"
continue
# Check description.
if clx.desc == '':
if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
continue
elif re.search('<enter reason for undo>', clx.desc):
if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
continue
elif not re.match(desc_re, clx.desc.split('\n')[0]):
if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
continue
# Check file list for files that need to be hg added or hg removed
# or simply aren't understood.
pats = ['path:'+f for f in clx.files]
changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
clean = hg_matchPattern(ui, repo, *pats, clean=True)
files = []
for f in clx.files:
if f in changed:
files.append(f)
continue
if f in deleted:
promptremove(ui, repo, f)
files.append(f)
continue
if f in unknown:
promptadd(ui, repo, f)
files.append(f)
continue
if f in ignored:
ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
continue
if f in clean:
ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
files.append(f)
continue
p = repo.root + '/' + f
if os.path.isfile(p):
ui.warn("warning: %s is a file but not known to hg\n" % (f,))
files.append(f)
continue
if os.path.isdir(p):
ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
continue
ui.warn("error: %s does not exist; omitting\n" % (f,))
clx.files = files
cl.desc = clx.desc
cl.reviewer = clx.reviewer
cl.cc = clx.cc
cl.files = clx.files
cl.private = clx.private
break
return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, op="verb", defaultcc=None):
if len(pats) > 0 and GoodCLName(pats[0]):
if len(pats) != 1:
return None, "cannot specify change number and file names"
if opts.get('message'):
return None, "cannot use -m with existing CL"
cl, err = LoadCL(ui, repo, pats[0], web=True)
if err != "":
return None, err
else:
cl = CL("new")
cl.local = True
cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if not cl.files:
return None, "no files changed (use hg %s <number> to use existing CL)" % op
if opts.get('reviewer'):
cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
if opts.get('cc'):
cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
if defaultcc:
cl.cc = Add(cl.cc, defaultcc)
if cl.name == "new":
if opts.get('message'):
cl.desc = opts.get('message')
else:
err = EditCL(ui, repo, cl)
if err != '':
return None, err
return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
taken = taken or {}
# Run each pattern separately so that we can warn about
# patterns that didn't do anything useful.
for p in pats:
for f in hg_matchPattern(ui, repo, p, unknown=True):
promptadd(ui, repo, f)
for f in hg_matchPattern(ui, repo, p, removed=True):
promptremove(ui, repo, f)
files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
for f in files:
if f in taken:
ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
if not files:
ui.warn("warning: %s did not match any modified files\n" % (p,))
# Again, all at once (eliminates duplicates)
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
l.sort()
if taken:
l = Sub(l, taken.keys())
return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
l.sort()
return l
# Return list of files claimed by existing CLs
def Taken(ui, repo):
all = LoadAllCL(ui, repo, web=False)
taken = {}
for _, cl in all.items():
for f in cl.files:
taken[f] = cl
return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
set_status("running gofmt")
CheckGofmt(ui, repo, files, just_warn)
CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
files = gofmt_required(files)
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
if not files:
return
try:
cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
cmd.stdin.close()
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
data = cmd.stdout.read()
errors = cmd.stderr.read()
cmd.wait()
set_status("done with gofmt")
if len(errors) > 0:
ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
return
if len(data) > 0:
msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
if not files:
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
files = [f for f in files if os.access(f, 0)]
badfiles = []
for f in files:
try:
for line in open(f, 'r'):
# Four leading spaces is enough to complain about,
# except that some Plan 9 code uses four spaces as the label indent,
# so allow that.
if line.startswith(' ') and not re.match(' [A-Za-z0-9_]+:', line):
badfiles.append(f)
break
except:
# ignore cannot open file, etc.
pass
if len(badfiles) > 0:
msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
if just_warn:
ui.warn("warning: " + msg + "\n")
else:
raise hg_util.Abort(msg)
return
#######################################################################
# CONTRIBUTORS file parsing
contributorsCache = None
contributorsURL = None
def ReadContributors(ui, repo):
global contributorsCache
if contributorsCache is not None:
return contributorsCache
try:
if contributorsURL is not None:
opening = contributorsURL
f = urllib2.urlopen(contributorsURL)
else:
opening = repo.root + '/CONTRIBUTORS'
f = open(repo.root + '/CONTRIBUTORS', 'r')
except:
ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
return
contributors = {}
for line in f:
# CONTRIBUTORS is a list of lines like:
# Person <email>
# Person <email> <alt-email>
# The first email address is the one used in commit logs.
if line.startswith('#'):
continue
m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
if m:
name = m.group(1)
email = m.group(2)[1:-1]
contributors[email.lower()] = (name, email)
for extra in m.group(3).split():
contributors[extra[1:-1].lower()] = (name, email)
contributorsCache = contributors
return contributors
def CheckContributor(ui, repo, user=None):
set_status("checking CONTRIBUTORS file")
user, userline = FindContributor(ui, repo, user, warn=False)
if not userline:
raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
return userline
def FindContributor(ui, repo, user=None, warn=True):
if not user:
user = ui.config("ui", "username")
if not user:
raise hg_util.Abort("[ui] username is not configured in .hgrc")
user = user.lower()
m = re.match(r".*<(.*)>", user)
if m:
user = m.group(1)
contributors = ReadContributors(ui, repo)
if user not in contributors:
if warn:
ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
return user, None
user, email = contributors[user]
return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
hgversion = hg_util.version()
# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"
old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, visit http://mercurial.selenic.com/downloads/.
"""
linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""
if hgversion < hg_required:
msg = old_message
if os.access("/etc/mercurial", 0):
msg += linux_message
raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
def __init__(self, ui):
self.ui = ui
ui.pushbuffer()
self.oldQuiet = ui.quiet
ui.quiet = True
self.oldVerbose = ui.verbose
ui.verbose = False
def output(self):
ui = self.ui
ui.quiet = self.oldQuiet
ui.verbose = self.oldVerbose
return ui.popbuffer()
def to_slash(path):
if sys.platform == "win32":
return path.replace('\\', '/')
return path
def hg_matchPattern(ui, repo, *pats, **opts):
w = uiwrap(ui)
hg_commands.status(ui, repo, *pats, **opts)
text = w.output()
ret = []
prefix = to_slash(os.path.realpath(repo.root))+'/'
for line in text.split('\n'):
f = line.split()
if len(f) > 1:
if len(pats) > 0:
# Given patterns, Mercurial shows relative to cwd
p = to_slash(os.path.realpath(f[1]))
if not p.startswith(prefix):
print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
else:
ret.append(p[len(prefix):])
else:
# Without patterns, Mercurial shows relative to root (what we want)
ret.append(to_slash(f[1]))
return ret
def hg_heads(ui, repo):
w = uiwrap(ui)
hg_commands.heads(ui, repo)
return w.output()
noise = [
"",
"resolving manifests",
"searching for changes",
"couldn't find merge tool hgmerge",
"adding changesets",
"adding manifests",
"adding file changes",
"all local heads known remotely",
]
def isNoise(line):
line = str(line)
for x in noise:
if line == x:
return True
return False
def hg_incoming(ui, repo):
w = uiwrap(ui)
ret = hg_commands.incoming(ui, repo, force=False, bundle="")
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_log(ui, repo, **opts):
for k in ['date', 'keyword', 'rev', 'user']:
if not opts.has_key(k):
opts[k] = ""
w = uiwrap(ui)
ret = hg_commands.log(ui, repo, **opts)
if ret:
raise hg_util.Abort(ret)
return w.output()
def hg_outgoing(ui, repo, **opts):
w = uiwrap(ui)
ret = hg_commands.outgoing(ui, repo, **opts)
if ret and ret != 1:
raise hg_util.Abort(ret)
return w.output()
def hg_pull(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True # for file list
err = hg_commands.pull(ui, repo, **opts)
for line in w.output().split('\n'):
if isNoise(line):
continue
if line.startswith('moving '):
line = 'mv ' + line[len('moving '):]
if line.startswith('getting ') and line.find(' to ') >= 0:
line = 'mv ' + line[len('getting '):]
if line.startswith('getting '):
line = '+ ' + line[len('getting '):]
if line.startswith('removing '):
line = '- ' + line[len('removing '):]
ui.write(line + '\n')
return err
def hg_push(ui, repo, **opts):
w = uiwrap(ui)
ui.quiet = False
ui.verbose = True
err = hg_commands.push(ui, repo, **opts)
for line in w.output().split('\n'):
if not isNoise(line):
ui.write(line + '\n')
return err
def hg_commit(ui, repo, *pats, **opts):
return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
commit_okay = False
def precommithook(ui, repo, **opts):
if commit_okay:
return False # False means okay.
ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
match_repo = None
match_ui = None
match_orig = None
def InstallMatch(ui, repo):
global match_repo
global match_ui
global match_orig
match_ui = ui
match_repo = repo
from mercurial import scmutil
match_orig = scmutil.match
scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
taken = []
files = []
pats = pats or []
opts = opts or {}
for p in pats:
if p.startswith('@'):
taken.append(p)
clname = p[1:]
if clname == "default":
files = DefaultFiles(match_ui, match_repo, [])
else:
if not GoodCLName(clname):
raise hg_util.Abort("invalid CL name " + clname)
cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
if err != '':
raise hg_util.Abort("loading CL " + clname + ": " + err)
if not cl.files:
raise hg_util.Abort("no files in CL " + clname)
files = Add(files, cl.files)
pats = Sub(pats, taken) + ['path:'+f for f in files]
# work-around for http://selenic.com/hg/rev/785bbc8634f8
if not hasattr(ctx, 'match'):
ctx = ctx[None]
return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.
def hgcommand(f):
return f
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
"""create, edit or delete a change list
Create, edit or delete a change list.
A change list is a group of files to be reviewed and submitted together,
plus a textual description of the change.
Change lists are referred to by simple alphanumeric names.
Changes must be reviewed before they can be submitted.
In the absence of options, the change command opens the
change list for editing in the default editor.
Deleting a change with the -d or -D flag does not affect
the contents of the files listed in that change. To revert
the files listed in a change, use
hg revert @123456
before running hg change -d 123456.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
dirty = {}
if len(pats) > 0 and GoodCLName(pats[0]):
name = pats[0]
if len(pats) != 1:
raise hg_util.Abort("cannot specify CL name and file patterns")
pats = pats[1:]
cl, err = LoadCL(ui, repo, name, web=True)
if err != '':
raise hg_util.Abort(err)
if not cl.local and (opts["stdin"] or not opts["stdout"]):
raise hg_util.Abort("cannot change non-local CL " + name)
else:
name = "new"
cl = CL("new")
if repo[None].branch() != "default":
raise hg_util.Abort("cannot create CL outside default branch; switch with 'hg update default'")
dirty[cl] = True
files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
if opts["delete"] or opts["deletelocal"]:
if opts["delete"] and opts["deletelocal"]:
raise hg_util.Abort("cannot use -d and -D together")
flag = "-d"
if opts["deletelocal"]:
flag = "-D"
if name == "new":
raise hg_util.Abort("cannot use "+flag+" with file patterns")
if opts["stdin"] or opts["stdout"]:
raise hg_util.Abort("cannot use "+flag+" with -i or -o")
if not cl.local:
raise hg_util.Abort("cannot change non-local CL " + name)
if opts["delete"]:
if cl.copied_from:
raise hg_util.Abort("original author must delete CL; hg change -D will remove locally")
PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
return
if opts["stdin"]:
s = sys.stdin.read()
clx, line, err = ParseCL(s, name)
if err != '':
raise hg_util.Abort("error parsing change list: line %d: %s" % (line, err))
if clx.desc is not None:
cl.desc = clx.desc;
dirty[cl] = True
if clx.reviewer is not None:
cl.reviewer = clx.reviewer
dirty[cl] = True
if clx.cc is not None:
cl.cc = clx.cc
dirty[cl] = True
if clx.files is not None:
cl.files = clx.files
dirty[cl] = True
if clx.private != cl.private:
cl.private = clx.private
dirty[cl] = True
if not opts["stdin"] and not opts["stdout"]:
if name == "new":
cl.files = files
err = EditCL(ui, repo, cl)
if err != "":
raise hg_util.Abort(err)
dirty[cl] = True
for d, _ in dirty.items():
name = d.name
d.Flush(ui, repo)
if name == "new":
d.Upload(ui, repo, quiet=True)
if opts["stdout"]:
ui.write(cl.EditorText())
elif opts["pending"]:
ui.write(cl.PendingText())
elif name == "new":
if ui.quiet:
ui.write(cl.name)
else:
ui.write("CL created: " + cl.url + "\n")
return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
"""log in to code review server
Logs in to the code review server, saving a cookie in
a file in your home directory.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
"""import a patch from the code review server
Imports a patch from the code review server into the local client.
If the local client has already modified any of the files that the
patch modifies, this command will refuse to apply the patch.
Submitting an imported patch will keep the original author's
name as the Author: line but add your own name to a Committer: line.
"""
if repo[None].branch() != "default":
raise hg_util.Abort("cannot run hg clpatch outside default branch")
err = clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
if err:
raise hg_util.Abort(err)
@hgcommand
def undo(ui, repo, clname, **opts):
"""undo the effect of a CL
Creates a new CL that undoes an earlier CL.
After creating the CL, opens the CL text for editing so that
you can add the reason for the undo to the description.
"""
if repo[None].branch() != "default":
raise hg_util.Abort("cannot run hg undo outside default branch")
err = clpatch_or_undo(ui, repo, clname, opts, mode="undo")
if err:
raise hg_util.Abort(err)
@hgcommand
def release_apply(ui, repo, clname, **opts):
"""apply a CL to the release branch
Creates a new CL copying a previously committed change
from the main branch to the release branch.
The current client must either be clean or already be in
the release branch.
The release branch must be created by starting with a
clean client, disabling the code review plugin, and running:
hg update weekly.YYYY-MM-DD
hg branch release-branch.rNN
hg commit -m 'create release-branch.rNN'
hg push --new-branch
Then re-enable the code review plugin.
People can test the release branch by running
hg update release-branch.rNN
in a clean client. To return to the normal tree,
hg update default
Move changes since the weekly into the release branch
using hg release-apply followed by the usual code review
process and hg submit.
When it comes time to tag the release, record the
final long-form tag of the release-branch.rNN
in the *default* branch's .hgtags file. That is, run
hg update default
and then edit .hgtags as you would for a weekly.
"""
c = repo[None]
if not releaseBranch:
raise hg_util.Abort("no active release branches")
if c.branch() != releaseBranch:
if c.modified() or c.added() or c.removed():
raise hg_util.Abort("uncommitted local changes - cannot switch branches")
err = hg_clean(repo, releaseBranch)
if err:
raise hg_util.Abort(err)
try:
err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
if err:
raise hg_util.Abort(err)
except Exception, e:
hg_clean(repo, "default")
raise e
def rev2clname(rev):
# Extract CL name from revision description.
# The last line in the description that is a codereview URL is the real one.
# Earlier lines might be part of the user-written description.
all = re.findall('(?m)^https?://codereview.appspot.com/([0-9]+)$', rev.description())
if len(all) > 0:
return all[-1]
return ""
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
if codereview_disabled:
return codereview_disabled
if mode == "undo" or mode == "backport":
# Find revision in Mercurial repository.
# Assume CL number is 7+ decimal digits.
# Otherwise is either change log sequence number (fewer decimal digits),
# hexadecimal hash, or tag name.
# Mercurial will fall over long before the change log
# sequence numbers get to be 7 digits long.
if re.match('^[0-9]{7,}$', clname):
found = False
for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
rev = repo[r]
# Last line with a code review URL is the actual review URL.
# Earlier ones might be part of the CL description.
n = rev2clname(rev)
if n == clname:
found = True
break
if not found:
return "cannot find CL %s in local repository" % clname
else:
rev = repo[clname]
if not rev:
return "unknown revision %s" % clname
clname = rev2clname(rev)
if clname == "":
return "cannot find CL name in revision description"
# Create fresh CL and start with patch that would reverse the change.
vers = hg_node.short(rev.node())
cl = CL("new")
desc = str(rev.description())
if mode == "undo":
cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
else:
cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
v1 = vers
v0 = hg_node.short(rev.parents()[0].node())
if mode == "undo":
arg = v1 + ":" + v0
else:
vers = v0
arg = v0 + ":" + v1
patch = RunShell(["hg", "diff", "--git", "-r", arg])
else: # clpatch
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
if patch == emptydiff:
return "codereview issue %s has no diff" % clname
# find current hg version (hg identify)
ctx = repo[None]
parents = ctx.parents()
id = '+'.join([hg_node.short(p.node()) for p in parents])
# if version does not match the patch version,
# try to update the patch line numbers.
if vers != "" and id != vers:
# "vers in repo" gives the wrong answer
# on some versions of Mercurial. Instead, do the actual
# lookup and catch the exception.
try:
repo[vers].description()
except:
return "local repository is out of date; sync to get %s" % (vers)
patch1, err = portPatch(repo, patch, vers, id)
if err != "":
if not opts["ignore_hgapplydiff_failure"]:
return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
else:
patch = patch1
argv = ["hgapplydiff"]
if opts["no_incoming"] or mode == "backport":
argv += ["--checksync=false"]
try:
cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
except:
return "hgapplydiff: " + ExceptionDetail() + "\nInstall hgapplydiff with:\n$ go get code.google.com/p/go.codereview/cmd/hgapplydiff\n"
out, err = cmd.communicate(patch)
if cmd.returncode != 0 and not opts["ignore_hgapplydiff_failure"]:
return "hgapplydiff failed"
cl.local = True
cl.files = out.strip().split()
if not cl.files and not opts["ignore_hgapplydiff_failure"]:
return "codereview issue %s has no changed files" % clname
files = ChangedFiles(ui, repo, [])
extra = Sub(cl.files, files)
if extra:
ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
cl.Flush(ui, repo)
if mode == "undo":
err = EditCL(ui, repo, cl)
if err != "":
return "CL created, but error editing: " + err
cl.Flush(ui, repo)
else:
ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
lines = patch.splitlines(True) # True = keep \n
delta = None
for i in range(len(lines)):
line = lines[i]
if line.startswith('--- a/'):
file = line[6:-1]
delta = fileDeltas(repo, file, oldver, newver)
if not delta or not line.startswith('@@ '):
continue
# @@ -x,y +z,w @@ means the patch chunk replaces
# the original file's line numbers x up to x+y with the
# line numbers z up to z+w in the new file.
# Find the delta from x in the original to the same
# line in the current version and add that delta to both
# x and z.
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
return None, "error parsing patch line numbers"
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
d, err = lineDelta(delta, n1, len1)
if err != "":
return "", err
n1 += d
n2 += d
lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
newpatch = ''.join(lines)
return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
cmd = ["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file]
data = RunShell(cmd, silent_ok=True)
deltas = []
for line in data.splitlines():
m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
if not m:
continue
n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
deltas.append((n1, len1, n2+len2-(n1+len1)))
return deltas
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
d = 0
for (old, oldlen, newdelta) in deltas:
if old >= n+len:
break
if old+len > n:
return 0, "patch and recent changes conflict"
d = newdelta
return d, ""
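# Illustrative sketch (not from the original file): the renumbering that
# portPatch performs on one hunk header. The numbers below are invented.
def _example_port_hunk():
    # deltas as fileDeltas would report them: lines [10,14) of oldver changed,
    # and later lines are shifted by +3 in newver.
    deltas = [(10, 4, 3)]
    d, err = lineDelta(deltas, 40, 7)   # hunk "@@ -40,7 +40,8 @@" starts at line 40
    # d == 3 and err == "", so portPatch would rewrite the header as
    # "@@ -43,7 +43,8 @@" before handing the patch to hgapplydiff.
    return d, err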
@hgcommand
def download(ui, repo, clname, **opts):
"""download a change from the code review server
Download prints a description of the given change list
followed by its diff, downloaded from the code review server.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
cl, vers, patch, err = DownloadCL(ui, repo, clname)
if err != "":
return err
ui.write(cl.EditorText() + "\n")
ui.write(patch + "\n")
return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
"""assign files to or remove files from a change list
Assign files to or (with -d) remove files from a change list.
The -d option only removes files from the change list.
It does not edit them or remove them from the repository.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
pats = tuple([pat] + list(pats))
if not GoodCLName(clname):
return "invalid CL name " + clname
dirty = {}
cl, err = LoadCL(ui, repo, clname, web=False)
if err != '':
return err
if not cl.local:
return "cannot change non-local CL " + clname
files = ChangedFiles(ui, repo, pats)
if opts["delete"]:
oldfiles = Intersect(files, cl.files)
if oldfiles:
if not ui.quiet:
ui.status("# Removing files from CL. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
for f in oldfiles:
ui.status("# hg file %s %s\n" % (cl.name, f))
cl.files = Sub(cl.files, oldfiles)
cl.Flush(ui, repo)
else:
ui.status("no such files in CL")
return
if not files:
return "no such modified files"
files = Sub(files, cl.files)
taken = Taken(ui, repo)
warned = False
for f in files:
if f in taken:
if not warned and not ui.quiet:
ui.status("# Taking files from other CLs. To undo:\n")
ui.status("# cd %s\n" % (repo.root))
warned = True
ocl = taken[f]
if not ui.quiet:
ui.status("# hg file %s %s\n" % (ocl.name, f))
if ocl not in dirty:
ocl.files = Sub(ocl.files, files)
dirty[ocl] = True
cl.files = Add(cl.files, files)
dirty[cl] = True
for d, _ in dirty.items():
d.Flush(ui, repo)
return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
return "no modified go files"
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if subprocess.call(cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return
def gofmt_required(files):
return [f for f in files if (not f.startswith('test/') or f.startswith('test/bench/')) and f.endswith('.go')]
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
"""mail a change for review
Uploads a patch to the code review server and then sends mail
to the reviewer and CC list asking for a review.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
if err != "":
raise hg_util.Abort(err)
cl.Upload(ui, repo, gofmt_just_warn=True)
if not cl.reviewer:
# If no reviewer is listed, assign the review to defaultcc.
# This makes sure that it appears in the
# codereview.appspot.com/user/defaultcc
# page, so that it doesn't get dropped on the floor.
if not defaultcc:
raise hg_util.Abort("no reviewers listed in CL")
cl.cc = Sub(cl.cc, defaultcc)
cl.reviewer = defaultcc
cl.Flush(ui, repo)
if cl.files == []:
raise hg_util.Abort("no changed files, not sending mail")
cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
"""alias for hg p --short
"""
opts['short'] = True
return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
"""alias for hg p --quick
"""
opts['quick'] = True
return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
"""show pending changes
Lists pending changes followed by a list of unassigned but modified files.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
quick = opts.get('quick', False)
short = opts.get('short', False)
m = LoadAllCL(ui, repo, web=not quick and not short)
names = m.keys()
names.sort()
for name in names:
cl = m[name]
if short:
ui.write(name + "\t" + line1(cl.desc) + "\n")
else:
ui.write(cl.PendingText(quick=quick) + "\n")
if short:
return 0
files = DefaultFiles(ui, repo, [])
if len(files) > 0:
s = "Changed files not in any CL:\n"
for f in files:
s += "\t" + f + "\n"
ui.write(s)
#######################################################################
# hg submit
def need_sync():
raise hg_util.Abort("local repository out of date; must sync before submit")
@hgcommand
def submit(ui, repo, *pats, **opts):
"""submit change to remote repository
Submits change to remote repository.
Bails out if the local repository is not in sync with the remote one.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
# We already called this on startup but sometimes Mercurial forgets.
set_mercurial_encoding_to_utf8()
if not opts["no_incoming"] and hg_incoming(ui, repo):
need_sync()
cl, err = CommandLineCL(ui, repo, pats, opts, op="submit", defaultcc=defaultcc)
if err != "":
raise hg_util.Abort(err)
user = None
if cl.copied_from:
user = cl.copied_from
userline = CheckContributor(ui, repo, user)
typecheck(userline, str)
about = ""
if cl.reviewer:
about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
if opts.get('tbr'):
tbr = SplitCommaSpace(opts.get('tbr'))
cl.reviewer = Add(cl.reviewer, tbr)
about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
if cl.cc:
about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
if not cl.reviewer:
raise hg_util.Abort("no reviewers listed in CL")
if not cl.local:
raise hg_util.Abort("cannot submit non-local CL")
# upload, to sync current patch and also get change number if CL is new.
if not cl.copied_from:
cl.Upload(ui, repo, gofmt_just_warn=True)
# check gofmt for real; allowed upload to warn in order to save CL.
cl.Flush(ui, repo)
CheckFormat(ui, repo, cl.files)
about += "%s%s\n" % (server_url_base, cl.name)
if cl.copied_from:
about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
typecheck(about, str)
if not cl.mailed and not cl.copied_from: # in case this is TBR
cl.Mail(ui, repo)
# submit changes locally
message = cl.desc.rstrip() + "\n\n" + about
typecheck(message, str)
set_status("pushing " + cl.name + " to remote server")
if hg_outgoing(ui, repo):
raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
old_heads = len(hg_heads(ui, repo).split())
global commit_okay
commit_okay = True
ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
commit_okay = False
if ret:
raise hg_util.Abort("nothing changed")
node = repo["-1"].node()
# push to remote; if it fails for any reason, roll back
try:
new_heads = len(hg_heads(ui, repo).split())
if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
# Created new head, so we weren't up to date.
need_sync()
# Push changes to remote. If it works, we're committed. If not, roll back.
try:
if hg_push(ui, repo):
raise hg_util.Abort("push error")
except hg_error.Abort, e:
if e.message.find("push creates new heads") >= 0:
# Remote repository had changes we missed.
need_sync()
raise
except:
real_rollback()
raise
# We're committed. Upload final patch, close review, add commit message.
changeURL = hg_node.short(node)
url = ui.expandpath("default")
m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
if m:
if m.group(1): # prj.googlecode.com/hg/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
elif m.group(4): # code.google.com/p/prj/ case
changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
else:
print >>sys.stderr, "URL: ", url
else:
print >>sys.stderr, "URL: ", url
pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
# When posting, move reviewers to CC line,
# so that the issue stops showing up in their "My Issues" page.
PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
c = repo[None]
if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
ui.write("switching from %s to default branch.\n" % releaseBranch)
err = hg_clean(repo, "default")
if err:
return err
return 0
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
"""synchronize with remote repository
Incorporates recent changes from the remote repository
into the local repository.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
if not opts["local"]:
err = hg_pull(ui, repo, update=True)
if err:
return err
sync_changes(ui, repo)
def sync_changes(ui, repo):
# Look through recent change log descriptions to find
# potential references to http://.*/our-CL-number.
# Double-check them by looking at the Rietveld log.
for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
desc = repo[rev].description().strip()
for clname in re.findall('(?m)^https?://(?:[^\n]+)/([0-9]+)$', desc):
if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
cl, err = LoadCL(ui, repo, clname, web=False)
if err != "":
ui.warn("loading CL %s: %s\n" % (clname, err))
continue
if not cl.copied_from:
EditDesc(cl.name, closed=True, private=cl.private)
cl.Delete(ui, repo)
# Remove files that are not modified from the CLs in which they appear.
all = LoadAllCL(ui, repo, web=False)
changed = ChangedFiles(ui, repo, [])
for cl in all.values():
extra = Sub(cl.files, changed)
if extra:
ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
for f in extra:
ui.warn("\t%s\n" % (f,))
cl.files = Sub(cl.files, extra)
cl.Flush(ui, repo)
if not cl.files:
if not cl.copied_from:
ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
else:
ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
return 0
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
"""upload diffs to the code review server
Uploads the current modifications for a given change to the server.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
repo.ui.quiet = True
cl, err = LoadCL(ui, repo, name, web=True)
if err != "":
raise hg_util.Abort(err)
if not cl.local:
raise hg_util.Abort("cannot upload non-local change")
cl.Upload(ui, repo)
print "%s%s\n" % (server_url_base, cl.name)
return 0
#######################################################################
# Table of commands, supplied to Mercurial for installation.
review_opts = [
('r', 'reviewer', '', 'add reviewer'),
('', 'cc', '', 'add cc'),
('', 'tbr', '', 'add future reviewer'),
('m', 'message', '', 'change description (for new change)'),
]
cmdtable = {
# The ^ means to show this command in the help text that
# is printed when running hg with no arguments.
"^change": (
change,
[
('d', 'delete', None, 'delete existing change list'),
('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
('i', 'stdin', None, 'read change list from standard input'),
('o', 'stdout', None, 'print change list to standard output'),
('p', 'pending', None, 'print pending summary to standard output'),
],
"[-d | -D] [-i] [-o] change# or FILE ..."
),
"^clpatch": (
clpatch,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# Would prefer to call this codereview-login, but then
# hg help codereview prints the help for this command
# instead of the help for the extension.
"code-login": (
code_login,
[],
"",
),
"^download": (
download,
[],
"change#"
),
"^file": (
file,
[
('d', 'delete', None, 'delete files from change list (but not repository)'),
],
"[-d] change# FILE ..."
),
"^gofmt": (
gofmt,
[
('l', 'list', None, 'list files that would change, but do not edit them'),
],
"FILE ..."
),
"^pending|p": (
pending,
[
('s', 'short', False, 'show short result form'),
('', 'quick', False, 'do not consult codereview server'),
],
"[FILE ...]"
),
"^ps": (
ps,
[],
"[FILE ...]"
),
"^pq": (
pq,
[],
"[FILE ...]"
),
"^mail": (
mail,
review_opts + [
] + hg_commands.walkopts,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^release-apply": (
release_apply,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
# TODO: release-start, release-tag, weekly-tag
"^submit": (
submit,
review_opts + [
('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
"[-r reviewer] [--cc cc] [change# | file ...]"
),
"^sync": (
sync,
[
('', 'local', None, 'do not pull changes from remote repository')
],
"[--local]",
),
"^undo": (
undo,
[
('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
('', 'no_incoming', None, 'disable check for incoming changes'),
],
"change#"
),
"^upload": (
upload,
[],
"change#"
),
}
#######################################################################
# Mercurial extension initialization
def norollback(*pats, **opts):
"""(disabled when using this extension)"""
raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
codereview_init = False
def reposetup(ui, repo):
global codereview_disabled
global defaultcc
# reposetup gets called both for the local repository
# and also for any repository we are pulling or pushing to.
# Only initialize the first time.
global codereview_init
if codereview_init:
return
codereview_init = True
# Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
root = ''
try:
root = repo.root
except:
# Yes, repo might not have root; see issue 959.
codereview_disabled = 'codereview disabled: repository has no root'
return
repo_config_path = ''
p1 = root + '/lib/codereview/codereview.cfg'
p2 = root + '/codereview.cfg'
if os.access(p1, os.F_OK):
repo_config_path = p1
else:
repo_config_path = p2
try:
f = open(repo_config_path)
for line in f:
if line.startswith('defaultcc:'):
defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
if line.startswith('contributors:'):
global contributorsURL
contributorsURL = line[len('contributors:'):].strip()
except:
codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
return
remote = ui.config("paths", "default", "")
if remote.find("://") < 0:
raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
InstallMatch(ui, repo)
RietveldSetup(ui, repo)
# Disable the Mercurial commands that might change the repository.
# Only commands in this extension are supposed to do that.
ui.setconfig("hooks", "precommit.codereview", precommithook)
# Rollback removes an existing commit. Don't do that either.
global real_rollback
real_rollback = repo.rollback
repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
def __init__(self):
self.map = {}
self.curtag = None
self.curdata = None
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag == "input":
key = None
value = ''
for a in attrs:
if a[0] == 'name':
key = a[1]
if a[0] == 'value':
value = a[1]
if key is not None:
self.map[key] = value
if tag == "textarea":
key = None
for a in attrs:
if a[0] == 'name':
key = a[1]
if key is not None:
self.curtag = key
self.curdata = ''
def handle_endtag(self, tag):
if tag == "textarea" and self.curtag is not None:
self.map[self.curtag] = self.curdata
self.curtag = None
self.curdata = None
def handle_charref(self, name):
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
import htmlentitydefs
if name in htmlentitydefs.entitydefs:
self.handle_data(htmlentitydefs.entitydefs[name])
else:
self.handle_data("&" + name + ";")
def handle_data(self, data):
if self.curdata is not None:
self.curdata += data
def JSONGet(ui, path):
try:
data = MySend(path, force_auth=False)
typecheck(data, str)
d = fix_json(json.loads(data))
except:
ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
return None
return d
# Clean up json parser output to match our expectations:
# * all strings are UTF-8-encoded str, not unicode.
# * missing fields are missing, not None,
# so that d.get("foo", defaultvalue) works.
def fix_json(x):
if type(x) in [str, int, float, bool, type(None)]:
pass
elif type(x) is unicode:
x = x.encode("utf-8")
elif type(x) is list:
for i in range(len(x)):
x[i] = fix_json(x[i])
elif type(x) is dict:
todel = []
for k in x:
if x[k] is None:
todel.append(k)
else:
x[k] = fix_json(x[k])
for k in todel:
del x[k]
else:
raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
if type(x) is str:
x = x.replace('\r\n', '\n')
return x
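# Illustrative sketch (hypothetical field names and values, not part of the original
# script): fix_json({u"owner_email": u"gopher@example.com", u"closed": None}) converts
# the unicode string value to a UTF-8 str and drops the None-valued key entirely, so
# callers can rely on d.get("key", default) instead of checking for None.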
def IsRietveldSubmitted(ui, clname, hex):
dict = JSONGet(ui, "/api/" + clname + "?messages=true")
if dict is None:
return False
for msg in dict.get("messages", []):
text = msg.get("text", "")
m = re.match('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*', text)
if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
return True
return False
def IsRietveldMailed(cl):
for msg in cl.dict.get("messages", []):
if msg.get("text", "").find("I'd like you to review this change") >= 0:
return True
return False
def DownloadCL(ui, repo, clname):
set_status("downloading CL " + clname)
cl, err = LoadCL(ui, repo, clname, web=True)
if err != "":
return None, None, None, "error loading CL %s: %s" % (clname, err)
# Find most recent diff
diffs = cl.dict.get("patchsets", [])
if not diffs:
return None, None, None, "CL has no patch sets"
patchid = diffs[-1]
patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
if patchset is None:
return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
if patchset.get("patchset", 0) != patchid:
return None, None, None, "malformed patchset information"
vers = ""
msg = patchset.get("message", "").split()
if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
vers = msg[2]
diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
diffdata = MySend(diff, force_auth=False)
# Print warning if email is not in CONTRIBUTORS file.
email = cl.dict.get("owner_email", "")
if not email:
return None, None, None, "cannot find owner for %s" % (clname)
him = FindContributor(ui, repo, email)
me = FindContributor(ui, repo, None)
if him == me:
cl.mailed = IsRietveldMailed(cl)
else:
cl.copied_from = email
return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Run MySend1 maybe twice, because Rietveld is unreliable."""
try:
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
except Exception, e:
if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
raise
print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
time.sleep(2)
return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
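# Illustrative sketch (hypothetical issue number, for illustration only):
#   MySend("/api/12345?messages=true", force_auth=False)
# returns the response body as a str; a single HTTP 500 from Rietveld is retried
# once after a two-second pause, while any other error propagates to the caller.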
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
content_type="application/octet-stream",
timeout=None, force_auth=True,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
global rpc
if rpc == None:
rpc = GetRpcServer(upload_options)
self = rpc
if not self.authenticated and force_auth:
self._Authenticate()
if request_path is None:
return
if timeout is None:
timeout = 30 # seconds
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "https://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
# Translate \r\n into \n, because Rietveld doesn't.
response = response.replace('\r\n', '\n')
# who knows what urllib will give us
if type(response) == unicode:
response = response.encode("utf-8")
typecheck(response, str)
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
elif e.code == 302:
loc = e.info()["location"]
if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
return ''
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
def GetForm(url):
f = FormParser()
f.feed(ustr(MySend(url))) # f.feed wants unicode
f.close()
# convert back to utf-8 to restore sanity
m = {}
for k,v in f.map.items():
m[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
return m
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
set_status("uploading change to description")
form_fields = GetForm("/" + issue + "/edit")
if subject is not None:
form_fields['subject'] = subject
if desc is not None:
form_fields['description'] = desc
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if closed:
form_fields['closed'] = "checked"
if private:
form_fields['private'] = "checked"
ctype, body = EncodeMultipartFormData(form_fields.items(), [])
response = MySend("/" + issue + "/edit", body, content_type=ctype)
if response != "":
print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "application/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
pass
def RietveldSetup(ui, repo):
global force_google_account
global rpc
global server
global server_url_base
global upload_options
global verbosity
if not ui.verbose:
verbosity = 0
# Config options.
x = ui.config("codereview", "server")
if x is not None:
server = x
# TODO(rsc): Take from ui.username?
email = None
x = ui.config("codereview", "email")
if x is not None:
email = x
server_url_base = "https://" + server + "/"
testing = ui.config("codereview", "testing")
force_google_account = ui.configbool("codereview", "force_google_account", False)
upload_options = opt()
upload_options.email = email
upload_options.host = None
upload_options.verbose = 0
upload_options.description = None
upload_options.description_file = None
upload_options.reviewers = None
upload_options.cc = None
upload_options.message = None
upload_options.issue = None
upload_options.download_base = False
upload_options.revision = None
upload_options.send_mail = False
upload_options.vcs = None
upload_options.server = server
upload_options.save_cookies = True
if testing:
upload_options.save_cookies = False
upload_options.email = "test@example.com"
rpc = None
global releaseBranch
tags = repo.branchtags().keys()
if 'release-branch.go10' in tags:
# NOTE(rsc): This tags.sort is going to get the wrong
# answer when comparing release-branch.go9 with
# release-branch.go10. It will be a while before we care.
raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
tags.sort()
for t in tags:
if t.startswith('release-branch.go'):
releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
'application/javascript',
'application/x-javascript',
'application/x-freemind'
]
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
# .reason is now a read-only property based on .msg
# this means we ignore 'msg', but that seems to work fine.
self.msg = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("https://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.msg == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.msg == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.msg == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.msg == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.msg == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.msg == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.msg == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.msg == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "https://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Disable status prints so they don't obscure the password prompt.
global global_status
st = global_status
global_status = None
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
# Put status back.
global_status = st
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
typecheck(key, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
typecheck(key, str)
typecheck(filename, str)
typecheck(value, str)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
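# Illustrative sketch (hypothetical field and file values, for illustration only):
#   ctype, body = EncodeMultipartFormData(
#       [("subject", "fix build")],
#       [("data", "data.diff", "Index: foo.go\n...")])
# ctype is 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-' and body holds
# one CRLF-delimited part per field and per file, as consumed by UploadSeparatePatches
# and UploadBaseFiles below.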
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True, env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines, env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
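# Illustrative sketch (hypothetical command, for illustration only):
#   out, code = RunShellWithReturnCode(["hg", "status", "-q"])
# gives the captured stdout and the exit status; RunShell below wraps this and calls
# ErrorExit on a nonzero status or (unless silent_ok) on empty output.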
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False, env=os.environ):
data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines, env)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure you want to continue? (y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBaseFile for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
set_status("uploading " + filename)
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [
("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
response_body = rpc_server.Send(url, body, content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
# Don't want to spawn too many threads, nor do we want to
# hit Rietveld too hard, or it will start serving 500 errors.
# When 8 works, it's no better than 4, and sometimes 8 is
# too many for Rietveld to handle.
MAX_PARALLEL_UPLOADS = 4
sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
upload_threads = []
finished_upload_threads = []
class UploadFileThread(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
def run(self):
UploadFile(*self.args)
finished_upload_threads.append(self)
sema.release()
def StartUploadFile(*args):
sema.acquire()
while len(finished_upload_threads) > 0:
t = finished_upload_threads.pop()
upload_threads.remove(t)
t.join()
t = UploadFileThread(args)
upload_threads.append(t)
t.start()
def WaitForUploads():
for t in upload_threads:
t.join()
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
StartUploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
StartUploadFile(filename, file_id, new_content, is_binary, status, False)
WaitForUploads()
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetype isn't in the text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
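# Illustrative examples (hypothetical filenames): IsBinary("logo.png") is True because
# image/png does not start with "text/", IsBinary("README") is False because no mimetype
# is guessed at all, and .js files come out False because their guessed type is either
# text/* or one of the TEXT_MIMETYPES entries.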
class FakeMercurialUI(object):
def __init__(self):
self.quiet = True
self.output = ''
def write(self, *args, **opts):
self.output += ' '.join(args)
def copy(self):
return self
def status(self, *args, **opts):
pass
def formatter(self, topic, opts):
from mercurial.formatter import plainformatter
return plainformatter(self, topic, opts)
def readconfig(self, *args, **opts):
pass
def expandpath(self, *args, **opts):
return global_ui.expandpath(*args, **opts)
def configitems(self, *args, **opts):
return global_ui.configitems(*args, **opts)
def config(self, *args, **opts):
return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, ui, repo):
super(MercurialVCS, self).__init__(options)
self.ui = ui
self.repo = repo
self.status = None
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo.root)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
if not err and mqparent != "":
self.base_rev = mqparent
else:
out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
if not out:
# No revisions; use 0 to mean a repository with nothing.
out = "0:0"
self.base_rev = out.split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), (filename, self.subdir)
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify line to make it look as if it came from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def get_hg_status(self, rev, path):
# We'd like to use 'hg status -C path', but that is buggy
# (see http://mercurial.selenic.com/bts/issue3023).
# Instead, run 'hg status -C' without a path
# and skim the output for the path we want.
if self.status is None:
if use_hg_shell:
out = RunShell(["hg", "status", "-C", "--rev", rev])
else:
fui = FakeMercurialUI()
ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
if ret:
raise hg_util.Abort(ret)
out = fui.output
self.status = out.splitlines()
for i in range(len(self.status)):
# line is
# A path
# M path
# etc
line = to_slash(self.status[i])
if line[2:] == path:
if i+1 < len(self.status) and self.status[i+1][:2] == '  ':
return self.status[i:i+2]
return self.status[i:i+1]
raise hg_util.Abort("no status for " + path)
def GetBaseFile(self, filename):
set_status("inspecting " + filename)
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
out = self.get_hg_status(self.base_rev, relpath)
status, what = out[0].split(' ', 1)
if len(out) > 1 and status == "A" and what == relpath:
oldrelpath = out[1].strip()
status = "M"
if ":" in self.base_rev:
base_rev = self.base_rev.split(":", 1)[0]
else:
base_rev = self.base_rev
if status != "A":
if use_hg_shell:
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
else:
base_content = str(self.repo[base_rev][oldrelpath].data())
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content and use_hg_shell:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = to_slash(temp_filename.strip())
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
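# Illustrative sketch (hypothetical diff text, for illustration only): given svn-style
# output containing "Index: a.go" followed by its hunks and then "Index: b.go" with its
# hunks, SplitPatch returns [("a.go", <diff text for a.go>), ("b.go", <diff text for
# b.go>)], one tuple per "Index:" or "Property changes on:" header.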
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
set_status("uploading patch for " + patch[0])
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
|
floren/go9p
|
lib/codereview/codereview.py
|
Python
|
bsd-3-clause
| 106,676
|
[
"VisIt"
] |
a2e7bcc847158cedd314da6e2eafa51cdfa9a58144bd553d1c82bf5a94663a74
|
#!/usr/local/bin/python3
#
# tinker.py
# Test subroutine of the CHRUBIX project
# ...for me to tinker with things :)
import sys
import os
from chrubix import generate_distro_record_from_name, load_distro_record
from chrubix.utils import fix_broken_hyperlinks, system_or_die, call_makepkg_or_die, remaining_megabytes_free_on_device, \
chroot_this, patch_org_freedesktop_networkmanager_conf_file, failed, migrate_to_obfuscated_filesystem
from chrubix.distros.debian import generate_mickeymouse_lxdm_patch
from chrubix.utils.postinst import remove_junk, \
GUEST_HOMEDIR
MYDISK_MTPT = '/.mydisk'
try:
import urwid
except ImportError:
os.system( 'easy_install urwid' )
import urwid
testval = urwid # stop silly warning in Eclipse
argv = sys.argv
res = 0
if argv[1] != 'tinker':
raise RuntimeError( 'first param must be tinker' )
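# Example invocations (hypothetical package and device names, for illustration only):
#   python3 tinker.py tinker build-from-debian debianwheezy afio
#   python3 tinker.py tinker build-from-src debianwheezy star
#   python3 tinker.py tinker free /dev/mmcblk1p3
# argv[2] picks a branch below; most branches take a distro name in argv[3] and, where
# needed, a package name in argv[4].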
good_list = []
bad_list = [] # ubuntu failed to build afio
if argv[2] == 'secretsquirrel':
if 0 == os.system( 'mount | fgrep "cryptroot on /"' ):
failed( 'No! You are already in Secret Squirrel Mode.' )
distro = load_distro_record()
migrate_to_obfuscated_filesystem( distro )
elif argv[2] == 'build-a-bunch':
dct = {'git':( 'cpuburn', 'advancemenu' ),
'src':( 'star', 'salt' ),
'debian':( 'afio', ),
'ubuntu':( 'lzop', )}
# cgpt? lxdm? chromium?
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
for how_we_do_it in dct:
for pkg in dct[how_we_do_it]:
try:
distro.install_expatriate_software_into_a_debianish_OS(
package_name = pkg,
method = how_we_do_it )
good_list.append( pkg )
except ( IOError, SyntaxError, RuntimeError ):
bad_list.append( pkg )
print( "good:", good_list )
print( "bad :", bad_list )
elif argv[2] == 'logmein':
distro = load_distro_record( '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT )
for cmd in (
'mkdir -p /tmp/.sda2',
'mount /dev/sda2 /tmp/.sda2',
'/usr/local/bin/redo_mbr.sh > /tmp/.sda2/log_me_in.sh'
):
system_or_die( cmd )
os.system( 'sync;sync;sync;sync' )
system_or_die( 'umount /tmp/.sda2' )
elif argv[2] == 'build-from-debian':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.build_and_install_package_from_debian_source( pkg, 'jessie' )
elif argv[2] == 'build-from-jessie-for-stretch':
distro = generate_distro_record_from_name( 'debianstretch' )
distro.mountpoint = '/'
pkg = argv[3]
# sys.exit( 0 )
print( "Building %s from jessie source for a stretch target" % ( pkg ) )
distro.build_and_install_package_from_debian_source( pkg, 'jessie' )
elif argv[2] == 'build-from-ubuntu':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
# sys.exit( 0 )
print( "Building %s from Ubu-ish => Wheezy" % ( pkg ) )
distro.build_and_install_package_from_ubuntu_source( pkg )
elif argv[2] == 'build-from-src':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.build_and_install_software_from_archlinux_source( pkg )
elif argv[2] == 'fix-hyperlinks':
fix_broken_hyperlinks( argv[3] )
elif argv[2] == 'build-from-git':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
sources_basedir = '/root/.rmo/PKGBUILDs/core'
mountpoint = MYDISK_MTPT
distro.build_and_install_software_from_archlinux_git( pkg )
elif argv[2] == 'fire-everything':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
pkg = argv[4]
distro.install_expatriate_software_into_a_debianish_OS( package_name = pkg, method = None )
elif argv[2] == 'remove-junk':
remove_junk( MYDISK_MTPT, '/root/.rmo/PKGBUILDs/core/linux-chromebook' )
elif argv[2] == 'modify-sources':
system_or_die( 'bash /usr/local/bin/modify_sources.sh /dev/mmcblk1 /.mydisk no yes' )
elif argv[2] == 'postinst':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = '/'
distro.install_tweaks_for_lxdm_chrome_iceweasel_and_distrospecific_stuff()
elif argv[2] == 'initramfs':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.redo_kernel( argv[4], distro.root_dev, distro.mountpoint )
elif argv[2] == 'redo-kernel':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.modify_build_and_install_mkfs_and_kernel_for_OS( apply_kali_patch = False )
elif argv[2] == 'tails':
distro = generate_distro_record_from_name( 'debiantails' )
distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
distro.grab_all_tails_packages()
elif argv[2] == 'install-freenet':
distro = generate_distro_record_from_name( argv[3] )
assert( os.path.isdir( argv[4] ) is True )
distro.mountpoint = argv[4]
distro.install_freenet()
elif argv[2] == 'clone-guest':
outfile = '/tmp/default_guest_settings.tar.xz'
files_to_save = '\
.config/gtk-3.0/settings.ini \
.config/dconf/user \
.config/mate/backgrounds.xml \
.config/keepassx/config.ini \
.xscreensaver \
.themes \
.gtkrc-2.0 \
.config/chromium'
distro = generate_distro_record_from_name( argv[3] )
system_or_die( 'cd %s; tar -cJ %s > %s' % ( GUEST_HOMEDIR, files_to_save, outfile ) )
print( 'Saved %s/.* goodies to %s' % ( GUEST_HOMEDIR, outfile ) )
elif argv[2] == 'do-kernel':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.download_modify_build_and_install_kernel_and_mkfs()
elif argv[2] == 'kooky':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.build_kooky_filesystem_modules_for_chromeos( really = True )
elif argv[2] == 'modify-kernel-sources':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT if os.system( 'mount | grep /dev/mapper &> /dev/null' ) != 0 else '/'
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.call_bash_script_that_modifies_kernel_n_mkfs_sources()
assert( 0 == os.system( 'cat %s%s/config | grep UNION_FS' % ( distro.mountpoint, distro.kernel_src_basedir ) ) )
# distro.download_kernel_and_mkfs_sources()
# distro.modify_build_and_install_mkfs_and_kernel_for_OS()
elif argv[2] == 'sign-and-write':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
# if root_partition_device.find( '/dev/mapper' ) >= 0:
# param_A = 'cryptdevice=%s:%s' % ( self.spare_dev, os.path.basename( root_partition_device ) )
# else:
res = distro.sign_and_write_custom_kernel( distro.device, distro.root_dev, '' )
elif argv[2] == 'tarball-me':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.generate_tarball_of_my_rootfs( '/tmp/out.tgz' )
os.system( 'rm -f /tmp/out.tgz' )
elif argv[2] == 'posterity':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
if 0 != distro.save_for_posterity_if_possible_D():
failed( 'Failed to create sample distro posterity file' )
elif argv[2] == 'udev':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
os.system( 'python3 /usr/local/bin/Chrubix/src/poweroff_if_disk_removed.py' )
elif argv[2] == 'tweak-lxdm-source':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
p = '%s/%s' % ( distro.sources_basedir, 'lxdm' )
generate_mickeymouse_lxdm_patch( distro.mountpoint, p, '%s/debian/patches/99_mickeymouse.patch' % ( p ) )
elif argv[2] == 'chromium':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
chroot_this( distro.mountpoint, 'yes "" 2>/dev/null | apt-get build-dep chromium' )
distro.build_and_install_package_from_deb_or_ubu_source( 'chromium-browser', 'https://packages.debian.org/' + argv[3] )
elif argv[2] == 'install-bitmask':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.install_leap_bitmask()
# elif argv[2] == 'download-kernel-source':
# distro = generate_distro_record_from_name( argv[3] )
# distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
# distro.kernel_rebuild_required = True # ...because the initramfs needs our boom pw, which means we'll have to rebuild initramfs.... which means rebuilding kernel!
# distro.root_is_encrypted = False
# distro.kthx = True # True
# distro.use_latest_kernel = False
# distro.download_kernel_source()
# elif argv[2] == 'build-kernel':
# distro = generate_distro_record_from_name( 'debianjessie' )
# distro.mountpoint = '/' if os.system( 'cat /proc/cmdline 2>/dev/null | fgrep root=/dev/dm-0 > /dev/null' ) != 0 else MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
# distro.kernel_rebuild_required = True # ...because the initramfs needs our boom pw, which means we'll have to rebuild initramfs.... which means rebuilding kernel!
# distro.root_is_encrypted = False
# distro.kthx = True # True
# distro.use_latest_kernel = True
# distro.build_kernel()
elif argv[2] == 'patch-nm':
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
patch_org_freedesktop_networkmanager_conf_file( '%s/etc/dbus-1/system.d/org.freedesktop.NetworkManager.conf' % ( distro.mountpoint ),
'%s/usr/local/bin/Chrubix/blobs/settings/nmgr-cfg-diff.txt.gz' % ( distro.mountpoint ) )
elif argv[2] == 'makepkg':
print( 'Assuming archlinux' )
distro = generate_distro_record_from_name( 'archlinux' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
pkg = argv[3]
# sys.exit( 0 )
print( "Building %s" % ( pkg ) )
if pkg == 'linux-chromebook':
call_makepkg_or_die( mountpoint = '/', \
package_path = '%s/%s' % ( distro.sources_basedir, pkg ), \
cmd = 'cd %s && makepkg --skipchecksums --nobuild -f' % ( distro.mountpoint + distro.kernel_src_basedir ),
errtxt = 'Failed to handle %s' % ( pkg ) )
else:
call_makepkg_or_die( mountpoint = '/', \
package_path = '%s/%s' % ( distro.sources_basedir, pkg ), \
cmd = 'cd %s/%s && makepkg --skipchecksums --nobuild -f' % ( distro.sources_basedir, pkg ),
errtxt = 'Failed to download %s' % ( pkg ) )
elif argv[2] == 'alarpy-build':
distro = generate_distro_record_from_name( 'debianwheezy' )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.build_and_install_package_into_alarpy_from_source( argv[3], quiet = True )
elif argv[2] == 'install-i2p':
distro = generate_distro_record_from_name( argv[3] )
assert( os.path.isdir( argv[4] ) is True )
distro.mountpoint = argv[4]
# distro.mountpoint = MYDISK_MTPT
# distro.device = '/dev/mmcblk1'
# distro.root_dev = '/dev/mmcblk1p3'
# distro.spare_dev = '/dev/mmcblk1p2'
distro.install_i2p()
elif argv[2] == 'win-xp-theme':
distro = generate_distro_record_from_name( argv[3] )
distro.mountpoint = MYDISK_MTPT
distro.device = '/dev/mmcblk1'
distro.root_dev = '/dev/mmcblk1p3'
distro.spare_dev = '/dev/mmcblk1p2'
distro.install_win_xp_theme()
elif argv[2] == 'free':
r = remaining_megabytes_free_on_device( argv[3] )
failed( 'free space on %s is %d MB' % ( argv[3], r ) )
else:
raise RuntimeError ( 'I do not understand %s' % ( argv[2] ) )
os.system( 'sleep 4' )
print( "Exiting w/ retval=%d" % ( res ) )
sys.exit( res )
|
ReubenAbrams/Chrubix
|
src/tinker.py
|
Python
|
gpl-3.0
| 14,182
|
[
"TINKER"
] |
35e73051e27d78b8cb4b42ef1f1b30e089c9371ffe7ad9228379fde8c5b52168
|
from llvmlite.ir import CallInstr
class Visitor(object):
def visit(self, module):
self._module = module
for func in module.functions:
self.visit_Function(func)
def visit_Function(self, func):
self._function = func
for bb in func.blocks:
self.visit_BasicBlock(bb)
def visit_BasicBlock(self, bb):
self._basic_block = bb
for instr in bb.instructions:
self.visit_Instruction(instr)
def visit_Instruction(self, instr):
raise NotImplementedError
@property
def module(self):
return self._module
@property
def function(self):
return self._function
@property
def basic_block(self):
return self._basic_block
class CallVisitor(Visitor):
def visit_Instruction(self, instr):
if isinstance(instr, CallInstr):
self.visit_Call(instr)
def visit_Call(self, instr):
raise NotImplementedError
class ReplaceCalls(CallVisitor):
def __init__(self, orig, repl):
super(ReplaceCalls, self).__init__()
self.orig = orig
self.repl = repl
self.calls = []
def visit_Call(self, instr):
if instr.callee == self.orig:
instr.replace_callee(self.repl)
self.calls.append(instr)
def replace_all_calls(mod, orig, repl):
"""Replace all calls to `orig` to `repl` in module `mod`.
Returns the references to the returned calls
"""
rc = ReplaceCalls(orig, repl)
rc.visit(mod)
return rc.calls
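# Illustrative usage sketch (a minimal example, not from the module itself;
# the function names "old_impl", "new_impl" and "caller" are hypothetical).
# It shows how replace_all_calls() might be driven on a small module built
# with llvmlite.ir:
#
#   import llvmlite.ir as ir
#
#   mod = ir.Module(name="example")
#   fnty = ir.FunctionType(ir.IntType(32), [ir.IntType(32)])
#   old_fn = ir.Function(mod, fnty, name="old_impl")
#   new_fn = ir.Function(mod, fnty, name="new_impl")
#
#   caller = ir.Function(mod, fnty, name="caller")
#   builder = ir.IRBuilder(caller.append_basic_block(name="entry"))
#   builder.ret(builder.call(old_fn, caller.args))
#
#   # Rewrite every call to old_impl so it targets new_impl instead.
#   rewritten = replace_all_calls(mod, old_fn, new_fn)
#   assert len(rewritten) == 1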
|
numba/llvmlite
|
llvmlite/ir/transforms.py
|
Python
|
bsd-2-clause
| 1,552
|
[
"VisIt"
] |
a55efda41db49b837f1cb981124b30e5554ff1cc727fb01818309b4b513b7ce4
|
# TODO: handle encoding of netCDF4 specific options
from .. import Variable
from ..conventions import cf_encoder
from ..core import indexing
from ..core.utils import FrozenOrderedDict, close_on_error
from ..core.pycompat import iteritems, basestring, unicode_type, OrderedDict
from .common import AbstractWritableDataStore
from .netCDF4_ import _nc4_group, _nc4_values_and_dtype
class H5NetCDFStore(AbstractWritableDataStore):
"""Store for reading and writing data via h5netcdf
"""
def __init__(self, filename, mode='r', group=None):
import h5netcdf.legacyapi
ds = h5netcdf.legacyapi.Dataset(filename, mode=mode)
with close_on_error(ds):
self.ds = _nc4_group(ds, group, mode)
self.format = format
self._filename = filename
def store(self, variables, attributes):
# All NetCDF files get CF encoded by default; without this, attempting
# to write times, for example, would fail.
cf_variables, cf_attrs = cf_encoder(variables, attributes)
AbstractWritableDataStore.store(self, cf_variables, cf_attrs)
def open_store_variable(self, var):
dimensions = var.dimensions
data = indexing.LazilyIndexedArray(var)
attributes = OrderedDict((k, var.getncattr(k))
for k in var.ncattrs())
# netCDF4 specific encoding
encoding = dict(var.filters())
chunking = var.chunking()
encoding['chunksizes'] = chunking if chunking != 'contiguous' else None
# save source so __repr__ can detect if it's local or not
encoding['source'] = self._filename
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(v))
for k, v in iteritems(self.ds.variables))
def get_attrs(self):
return FrozenOrderedDict((k, self.ds.getncattr(k))
for k in self.ds.ncattrs())
def get_dimensions(self):
return self.ds.dimensions
def set_dimension(self, name, length):
self.ds.createDimension(name, size=length)
def set_attribute(self, key, value):
self.ds.setncattr(key, value)
def prepare_variable(self, name, variable):
import h5py
attrs = variable.attrs.copy()
variable, dtype = _nc4_values_and_dtype(variable)
if dtype is str:
dtype = h5py.special_dtype(vlen=unicode_type)
self.set_necessary_dimensions(variable)
fill_value = attrs.pop('_FillValue', None)
if fill_value in ['\x00']:
fill_value = None
encoding = variable.encoding
kwargs = {}
for key in ['zlib', 'complevel', 'shuffle',
'chunksizes', 'fletcher32']:
if key in encoding:
kwargs[key] = encoding[key]
nc4_var = self.ds.createVariable(name, dtype, variable.dims,
fill_value=fill_value, **kwargs)
for k, v in iteritems(attrs):
nc4_var.setncattr(k, v)
return nc4_var, variable.data
def sync(self):
self.ds.sync()
def close(self):
ds = self.ds
# netCDF4 only allows closing the root group
while ds.parent is not None:
ds = ds.parent
ds.close()
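# Illustrative usage sketch (a minimal, hypothetical example; 'example.nc' is
# a placeholder file name). The store is normally created by xray's
# open_dataset()/to_netcdf() machinery, but it can also be used directly:
#
#   store = H5NetCDFStore('example.nc', mode='r')
#   variables = store.get_variables()   # mapping of name -> Variable
#   attrs = store.get_attrs()           # global file attributes
#   store.close()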
|
clarkfitzg/xray
|
xray/backends/h5netcdf_.py
|
Python
|
apache-2.0
| 3,392
|
[
"NetCDF"
] |
29bc6c9dbfe93e6fc69e404740f8a2c8f6ba5c45449672386a77ab4f86ac7ab7
|
"""Tests for accounts.models."""
# pylint: disable=invalid-name, too-many-lines
from datetime import timedelta
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.timezone import now
from mock import patch
from model_mommy import mommy
from open_connect.accounts import models
from open_connect.accounts.models import PermissionDeniedError
from open_connect.groups.models import Group, GroupRequest
from open_connect.notifications.models import Subscription
from open_connect.connectmessages.models import Thread
from open_connect.connectmessages.tests import ConnectMessageTestCase
from open_connect.connect_core.utils.basetests import (
ConnectTestCase, ConnectTestMixin
)
User = get_user_model()
class PatcherMixin(object):
"""Mixin for adding create_patch to a test."""
# pylint: disable=too-few-public-methods
def create_patch(self, name):
"""Create a patch matching name."""
patcher = patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
class UserManagerTest(TestCase):
"""Tests for UserManager."""
def test_create_user(self):
"""Test that creating a user is successful."""
user = models.User.objects.create_user(
username='go+1@dj.local', password='bM1!@')
self.assertEqual(user.email, 'go+1@dj.local')
self.assertTrue(user.check_password('bM1!@'))
self.assertEqual(user.first_name, '')
self.assertEqual(user.last_name, '')
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
self.assertTrue(user.is_active)
def test_create_user_first_last_email(self):
"""Test creating a user with a first and last name"""
user = models.User.objects.create_user(
username='modeltest1@org.local',
password='abcd123',
email='MODELTEST123@OrG.LoCaL',
first_name='John',
last_name='Smith'
)
# Test that first and last name came across
self.assertEqual(user.first_name, 'John')
self.assertEqual(user.last_name, 'Smith')
# Test that the normalized email and username came across
self.assertEqual(user.email, 'modeltest123@org.local')
self.assertEqual(user.username, 'modeltest1@org.local')
# Ensure correct permissions
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
self.assertTrue(user.is_active)
def test_create_user_removes_unsubscribes(self):
"""Test that creating a user wipes any unsubscribes having his email"""
from open_connect.mailer.models import Unsubscribe
mommy.make(Unsubscribe, address='go+unsubtest@dj.local')
self.assertTrue(Unsubscribe.objects.filter(
address='go+unsubtest@dj.local').exists())
models.User.objects.create_user(
username='go+unsubtest@dj.local', password='bM1!@')
self.assertFalse(Unsubscribe.objects.filter(
address='go+unsubtest@dj.local').exists())
def test_create_user_no_password(self):
"""Test creating a user without a password."""
user = models.User.objects.create_user(username='go+1@dj.local')
self.assertEqual(user.email, 'go+1@dj.local')
self.assertFalse(user.has_usable_password())
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
self.assertTrue(user.is_active)
def test_create_superuser(self):
"""Test creating a superuser."""
user = models.User.objects.create_superuser(
username='b@g.local', password='bM1@')
self.assertEqual(user.email, 'b@g.local')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_active)
class UserManagerInviteTest(TestCase):
"""Tests for the user manager"""
def setUp(self):
"""Setup the UserManagerInviteTest TestCase"""
self.user = models.User.objects.create(username='b@g.local')
def test_create_with_invite_no_staff_no_superuser(self):
"""create should consume invite if there is one."""
invite = models.Invite.objects.create(
email='alkjfdslakjdsf@dj.local', created_by=self.user)
user = models.User.objects.create_user(
username='alkjfdslakjdsf@dj.local')
invite = models.Invite.objects.get(pk=invite.pk)
self.assertIsNotNone(invite.consumed_at)
self.assertEqual(invite.consumed_by, user)
def test_create_with_invite_is_staff(self):
"""create should set user to staff if invite is set to is_staff."""
models.Invite.objects.create(
email='jkldfsjkldsjk@dj.local', created_by=self.user, is_staff=True)
user = models.User.objects.create_user(
username='jkldfsjkldsjk@dj.local')
self.assertTrue(user.is_staff)
def test_create_with_invite_is_superuser(self):
"""create should set user to superuser if invite.is_superuser."""
models.Invite.objects.create(
email='afdavawev@dj.local', created_by=self.user, is_superuser=True)
user = models.User.objects.create_user(username='afdavawev@dj.local')
self.assertTrue(user.is_superuser)
def test_create_with_invite_groups_are_added(self):
"""create should add user to any groups indicated in invite."""
invite = mommy.make('accounts.Invite', email='testuser@dj.local')
group1 = mommy.make('groups.Group')
group2 = mommy.make('groups.group')
invite.groups.add(group1)
invite.groups.add(group2)
user = models.User.objects.create_user(username=invite.email)
all_groups = user.groups_joined
self.assertIn(group1, all_groups)
self.assertIn(group2, all_groups)
class UserTest(ConnectTestMixin, TestCase):
"""Test User model methods."""
def setUp(self):
"""Setup the test"""
self.user = self.create_user(
email='usertest@org.local',
password='lalala',
first_name='Jack',
last_name='Grant'
)
def test_get_absolute_url(self):
"""User's absolute url should be their detail page."""
user = self.user
user.email = 'kdjfskls@fjdklsfjsdl.com'
user.save()
self.assertEqual(
self.user.get_absolute_url(),
reverse('user_details', args=[user.uuid])
)
user.delete()
def test_unicode(self):
"""User's unicode conversion should be first name and last initial."""
self.assertEqual(unicode(self.user), 'Jack G.')
def test_save_lowercase(self):
"""Test that uppercase emails are converted to lowercase on save"""
user = self.user
user.email = 'JackGrant@razzmatazz.local'
user.save()
self.assertEqual(
models.User.objects.get(pk=user.pk).email,
'jackgrant@razzmatazz.local'
)
def test_get_full_name_with_first_and_last(self):
"""User.get_full_name() should return first name and last initial."""
self.assertEqual(self.user.get_full_name(), 'Jack G.')
def test_get_full_name_system_user(self):
"""get_full_name should return the system user name for the sys user"""
with self.settings(SYSTEM_USER_EMAIL=self.user.email):
self.assertEqual(
self.user.get_full_name(),
settings.SYSTEM_USER_NAME
)
def test_get_full_name_missing_first_name(self):
"""get_full_name should be first part of email if first name missing."""
self.user.first_name = ''
self.assertEqual(self.user.get_full_name(), 'usertest')
def test_get_full_name_missing_last_name(self):
"""get_full_name should be first name if last name missing."""
self.user.last_name = ''
self.assertEqual(self.user.get_full_name(), 'Jack')
def test_get_full_name_missing_first_and_last_names(self):
"""get_full_name should be first part of email if name is missing."""
self.user.first_name = ''
self.user.last_name = ''
self.assertEqual(self.user.get_full_name(), 'usertest')
def test_get_short_name_with_first_name(self):
"""get_short_name should be the user's first name."""
self.assertEqual(self.user.get_short_name(), 'Jack')
def test_get_short_name_missing_first_name(self):
"""get_short_name is first part of email if first name is missing."""
self.user.first_name = ''
self.assertEqual(self.user.get_short_name(), 'usertest')
@override_settings(ORIGIN='https://theorigin.bo.com')
def test_full_url(self):
"""Test returning user's profile URL using the origin"""
user = self.create_user(username='awefawef@fjdklsfjsdl.com')
correct_url = 'https://theorigin.bo.com{path}'.format(
path=reverse('user_details', args=[user.uuid]))
self.assertEqual(
user.full_url,
correct_url
)
user.delete()
def test_private_hash(self):
"""Test the private code generator attached to the User model"""
user = mommy.make(User, email='awesome@example.com')
first_code = user.private_hash
user.email = 'awesome2@example.com'
second_code = user.private_hash
self.assertIsNotNone(first_code)
self.assertNotEqual(first_code, second_code)
def test_system_user(self):
"""system_user should return true for the system user"""
self.assertFalse(self.user.system_user)
with self.settings(SYSTEM_USER_EMAIL=self.user.email):
self.assertTrue(self.user.system_user)
def test_unsubscribe_url(self):
"""
Test the URL the user will visit if they need to unsubscribe from
notices without logging in.
"""
url = self.user.unsubscribe_url
self.assertIn('http', url)
self.assertIn(settings.ORIGIN, url)
self.assertIn(self.user.private_hash, url)
self.assertIn(str(self.user.email), url)
def test_change_notification_url(self):
"""Test that the notification change URL is correct"""
self.user.save()
url = self.user.change_notification_url
self.assertIn('http', url)
self.assertIn(settings.ORIGIN, url)
self.assertIn(self.user.private_hash, url)
self.assertIn(str(self.user.pk), url)
def test_group_categories(self):
"""Should return a set with categories for groups a user belongs to."""
gvp_group = mommy.make('groups.Group', category__name='Apples')
lgbt_group = mommy.make('groups.Group', category__name='Oranges')
user = mommy.make(User)
user.save()
user.add_to_group(gvp_group.pk)
user.add_to_group(lgbt_group.pk)
categories = user.group_categories
self.assertIn('Apples', categories)
self.assertIn('Oranges', categories)
def test_get_real_name(self):
"""Should return full name with full last name."""
self.assertEqual(
self.user.get_real_name(),
u'{first_name} {last_name}'.format(
first_name=self.user.first_name,
last_name=self.user.last_name
)
)
def test_get_real_name_system_user(self):
"""Should return system user name."""
self.user.email = settings.SYSTEM_USER_EMAIL
self.assertEqual(self.user.get_real_name(), settings.SYSTEM_USER_NAME)
def test_get_real_name_no_last_name(self):
"""Should return the short name."""
self.user.last_name = ''
self.assertEqual(self.user.get_full_name(), self.user.get_short_name())
class UserViewListTest(ConnectTestMixin, TestCase):
"""Tests for User.can_view_user_list"""
def test_staff_can_see_list(self):
"""Test that staff members can see the user list"""
user = self.create_user(is_staff=True)
self.assertTrue(user.can_view_user_list())
def test_moderator_can_see_list(self):
"""Test that group moderators can view the list"""
user = self.create_user()
group = self.create_group()
group.owners.add(user)
self.assertTrue(user.is_moderator())
self.assertTrue(user.can_view_user_list())
def test_invite_users_see_list(self):
"""Test that users that can invite others to groups can see list"""
user = self.create_user()
self.add_perm(user, 'add_invite', 'accounts', 'invite')
self.assertTrue(user.can_view_user_list())
def test_edit_group_see_list(self):
"""Test that users that can edit groups can see the user list"""
user = self.create_user()
self.add_perm(user, 'can_edit_any_group', 'groups', 'group')
self.assertTrue(user.can_view_user_list())
def test_add_group_see_list(self):
"""Test that users that can add groups can see the user list"""
user = self.create_user()
self.add_perm(user, 'add_group', 'groups', 'group')
self.assertTrue(user.can_view_user_list())
def test_regular_user(self):
"""Test that regular users cannot see the user list"""
user = self.create_user()
self.assertFalse(user.can_view_user_list())
class UserDirectMessagePermissionTest(ConnectTestMixin, TestCase):
"""Tests for User.can_direct_message_user and User.all_user_messageable"""
def setUp(self):
"""Setup the test"""
self.recipient = self.create_user()
def test_staff_messageable(self):
"""Test that direct messages to staff are allowed"""
user = self.create_user(is_staff=True)
self.assertTrue(user.all_user_messageable)
# When a user's staff status is false, disable open messaging
user.is_staff = False
self.assertFalse(user.all_user_messageable)
def test_moderator_messageable(self):
"""Test that direct messages to moderators are allowed"""
user = self.create_user()
group = self.create_group()
group.owners.add(user)
self.assertTrue(user.can_moderate)
self.assertTrue(user.all_user_messageable)
def test_regular_user_not_auto_messageable(self):
"""Test that regular users are not labeled always direct-messageable"""
user = self.create_user()
self.assertFalse(user.is_staff)
self.assertFalse(user.can_moderate)
self.assertFalse(user.all_user_messageable)
def test_direct_message_test_superuser(self):
"""Test that superusers can always direct message"""
user = self.create_superuser()
self.assertTrue(user.can_direct_message_user(self.recipient))
def test_direct_message_self(self):
"""Test that users cannot direct message themselves"""
user = self.create_superuser()
self.assertFalse(user.can_direct_message_user(user))
def test_staff_initiate_direct_message(self):
"""Test that staff can initiate direct messages"""
user = self.create_user(is_staff=True)
self.assertTrue(user.can_direct_message_user(self.recipient))
def test_messageable_users_can_receive(self):
"""Test users who can always receive direct messages can be messaged"""
moderator = self.create_user()
group = self.create_group()
group.owners.add(moderator)
user = self.create_user()
self.assertTrue(moderator.all_user_messageable)
self.assertTrue(user.can_direct_message_user(moderator))
def test_permissioned_initiate_direct_messages(self):
"""Test that users with permissions can inititate direct messages"""
permission = Permission.objects.get(
codename='can_initiate_direct_messages')
user = self.create_user()
user.user_permissions.add(permission)
self.assertTrue(user.can_direct_message_user(self.recipient))
def test_regular_user_cannot_inititate_direct_message(self):
"""Test that a regular user cannot initiate a direct message"""
user = self.create_user()
self.assertFalse(user.can_direct_message_user(self.recipient))
class UserCanViewProfileTest(ConnectMessageTestCase):
"""Tests for User.can_view_profile."""
def setUp(self):
self.banned_user = mommy.make('accounts.User', is_banned=True)
def test_can_view_profile_self(self):
"""User can view their own profile."""
self.assertTrue(self.banned_user.can_view_profile(self.banned_user))
def test_can_view_profile_not_banned(self):
"""Anyone can view an unbanned profile."""
self.assertTrue(self.staff_user.can_view_profile(self.normal_user))
def test_can_view_profile_has_permission(self):
"""Users with the can_view_banned permission can view any profile."""
user = mommy.make('accounts.User')
permission = Permission.objects.get(codename='can_view_banned')
user.user_permissions.add(permission)
self.assertTrue(user.can_view_profile(self.banned_user))
def test_can_view_profile_superuser(self):
"""Super users can do whatever they want."""
self.assertTrue(self.superuser.can_view_profile(self.banned_user))
def test_can_view_profile_user_is_banned_other(self):
"""Any other user should not be able to see a banned user's profile."""
self.assertFalse(self.normal_user.can_view_profile(self.banned_user))
class UserDirectMessagesSentSinceTest(ConnectTestMixin, TestCase):
"""Tests for User.direct_messages_sent_since."""
def test_message_count(self):
"""Should return the correct message count."""
sender = self.create_user()
self.create_thread(direct=True, sender=sender)
self.create_thread(direct=True, sender=sender)
self.create_thread(direct=True, sender=sender)
yesterday = now() - timedelta(hours=24)
self.assertEqual(sender.direct_messages_sent_since(yesterday), 3)
class UserGroupModerationRequestsTest(ConnectTestCase):
"""Tests for UserGroupModerationRequest"""
# pylint: disable=too-many-instance-attributes
def setUp(self):
self.thread1 = mommy.make('connectmessages.Thread')
self.user1 = models.User.objects.create_user(
username='mo@dj.local',
password='lalala',
first_name='Grace',
last_name='Grant'
)
self.user2 = models.User.objects.create_user(
username='maeby@dj.local',
password='lalala',
first_name='Maeby',
last_name='Funke'
)
self.group_owner = self.user3
self.group1 = Group.objects.create(name='Test Group')
self.group1.owners.add(self.group_owner)
self.unapproved_request = GroupRequest.objects.create(
user=self.user1, group=self.group1)
self.approved_request = GroupRequest.objects.create(
user=self.user2,
group=self.group1,
moderated_by=self.group_owner,
moderated_at=now(),
approved=True
)
self.group2 = Group.objects.create(name='Other Test Group')
self.non_owner_request = GroupRequest.objects.create(
user=self.user1, group=self.group2)
def test_group_join_requests_to_moderate(self):
"""Unapproved requests should be in group_join_requests_to_moderate()"""
self.assertIn(
self.unapproved_request,
self.group_owner.group_join_requests_to_moderate()
)
def test_group_join_requests_to_moderate_no_approved_requests_present(self):
"""Approved requests are not in group_join_requests_to_moderate()"""
self.assertNotIn(
self.approved_request,
self.group_owner.group_join_requests_to_moderate()
)
def test_group_join_requests_to_moderate_not_an_owner(self):
"""Requests for groups you don't own aren't returned."""
self.assertNotIn(
self.non_owner_request,
self.group_owner.group_join_requests_to_moderate()
)
def test_has_group_join_requests_to_moderate(self):
"""If you have requests to moderate, it should return True."""
self.assertTrue(self.group_owner.has_group_join_requests_to_moderate())
def test_has_group_join_requests_to_moderate_nothing_to_moderate(self):
"""If you don't have requests to moderate, it should return False."""
self.unapproved_request.delete()
self.assertFalse(self.group_owner.has_group_join_requests_to_moderate())
def test_get_moderation_tasks_has_group_moderation_tasks(self):
"""groups_to_mod in response dict should be true."""
response = self.group_owner.get_moderation_tasks()
self.assertTrue(response['groups_to_mod'])
def test_get_moderation_tasks_has_no_group_moderation_tasks(self):
"""groups_to_mod in response dict should be false."""
self.unapproved_request.delete()
cache.clear()
response = self.group_owner.get_moderation_tasks()
self.assertFalse(response['groups_to_mod'])
class UserGroupManagementTest(ConnectTestMixin, TestCase):
"""Message moderation test"""
def test_approved_group_message_not_in_messages_to_moderate(self):
"""Approved messages shouldn't be returned."""
thread = self.create_thread()
user = self.create_user()
user.add_to_group(thread.group)
self.assertNotIn(thread.first_message, user.messages_to_moderate)
def test_approved_dm_not_in_messages_to_moderate(self):
"""Approved dms should not be in messages_to_moderate."""
thread = self.create_thread(direct=True)
thread.first_message.status = 'approved'
thread.first_message.save()
user = self.create_user()
self.assertNotIn(thread.first_message, user.messages_to_moderate)
def test_global_moderator(self):
"""Test that users who have the relevant permissions are global mods"""
user = self.create_user()
self.assertFalse(user.global_moderator)
self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')
latest_user = User.objects.get(pk=user.pk)
self.assertTrue(latest_user.global_moderator)
def test_has_perm_group_message_in_messages_to_moderate(self):
"""Even if user is not group owner, should see messages to moderate."""
thread = self.create_thread()
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_user()
self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')
self.assertIn(thread.first_message, user.messages_to_moderate)
def test_has_perm_dm_in_messages_to_moderate(self):
"""Even if user is not on thread, should see in messages to moderate."""
dm = self.create_thread(direct=True)
dm.first_message.status = 'pending'
dm.first_message.save()
user = self.create_user()
self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')
self.assertIn(dm.first_message, user.messages_to_moderate)
def test_non_group_owner_messages_to_moderate(self):
"""Pending messages should be returned only if user owns the group."""
thread = self.create_thread()
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_user()
self.assertNotIn(thread.first_message, user.messages_to_moderate)
def test_group_owner_messages_to_moderate(self):
"""Flagged messages should be in moderation queue."""
thread = self.create_thread()
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_user()
thread.group.owners.add(user)
self.assertIn(thread.first_message, user.messages_to_moderate)
def test_banned_sender_messages_to_moderate(self):
"""Messages sent by banned user should not appear."""
sender = self.create_user()
thread = self.create_thread(sender=sender)
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_user()
thread.group.owners.add(user)
self.assertIn(thread.first_message, user.messages_to_moderate)
sender.is_banned = True
sender.save()
self.assertNotIn(thread.first_message, user.messages_to_moderate)
def test_has_messages_to_moderate(self):
"""Should return True if there are messages to moderate."""
Thread.objects.all().delete()
thread = self.create_thread()
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_superuser()
self.assertTrue(user.has_messages_to_moderate())
def test_has_messages_to_moderate_nothing_to_moderate(self):
"""Should return False if there aren't messages to moderate."""
Thread.objects.all().delete()
self.create_thread()
user = self.create_superuser()
self.assertFalse(user.has_messages_to_moderate())
def test_get_moderation_tasks_has_messages_to_moderate(self):
"""messages_to_mod should be True."""
Thread.objects.all().delete()
thread = self.create_thread()
thread.first_message.status = 'pending'
thread.first_message.save()
user = self.create_superuser()
cache.clear()
response = user.get_moderation_tasks()
self.assertTrue(response['messages_to_mod'])
def test_get_moderation_tasks_no_messages_to_moderate(self):
"""messages_to_mod should be False."""
Thread.objects.all().delete()
self.create_thread()
user = self.create_superuser()
cache.clear()
response = user.get_moderation_tasks()
self.assertFalse(response['messages_to_mod'])
def test_groups_moderating(self):
"""groups_moderating returns groups you can moderate"""
group = mommy.make('groups.Group')
user = self.create_user()
group.owners.add(user)
self.assertIn(group, user.groups_moderating)
def test_user_with_perm_can_moderate(self):
"""Superusers can always moderate groups."""
user = self.create_user()
self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')
self.assertTrue(user.can_moderate)
def test_can_moderate_non_group_owner(self):
"""Non group owners cannot moderate."""
user = self.create_user()
self.assertFalse(user.can_moderate)
def test_can_moderate_group_owner(self):
"""Group owners can moderate."""
user = self.create_user()
group = mommy.make('groups.Group')
group.owners.add(user)
self.assertTrue(user.can_moderate)
def test_can_flag_messages(self):
"""Test that can_flag_messages is True for normal users."""
user = self.create_user()
self.assertTrue(user.can_flag_messages)
def test_can_flag_messages_user_is_banned(self):
"""Banned users cannot flag messages. Should fail silently."""
user = self.create_user(is_banned=True)
self.assertFalse(user.can_flag_messages)
def test_groups_joined(self):
"""groups_joined returns groups a user is a member of"""
group = mommy.make('groups.Group')
user = self.create_user()
user.add_to_group(group.pk)
self.assertIn(group, user.groups_joined)
def test_groups_joined_user_is_not_member(self):
"""groups_joined does not return groups a user is not a member of."""
group = mommy.make('groups.Group')
user = self.create_user()
self.assertNotIn(group, user.groups_joined)
def test_cached_groups_joined(self):
"""Should return the same as groups_joined."""
user = self.create_user()
groups = mommy.make('groups.Group', _quantity=2)
for group in groups:
user.add_to_group(group.pk)
self.assertQuerysetItemsEqual(
user.groups_joined,
user.cached_groups_joined
)
def test_cached_groups_joined_multiple_calls(self):
"""Should only call groups_joined once."""
user = self.create_user()
with patch('open_connect.accounts.models.cache') as mock_cache:
groups = mommy.make('groups.Group', _quantity=2)
for group in groups:
user.add_to_group(group.pk)
# pylint: disable=pointless-statement
user.cached_groups_joined
user.cached_groups_joined
self.assertEqual(mock_cache.get.call_count, 1)
def test_messagable_groups(self):
"""messagable_groups returns groups you can send a message to."""
group = mommy.make('groups.Group')
user = self.create_user()
user.add_to_group(group.pk)
self.assertIn(group, user.messagable_groups)
def test_messagable_groups_does_not_contain_non_messageable_groups(self):
"""messagable_groups doesn't return groups you can't message."""
group = mommy.make('groups.Group')
user = self.create_user()
self.assertNotIn(group, user.messagable_groups)
def test_superuser_can_send_to_non_member_group(self):
"""Super users can send to any group, even if not a member."""
group = mommy.make(Group)
user = self.create_superuser()
self.assertTrue(user.can_send_to_group(group))
def test_superuser_can_send_to_moderated_group(self):
"""Super users can send to moderated groups."""
moderated_group = mommy.make(Group, moderated=True)
user = self.create_superuser()
self.assertTrue(user.can_send_to_group(moderated_group))
def test_group_owner_can_send_to_moderated_groups(self):
"""Moderators can send to their own groups."""
moderated_group = mommy.make(Group, moderated=True)
group_owner = self.create_user()
moderated_group.owners.add(group_owner)
self.assertTrue(group_owner.can_send_to_group(moderated_group))
def test_group_member_can_send_to_non_moderated_group(self):
"""A group member can send to a group if it is not moderated."""
group = mommy.make('groups.group')
user = self.create_user()
user.add_to_group(group.pk)
self.assertTrue(user.can_send_to_group(group))
def test_group_member_can_not_send_to_moderated_group(self):
"""A group member can not send to a moderated group.
(When a group member sends to a moderated group, it goes to moderation.)
"""
group = mommy.make('groups.Group', moderated=True)
user = self.create_user()
user.add_to_group(group.pk)
self.assertFalse(user.can_send_to_group(group))
def test_non_group_member_cannot_send_to_group(self):
"""A user cannot send to a group they aren't a member of."""
group = mommy.make('groups.Group')
user = self.create_user()
self.assertRaises(
PermissionDeniedError,
user.can_send_to_group,
group
)
def test_can_send_to_group_if_group_is_falsey(self):
"""If the group is falsey, we're checking a DM."""
user = self.create_user()
self.assertTrue(user.can_send_to_group(None))
def test_whitelisted_user_can_send_to_group_if_group_is_moderated(self):
"""Whitelisted users can send to moderated groups."""
group = mommy.make('groups.Group', moderated=True)
user = self.create_user()
user.add_to_group(group.pk)
group.whitelist_users.add(user)
self.assertTrue(user.can_send_to_group(group))
def test_whitelisted_user_can_send_to_group_if_not_member(self):
"""Whitelist users can send to a group even if they're not a member."""
group = mommy.make('groups.Group')
user = self.create_user()
group.whitelist_users.add(user)
self.assertTrue(user.can_send_to_group(group))
# pylint: disable=line-too-long
@patch('open_connect.accounts.models.group_tasks.notify_owners_of_group_request')
def test_request_to_join_group(self, mock_group_tasks):
"""Should return the GroupRequest and notify group owners."""
group = mommy.make('groups.Group', moderated=True)
user = self.create_user()
request = user.request_to_join_group(group)
self.assertIsInstance(request, GroupRequest)
mock_group_tasks.delay.assert_called_once_with(request.pk)
# pylint: disable=line-too-long
@patch('open_connect.accounts.models.group_tasks.notify_owners_of_group_request')
def test_request_to_join_group_already_requested(self, mock_group_tasks):
"""Should return the GroupRequest and not notify group owners again"""
group = mommy.make('groups.Group', moderated=True)
user = self.create_user()
group_request = GroupRequest.objects.create(user=user, group=group)
result = user.request_to_join_group(group)
self.assertEqual(group_request, result)
self.assertEqual(mock_group_tasks.delay.call_count, 0)
def test_add_to_group(self):
"""Test adding a user to a group."""
new_group = mommy.make(Group)
user = self.create_user()
user.add_to_group(new_group.pk)
self.assertIn(user, new_group.get_members())
subscription = Subscription.objects.get(user=user, group=new_group)
# Assert that the subscription created has the default period
self.assertEqual(subscription.period, 'immediate')
def test_add_to_group_with_extra_kwargs(self):
"""Pass extra kwargs through to Subscription."""
group = mommy.make(Group)
user = self.create_user()
user.add_to_group(group.pk, period='none')
self.assertIn(user, group.get_members())
second_subscription = Subscription.objects.get(user=user, group=group)
# Assert that the extra kwarg was passed along to Subscription
self.assertEqual(second_subscription.period, 'none')
@patch('open_connect.accounts.models.group_tasks.add_user_to_group')
def test_add_to_group_delay(self, mock):
"""Test adding a user to a group with a delay."""
new_group = mommy.make(Group)
user = self.create_user()
user.add_to_group(new_group.pk)
mock.delay.assert_called_once_with(
user_id=user.pk,
group_id=new_group.pk,
notification=None,
period=None
)
@patch('open_connect.accounts.models.group_tasks.add_user_to_group')
def test_add_to_group_immediate(self, mock):
"""Test adding a user to a group with no task delay."""
new_group = mommy.make(Group)
user = self.create_user()
user.add_to_group(new_group.pk, immediate=True)
mock.assert_called_once_with(
user_id=user.pk,
group_id=new_group.pk,
notification=None,
period=None
)
def test_remove_from_group(self):
"""Test removing a user from a group."""
new_group = mommy.make(Group)
user = self.create_user()
user.add_to_group(new_group.pk)
user.remove_from_group(new_group)
self.assertNotIn(new_group, user.groups_joined)
def test_remove_owner_from_group(self):
"""Test removing a group owner from a group."""
group = mommy.make('groups.Group')
user = self.create_user()
user.add_to_group(group.pk)
group.owners.add(user)
user.remove_from_group(group)
self.assertNotIn(group, user.groups_joined)
self.assertNotIn(user, group.owners.all())
def test_bulk_unsubscribe(self):
"""Test bulk unsubscribing from open_connect.notifications"""
group1 = mommy.make('groups.Group')
group2 = mommy.make('groups.Group')
user = self.create_user()
user.add_to_group(group1.pk, period='immediate')
user.add_to_group(group2.pk)
user.bulk_unsubscribe()
self.assertFalse(
Subscription.objects.filter(
period='immediate', group__in=[group1, group2], user=user
).exists()
)
self.assertEqual(
Subscription.objects.filter(
period='none', group__in=[group1, group2], user=user).count(),
2
)
def add_email_invites_permission(user):
"""Add the accounts.email_invites permission to a user."""
permission = Permission.objects.get(
codename='email_invites', content_type__model='invite')
user.user_permissions.add(permission)
return user
class InviteTest(ConnectTestMixin, TestCase):
"""Tests for Invite model."""
def test_clean(self):
"""clean should lowercase email."""
invite = models.Invite(email='SOMEONE@DJ.local')
invite.clean()
self.assertEqual(invite.email, 'someone@dj.local')
def test_cannot_send_duplicate_invite(self):
"""Invited user should only receive one email notification."""
user = self.create_superuser()
invite = mommy.make(models.Invite, created_by=user)
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:
invite.send_invite()
self.assertTrue(mock.delay.called)
# Ensure that we can't send another identical invite
invite.notified = now()
invite.save()
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock2:
invite.send_invite()
self.assertFalse(mock2.delay.called)
def test_send_invite_superuser(self):
"""Test the invite sender"""
user = self.create_superuser()
self.assertTrue(user.is_superuser)
invite = mommy.make(models.Invite, created_by=user)
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:
invite.send_invite()
self.assertTrue(mock.delay.called)
def test_send_invite_user_has_permission(self):
"""Sending the invite as a user with email_invites permission."""
user = self.create_user()
add_email_invites_permission(user)
self.assertTrue(user.has_perm('accounts.email_invites'))
invite = mommy.make(models.Invite, created_by=user)
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:
invite.send_invite()
self.assertTrue(mock.delay.called)
def test_send_invite_user_does_not_have_permission(self):
"""Sending the invite as a user without email_invites permission."""
user = self.create_user()
self.assertFalse(user.has_perm('accounts.email_invites'))
invite = mommy.make(models.Invite, created_by=user)
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:
invite.send_invite()
self.assertFalse(mock.delay.called)
def test_send_invite_user_has_perm_and_is_superuser(self):
"""Sending the invite as a superuser with permission."""
user = self.create_superuser()
add_email_invites_permission(user)
self.assertTrue(user.has_perm('accounts.email_invites'))
self.assertTrue(user.is_superuser)
invite = mommy.make(models.Invite, created_by=user)
# pylint: disable=line-too-long
with patch('open_connect.accounts.models.render_and_send_invite_email') as mock:
invite.send_invite()
self.assertTrue(mock.delay.called)
class TestValidateTwitterHandle(TestCase):
"""Tests for the valid_twitter_handle validator."""
def test_valid_twitter_handle(self):
"""Should return None if handle is valid."""
valid_values = ['JackGrant', '1234User', 'Grace_Grant']
for value in valid_values:
self.assertIsNone(models.validate_twitter_handle(value))
def test_invalid_twitter_handle(self):
"""Test an invalid twitter handle format"""
invalid_values = [
'http://www.twitter.com/LINCOLN', '@LINCOLN', 'A Lincoln'
]
for value in invalid_values:
self.assertRaises(
ValidationError, models.validate_twitter_handle, value=value)
class TestUserAutocomplete(ConnectTestMixin, TestCase):
"""Test UserAutocomplete view."""
def test_regular_user(self):
"""A regular user should see no content"""
user = self.create_user()
self.client.login(username=user.email, password='moo')
response = self.client.get(
reverse('autocomplete_light_autocomplete',
kwargs={'autocomplete': 'UserAutocomplete'})
)
self.assertContains(response, 'No matches found')
self.assertNotContains(response, user.email)
def test_user_is_superuser(self):
"""Superuser should see users in response."""
user = self.create_superuser(first_name="abcd123", last_name="xyz123")
self.client.login(username=user.email, password='moo')
response = self.client.get(
reverse('autocomplete_light_autocomplete',
kwargs={'autocomplete': 'UserAutocomplete'})
)
# The response should contain the first and last name of the user
self.assertContains(response, "abcd123")
self.assertContains(response, "xyz123")
# So we don't expose email address we should never show email
self.assertNotContains(response, user.email)
@patch('open_connect.accounts.models.cache')
class TestUserIsModerator(ConnectTestMixin, TestCase):
"""Tests for User.is_moderator."""
def test_user_is_moderator(self, mock_cache):
"""Test when user is a moderator."""
user = self.create_user()
group = self.create_group()
group.owners.add(user)
# Force every cache lookup to miss
mock_cache.get.return_value = None
self.assertTrue(user.is_moderator())
mock_cache.get.assert_any_call(
'{}is_moderator'.format(user.cache_key))
mock_cache.set.assert_any_call(
'{}is_moderator'.format(user.cache_key), True, 3600)
def test_user_is_not_moderator(self, mock_cache):
"""Test when user is not a moderator."""
user = self.create_user()
# Force every cache lookup to miss
mock_cache.get.return_value = None
self.assertFalse(user.is_moderator())
mock_cache.set.assert_any_call(
'{}is_moderator'.format(user.cache_key), False, 3600)
def test_cache_is_set(self, mock_cache):
"""Test the cache is empty."""
user = self.create_user()
mock_cache.get.return_value = 'PUMPKIN SPICE LATTE'
self.assertEqual(user.is_moderator(), 'PUMPKIN SPICE LATTE')
self.assertEqual(mock_cache.set.call_count, 0)
|
ofa/connect
|
open_connect/accounts/tests/test_models.py
|
Python
|
mit
| 43,556
|
[
"VisIt"
] |
ed300d6d4dd9c13735c00d8493b80a701e394a2b9861b4800fef46940cf11cc6
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012-2014 Laurent Bachelier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function
import re
import pickle
import base64
import zlib
try:
import urllib3
except ImportError:
from requests.packages import urllib3
try:
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
import os
import sys
from copy import deepcopy
import inspect
try:
import requests
if int(requests.__version__.split('.')[0]) < 2:
raise ImportError()
except ImportError:
raise ImportError('Please install python-requests >= 2.0')
from weboob.tools.log import getLogger
from weboob.tools.ordereddict import OrderedDict
from weboob.tools.json import json
from .cookies import WeboobCookieJar
from .exceptions import HTTPNotFound, ClientError, ServerError
from .sessions import FuturesSession
from .profiles import Firefox
from .pages import NextPage
from .url import URL
class Browser(object):
"""
Simple browser class.
Act like a browser, and don't try to do too much.
"""
PROFILE = Firefox()
"""
Default profile used by browser to navigate on websites.
"""
TIMEOUT = 10.0
"""
Default timeout during requests.
"""
REFRESH_MAX = 0.0
"""
When handling a Refresh header, the browser considers it only if the sleep
time is less than this value.
"""
VERIFY = True
"""
Check SSL certificates.
"""
PROXIES = None
MAX_RETRIES = 2
MAX_WORKERS = 10
"""
Maximum number of threads for asynchronous requests.
"""
__states__ = []
"""
Saved state variables.
"""
@classmethod
def asset(cls, localfile):
"""
Absolute file path for a module local file.
"""
if os.path.isabs(localfile):
return localfile
return os.path.join(os.path.dirname(inspect.getfile(cls)), localfile)
def __init__(self, logger=None, proxy=None, responses_dirname=None):
self.logger = getLogger('browser', logger)
self.PROXIES = proxy
self._setup_session(self.PROFILE)
self.url = None
self.response = None
self.responses_dirname = responses_dirname
self.responses_count = 1
if isinstance(self.VERIFY, basestring):
self.VERIFY = self.asset(self.VERIFY)
def deinit(self):
self.session.close()
def load_state(self, state):
if 'cookies' in state:
try:
self.session.cookies = pickle.loads(zlib.decompress(base64.b64decode(state['cookies'])))
except (TypeError, zlib.error, EOFError, ValueError):
self.logger.error('Unable to reload cookies from storage')
else:
self.logger.info('Reloaded cookies from storage')
for attrname in self.__states__:
if attrname in state:
setattr(self, attrname, state[attrname])
def dump_state(self):
state = {}
state['cookies'] = base64.b64encode(zlib.compress(pickle.dumps(self.session.cookies, -1)))
for attrname in self.__states__:
state[attrname] = getattr(self, attrname)
self.logger.info('Stored cookies into storage')
return state
def save_response(self, response, warning=False, **kwargs):
if self.responses_dirname is None:
import tempfile
self.responses_dirname = tempfile.mkdtemp(prefix='weboob_session_')
print('Debug data will be saved in this directory: %s' % self.responses_dirname, file=sys.stderr)
elif not os.path.isdir(self.responses_dirname):
os.makedirs(self.responses_dirname)
import mimetypes
# get the content-type, remove the optional charset part
mimetype = response.headers.get('Content-Type', '').split(';')[0]
# due to http://bugs.python.org/issue1043134
if mimetype == 'text/plain':
ext = '.txt'
else:
# try to get an extension (and avoid adding 'None')
ext = mimetypes.guess_extension(mimetype, False) or ''
path = re.sub(r'[^A-z0-9\.-_]+', '_', urlparse(response.url).path.rpartition('/')[2])[-10:]
if path.endswith(ext):
ext = ''
filename = '%02d-%d%s%s%s' % \
(self.responses_count, response.status_code, '-' if path else '', path, ext)
response_filepath = os.path.join(self.responses_dirname, filename)
with open(response_filepath, 'w') as f:
f.write(response.content)
request = response.request
with open(response_filepath + '-request.txt', 'w') as f:
f.write('%s %s\n\n\n' % (request.method, request.url))
for key, value in request.headers.iteritems():
f.write('%s: %s\n' % (key, value))
if request.body is not None: # separate '' from None
f.write('\n\n\n%s' % request.body)
with open(response_filepath + '-response.txt', 'w') as f:
if hasattr(response.elapsed, 'total_seconds'):
f.write('Time: %3.3fs\n' % response.elapsed.total_seconds())
f.write('%s %s\n\n\n' % (response.status_code, response.reason))
for key, value in response.headers.iteritems():
f.write('%s: %s\n' % (key, value))
match_filepath = os.path.join(self.responses_dirname, 'url_response_match.txt')
with open(match_filepath, 'a') as f:
f.write('# %d %s %s\n' % (response.status_code, response.reason, response.headers.get('Content-Type', '')))
f.write('%s\t%s\n' % (response.url, filename))
self.responses_count += 1
msg = u'Response saved to %s' % response_filepath
if warning:
self.logger.warning(msg)
else:
self.logger.info(msg)
def _setup_session(self, profile):
"""
Set up a python-requests session for our usage.
"""
session = FuturesSession(max_workers=self.MAX_WORKERS, max_retries=self.MAX_RETRIES)
session.proxies = self.PROXIES
session.verify = not self.logger.settings['ssl_insecure'] and self.VERIFY
if not session.verify:
try:
urllib3.disable_warnings()
except AttributeError:
# urllib3 is too old, warnings won't be disabled
pass
# define max_retries: it's mandatory in case a server does not
# handle keep-alive correctly, like the Burp proxy
a = requests.adapters.HTTPAdapter(max_retries=self.MAX_RETRIES)
session.mount('http://', a)
session.mount('https://', a)
if self.TIMEOUT:
session.timeout = self.TIMEOUT
## weboob can only provide proxy and HTTP auth options
session.trust_env = False
profile.setup_session(session)
if self.logger.settings['save_responses']:
session.hooks['response'].append(self.save_response)
self.session = session
session.cookies = WeboobCookieJar()
def set_profile(self, profile):
profile.setup_session(self.session)
def location(self, url, **kwargs):
"""
Like :meth:`open` but also changes the current URL and response.
This is the most common method to request web pages.
Other than that, it has the exact same behavior as open().
"""
assert not kwargs.get('async'), "Please use open() instead of location() to make asynchronous requests."
response = self.open(url, **kwargs)
self.response = response
self.url = self.response.url
return response
def open(self, url, referrer=None,
allow_redirects=True,
stream=None,
timeout=None,
verify=None,
cert=None,
proxies=None,
data_encoding=None,
async=False,
callback=lambda response: response,
**kwargs):
"""
Make an HTTP request like a browser does:
* follow redirects (unless disabled)
* provide referrers (unless disabled)
Unless a `method` is explicitly provided, it makes a GET request,
or a POST if data is not None.
An empty `data` (not None, like '' or {}) *will* make a POST.
It is a wrapper around session.request().
All session.request() options are available.
You should use location() or open() and not session.request(),
since it has some interesting additions, which are easily
individually disabled through the arguments.
Call this instead of location() if you do not want to "visit" the URL
(for instance, you are downloading a file).
When `async` is True, open() returns a Future object (see
concurrent.futures for more details), which can be evaluated with its
result() method. If any exception is raised while processing the request,
it is caught and re-raised when calling result().
For example:
>>> Browser().open('http://google.com', async=True).result().text # doctest: +SKIP
:param url: URL
:type url: str
:param data: POST data
:type url: str or dict or None
:param referrer: Force referrer. False to disable sending it, None for guessing
:type referrer: str or False or None
:param async: Process request in a non-blocking way
:type async: bool
:param callback: Callback to be called when request has finished,
with response as its first and only argument
:type callback: function
:rtype: :class:`requests.Response`
"""
req = self.build_request(url, referrer, data_encoding=data_encoding, **kwargs)
preq = self.prepare_request(req)
if hasattr(preq, '_cookies'):
# The _cookies attribute is not present in requests < 2.2. Since those
# previous versions don't call extract_cookies_to_jar(), this is
# not a problem, as we keep our own cookiejar instance.
preq._cookies = WeboobCookieJar.from_cookiejar(preq._cookies)
if proxies is None:
proxies = self.PROXIES
if verify is None:
verify = not self.logger.settings['ssl_insecure'] and self.VERIFY
if timeout is None:
timeout = self.TIMEOUT
# We define an inner_callback here in order to execute the same code
# regardless of async param.
def inner_callback(future, response):
if allow_redirects:
response = self.handle_refresh(response)
self.raise_for_status(response)
return callback(response)
# call python-requests
response = self.session.send(preq,
allow_redirects=allow_redirects,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
background_callback=async and inner_callback)
if not async:
inner_callback(self, response)
return response
def async_open(self, url, **kwargs):
"""
Shortcut to open(url, async=True).
"""
if 'async' in kwargs:
del kwargs['async']
return self.open(url, async=True, **kwargs)
def raise_for_status(self, response):
"""
Like Response.raise_for_status but will use other classes if needed.
"""
http_error_msg = None
if 400 <= response.status_code < 500:
http_error_msg = '%s Client Error: %s' % (response.status_code, response.reason)
cls = ClientError
if response.status_code == 404:
cls = HTTPNotFound
elif 500 <= response.status_code < 600:
http_error_msg = '%s Server Error: %s' % (response.status_code, response.reason)
cls = ServerError
if http_error_msg:
raise cls(http_error_msg, response=response)
# in case we did not catch something that should have been caught
response.raise_for_status()
def build_request(self, url, referrer=None, data_encoding=None, **kwargs):
"""
Does the same job as open(), but returns a Request without
submitting it.
This allows further customization of the Request.
"""
if isinstance(url, requests.Request):
req = url
url = req.url
else:
req = requests.Request(url=url, **kwargs)
# guess method
if req.method is None:
if req.data:
req.method = 'POST'
else:
req.method = 'GET'
# convert unicode strings to proper encoding
if isinstance(req.data, unicode) and data_encoding:
req.data = req.data.encode(data_encoding)
if isinstance(req.data, dict) and data_encoding:
req.data = dict([(k, v.encode(data_encoding) if isinstance(v, unicode) else v)
for k, v in req.data.iteritems()])
if referrer is None:
referrer = self.get_referrer(self.url, url)
if referrer:
# Yes, it is a misspelling.
req.headers.setdefault('Referer', referrer)
return req
def prepare_request(self, req):
"""
Get a prepared request from a Request object.
This method aims to be overloaded by children classes.
"""
return self.session.prepare_request(req)
REFRESH_RE = re.compile(r"^(?P<sleep>[\d\.]+)(; url=[\"']?(?P<url>.*?)[\"']?)?$", re.IGNORECASE)
def handle_refresh(self, response):
"""
Called by open() to handle the Refresh HTTP header.
It only redirects to the refresh URL if the sleep time is less than
REFRESH_MAX.
"""
if 'Refresh' not in response.headers:
return response
m = self.REFRESH_RE.match(response.headers['Refresh'])
if m:
# XXX perhaps we should not redirect if the refresh url is equal to the current url.
url = m.groupdict().get('url', None) or response.request.url
sleep = float(m.groupdict()['sleep'])
if sleep <= self.REFRESH_MAX:
self.logger.debug('Refresh to %s' % url)
return self.open(url)
else:
self.logger.debug('Do not refresh to %s because %s > REFRESH_MAX(%s)' % (url, sleep, self.REFRESH_MAX))
return response
self.logger.warning('Unable to handle refresh "%s"' % response.headers['Refresh'])
return response
def get_referrer(self, oldurl, newurl):
"""
Get the referrer to send when doing a request.
If we should not send a referrer, it will return None.
Reference: https://en.wikipedia.org/wiki/HTTP_referer
:param oldurl: Current absolute URL
:type oldurl: str or None
:param newurl: Target absolute URL
:type newurl: str
:rtype: str or None
"""
if oldurl is None:
return None
old = urlparse(oldurl)
new = urlparse(newurl)
# Do not leak secure URLs to insecure URLs
if old.scheme == 'https' and new.scheme != 'https':
return None
# Reloading the page. Usually no referrer.
if oldurl == newurl:
return None
# TODO maybe implement some *optional* privacy features:
# * do not leak referrer to other domains (often breaks websites)
# * send a fake referrer (root of the current domain)
# * never send the referrer
# Inspired by the RefControl Firefox addon.
return oldurl
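# A few illustrative cases for get_referrer() (hypothetical URLs, derived from
# the rules implemented above):
#
#   b = Browser()
#   b.get_referrer('https://weboob.org/a', 'http://weboob.org/b')   # None: never leak https to http
#   b.get_referrer('http://weboob.org/a', 'http://weboob.org/a')    # None: page reload
#   b.get_referrer('http://weboob.org/a', 'http://weboob.org/b')    # 'http://weboob.org/a'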
class UrlNotAllowed(Exception):
"""
Raised by :class:`DomainBrowser` when `RESTRICT_URL` is set and the browser
tries to go to a URL not matching `BASEURL`.
"""
class DomainBrowser(Browser):
"""
A browser that handles relative URLs and can have a base URL (usually a domain).
For instance self.location('/hello') will get http://weboob.org/hello
if BASEURL is 'http://weboob.org/'.
"""
BASEURL = None
"""
Base URL, e.g. 'http://weboob.org/' or 'https://weboob.org/'
See absurl().
"""
RESTRICT_URL = False
"""
URLs allowed to load.
This can be used to force SSL (if the BASEURL is SSL) or to prevent any other leakage.
Set to True to allow only URLs starting with the BASEURL.
Set it to a list of allowed URLs if you have multiple allowed URLs.
More complex behavior is possible by overloading url_allowed()
"""
def __init__(self, baseurl=None, *args, **kwargs):
super(DomainBrowser, self).__init__(*args, **kwargs)
if baseurl is not None:
self.BASEURL = baseurl
def url_allowed(self, url):
"""
Checks if we are allowed to visit a URL.
See RESTRICT_URL.
:param url: Absolute URL
:type url: str
:rtype: bool
"""
if self.BASEURL is None or self.RESTRICT_URL is False:
return True
if self.RESTRICT_URL is True:
return url.startswith(self.BASEURL)
for restrict_url in self.RESTRICT_URL:
if url.startswith(restrict_url):
return True
return False
def absurl(self, uri, base=None):
"""
Get the absolute URL, relative to the base URL.
If BASEURL is None, it will try to use the current URL.
If base is False, it will always try to use the current URL.
:param uri: URI to make absolute. It can be already absolute.
:type uri: str
:param base: Base absolute URL.
:type base: str or None or False
:rtype: str
"""
if not base:
base = self.url
if base is None or base is True:
base = self.BASEURL
return urljoin(base, uri)
def open(self, req, *args, **kwargs):
"""
Like :meth:`Browser.open` but handles URLs without domains, using
the :attr:`BASEURL` attribute.
"""
uri = req.url if isinstance(req, requests.Request) else req
url = self.absurl(uri)
if not self.url_allowed(url):
raise UrlNotAllowed(url)
if isinstance(req, requests.Request):
req.url = url
else:
req = url
return super(DomainBrowser, self).open(req, *args, **kwargs)
def go_home(self):
"""
Go to the "home" page, usually the BASEURL.
"""
return self.location(self.BASEURL or self.absurl('/'))
class _PagesBrowserMeta(type):
"""
Private meta-class used to keep the order of URL instances of PagesBrowser.
"""
def __new__(mcs, name, bases, attrs):
urls = [(url_name, attrs.pop(url_name)) for url_name, obj in attrs.items() if isinstance(obj, URL)]
urls.sort(key=lambda x: x[1]._creation_counter)
new_class = super(_PagesBrowserMeta, mcs).__new__(mcs, name, bases, attrs)
if new_class._urls is None:
new_class._urls = OrderedDict()
else:
new_class._urls = deepcopy(new_class._urls)
new_class._urls.update(urls)
return new_class
class PagesBrowser(DomainBrowser):
r"""
A browser which works with pages and keeps the state of navigation.
To use it, you have to derive it and create URL objects as class
attributes. When open() or location() are called, if the url matches
one of the URL objects, it returns a Page object. In case of location(), it
stores it in self.page.
Example:
>>> from .pages import Page
>>> class HomePage(Page):
... pass
...
>>> class ListPage(Page):
... pass
...
>>> class MyBrowser(PagesBrowser):
... BASEURL = 'http://example.org'
... home = URL('/(index\.html)?', HomePage)
... list = URL('/list\.html', ListPage)
...
You can then use URL instances to go on pages.
"""
_urls = None
__metaclass__ = _PagesBrowserMeta
def __getattr__(self, name):
if self._urls is not None and name in self._urls:
return self._urls[name]
else:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__, name))
def __init__(self, *args, **kwargs):
super(PagesBrowser, self).__init__(*args, **kwargs)
self.page = None
self._urls = deepcopy(self._urls)
for url in self._urls.itervalues():
url.browser = self
def open(self, *args, **kwargs):
"""
Same method as
:meth:`weboob.browser.browsers.DomainBrowser.open`, but the
response contains an attribute `page` if the url matches any
:class:`URL` object.
"""
callback = kwargs.pop('callback', lambda response: response)
# Have to define a callback to seamlessly process synchronous and
# asynchronous requests, see :meth:`Browser.open` and its `async`
# and `callback` params.
def internal_callback(response):
# Try to handle the response page with an URL instance.
response.page = None
for url in self._urls.itervalues():
page = url.handle(response)
if page is not None:
self.logger.debug('Handle %s with %s' % (response.url, page.__class__.__name__))
response.page = page
break
if response.page is None:
self.logger.debug('Unable to handle %s' % response.url)
return callback(response)
return super(PagesBrowser, self).open(callback=internal_callback, *args, **kwargs)
def location(self, *args, **kwargs):
"""
Same method as
:meth:`weboob.browser.browsers.Browser.location`, but if the
url matches any :class:`URL` object, an attribute `page` is added to
response, and the attribute :attr:`PagesBrowser.page` is set.
"""
if self.page is not None:
# Call leave hook.
self.page.on_leave()
response = self.open(*args, **kwargs)
self.response = response
self.page = response.page
self.url = response.url
if self.page is not None:
# Call load hook.
self.page.on_load()
# Returns self.response in case on_load recalls location()
return self.response
def pagination(self, func, *args, **kwargs):
r"""
This helper function can be used to handle pagination pages easily.
When the called function raises an exception :class:`NextPage`, it goes
on the wanted page and recall the function.
:class:`NextPage` constructor can take an url or a Request object.
>>> from .pages import HTMLPage
>>> class Page(HTMLPage):
... def iter_values(self):
... for el in self.doc.xpath('//li'):
... yield el.text
... for next in self.doc.xpath('//a'):
... raise NextPage(next.attrib['href'])
...
>>> class Browser(PagesBrowser):
... BASEURL = 'http://people.symlink.me'
... list = URL('/~rom1/projects/weboob/list-(?P<pagenum>\d+).html', Page)
...
>>> b = Browser()
>>> b.list.go(pagenum=1) # doctest: +ELLIPSIS
<weboob.browser.browsers.Page object at 0x...>
>>> list(b.pagination(lambda: b.page.iter_values()))
['One', 'Two', 'Three', 'Four']
"""
while True:
try:
for r in func(*args, **kwargs):
yield r
except NextPage as e:
self.location(e.request)
else:
return
def need_login(func):
"""
Decorator used to require being logged in to access this function.
"""
def inner(browser, *args, **kwargs):
if browser.page is None or not browser.page.logged:
browser.do_login()
return func(browser, *args, **kwargs)
return inner
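# Hedged sketch of typical use (class and method names below are illustrative):
# the decorator calls do_login() whenever there is no current page or the page
# is not flagged as logged in, then runs the wrapped method.
#
#     class MyBankBrowser(LoginBrowser):
#         def do_login(self):
#             pass  # fill and submit the login form here
#
#         @need_login
#         def iter_accounts(self):
#             return self.page.get_accounts()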
class LoginBrowser(PagesBrowser):
"""
A browser which supports login.
"""
def __init__(self, username, password, *args, **kwargs):
super(LoginBrowser, self).__init__(*args, **kwargs)
self.username = username
self.password = password
def do_login(self):
"""
Abstract method to implement to log in on the website.
It is called when a login is needed.
"""
raise NotImplementedError()
def do_logout(self):
self.session.cookies.clear()
def load_state(self, state):
super(LoginBrowser, self).load_state(state)
if 'url' in state:
try:
self.location(state['url'])
except requests.exceptions.HTTPError:
pass
def dump_state(self):
if not self.page or not self.page.logged:
return {}
state = super(LoginBrowser, self).dump_state()
state['url'] = self.page.url
return state
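# Hedged sketch of the intended state round-trip (the URL below is made up):
# dump_state() only records the current URL while logged in, and load_state()
# tries to go back to it, ignoring HTTP errors.
#
#     state = browser.dump_state()       # e.g. {'url': 'https://example.org/home'}
#     other_browser.load_state(state)    # navigates back to the saved URL, if possible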
class APIBrowser(DomainBrowser):
def open(self, *args, **kwargs):
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data'])
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return super(APIBrowser, self).open(*args, **kwargs)
def request(self, *args, **kwargs):
return self.open(*args, **kwargs).json()
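# Hedged usage sketch (endpoint and payload are made up): APIBrowser JSON-encodes
# the 'data' keyword, forces a JSON Content-Type header, and request() decodes
# the JSON response body.
#
#     api = APIBrowser(baseurl='https://api.example.org/')
#     result = api.request('/items', data={'name': 'foo'})   # -> decoded JSON (e.g. a dict)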
|
laurent-george/weboob
|
weboob/browser/browsers.py
|
Python
|
agpl-3.0
| 26,602
|
[
"VisIt"
] |
f6cfb97a206f4cb841201a7473bee186016729aedff8066b190b0f3033342b5b
|
# .. coding: utf8
# $Id: __init__.py 7102 2011-08-24 13:36:28Z milde $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
"""LaTeX2e document tree Writer."""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention: deactivate code by prefixing it with two '#' characters, i.e. ##.
import sys
import os
import time
import re
import string
import urllib
from docutils import frontend, nodes, languages, writers, utils, io
from docutils.transforms import writer_aux
from docutils.math import unimathsymbols2tex, pick_math_environment
# compatibility module for Python 2.3
if not hasattr(string, 'Template'):
import docutils._string_template_compat
string.Template = docutils._string_template_compat.Template
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
default_template = 'default.tex'
default_template_path = os.path.dirname(__file__)
default_preamble = '\n'.join([r'% PDF Standard Fonts',
r'\usepackage{mathptmx} % Times',
r'\usepackage[scaled=.90]{helvet}',
r'\usepackage{courier}'])
settings_spec = (
'LaTeX-Specific Options',
None,
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "a4paper".',
['--documentoptions'],
{'default': 'a4paper', }),
('Footnotes with numbers/symbols by Docutils. (default)',
['--docutils-footnotes'],
{'default': True, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Alias for --docutils-footnotes (deprecated)',
['--use-latex-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for footnote text (deprecated)',
['--figure-footnotes'],
{'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use \\cite command for citations. ',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Use figure floats for citations '
'(might get mixed with real figures). (default)',
['--figure-citations'],
{'dest': 'use_latex_citations', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify LaTeX packages/stylesheets. '
' A style is referenced with \\usepackage if extension is '
'".sty" or omitted and with \\input else. '
' Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'default': '', 'metavar': '<file>',
'overrides': 'stylesheet_path'}),
('Like --stylesheet, but the path is rewritten '
'relative to the output file. ',
['--stylesheet-path'],
{'metavar': '<file>', 'overrides': 'stylesheet'}),
('Link to the stylesheet(s) in the output file. (default)',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Embed the stylesheet(s) in the output file. '
'Stylesheets must be accessible during processing. ',
['--embed-stylesheet'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Customization by LaTeX code in the preamble. '
'Default: select PDF standard fonts (Times, Helvetica, Courier).',
['--latex-preamble'],
{'default': default_preamble}),
('Specify the template file. Default: "%s".' % default_template,
['--template'],
{'default': default_template, 'metavar': '<file>'}),
('Table of contents by LaTeX. (default) ',
['--use-latex-toc'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table of contents by Docutils (without page numbers). ',
['--use-docutils-toc'],
{'dest': 'use_latex_toc', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Add parts on top of the section hierarchy.',
['--use-part-section'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Attach author and date to the document info table. (default) ',
['--use-docutils-docinfo'],
{'dest': 'use_latex_docinfo', 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Attach author and date to the document title.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
("Typeset abstract as topic. (default)",
['--topic-abstract'],
{'dest': 'use_latex_abstract', 'action': 'store_false',
'validator': frontend.validate_boolean}),
("Use LaTeX abstract environment for the document's abstract. ",
['--use-latex-abstract'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "false" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Additional options to the "hyperref" package '
'(default: "").',
['--hyperref-options'], {'default': ''}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. '
'This is the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
('When possible, use the specified environment for literal-blocks. '
'Default is quoting of whitespace and special chars.',
['--literal-block-env'],
{'default': ''}),
('When possible, use verbatim for literal-blocks. '
'Compatibility alias for "--literal-block-env=verbatim".',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "borderless". '
'Default: "standard"',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines', 'borderless'],
'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "", "T1" (default), "OT1", "LGR,T1" or '
'any other combination of options to the `fontenc` package. ',
['--font-encoding'],
{'default': 'T1'}),
('By default the latex-writer puts the reference title into '
'hyperreferences. Specify "ref*" or "pageref*" to get the section '
'number or the page number.',
['--reference-label'],
{'default': None, }),
('Specify style and database for bibtex, for example '
'"--use-bibtex=mystyle,mydb1,mydb2".',
['--use-bibtex'],
{'default': None, }),
),)
settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
}
relative_path_settings = ('stylesheet_path',)
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
head_parts = ('head_prefix', 'requirements', 'latex_preamble',
'stylesheet', 'fallbacks', 'pdfsetup',
'title', 'subtitle', 'titledata')
visitor_attributes = head_parts + ('body_pre_docinfo', 'docinfo',
'dedication', 'abstract', 'body')
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
# Override parent method to add latex-specific transforms
def get_transforms(self):
# call the parent class' method
transform_list = writers.Writer.get_transforms(self)
# print transform_list
# Convert specific admonitions to generic one
transform_list.append(writer_aux.Admonitions)
# TODO: footnote collection transform
# transform_list.append(footnotes.collect)
return transform_list
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
# copy parts
for part in self.visitor_attributes:
setattr(self, part, getattr(visitor, part))
# get template string from file
try:
template_file = open(self.document.settings.template, 'rb')
except IOError:
template_file = open(os.path.join(self.default_template_path,
self.document.settings.template), 'rb')
template = string.Template(unicode(template_file.read(), 'utf-8'))
template_file.close()
# fill template
self.assemble_parts() # create dictionary of parts
self.output = template.substitute(self.parts)
def assemble_parts(self):
"""Assemble the `self.parts` dictionary of output fragments."""
writers.Writer.assemble_parts(self)
for part in self.visitor_attributes:
lines = getattr(self, part)
if part in self.head_parts:
if lines:
lines.append('') # to get a trailing newline
self.parts[part] = '\n'.join(lines)
else:
# body contains inline elements, so join without newline
self.parts[part] = ''.join(lines)
class Babel(object):
"""Language specifics for LaTeX."""
# TeX (babel) language names:
# ! not all of these are supported by Docutils!
#
# based on LyX' languages file with adaptions to `BCP 47`_
# (http://www.rfc-editor.org/rfc/bcp/bcp47.txt) and
# http://www.tug.org/TUGboat/Articles/tb29-3/tb93miklavec.pdf
# * the key without subtags is the default
# * case is ignored
# cf. http://docutils.sourceforge.net/docs/howto/i18n.html
# http://www.w3.org/International/articles/language-tags/
# and http://www.iana.org/assignments/language-subtag-registry
language_codes = {
# code TeX/Babel-name comment
'af': 'afrikaans',
'ar': 'arabic',
# 'be': 'belarusian',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
# 'cop': 'coptic',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'ngerman', # new spelling (de_1996)
'de_1901': 'german', # old spelling
'de_at': 'naustrian',
'de_at_1901': 'austrian',
'dsb': 'lowersorbian',
'el': 'greek', # monotonic (el-monoton)
'el_polyton': 'polutonikogreek',
'en': 'english', # TeX' default language
'en_au': 'australian',
'en_ca': 'canadian',
'en_gb': 'british',
'en_nz': 'newzealand',
'en_us': 'american',
'eo': 'esperanto', # '^' is active
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
# 'fa': 'farsi',
'fi': 'finnish',
'fr': 'french',
'fr_ca': 'canadien',
'ga': 'irish', # Irish Gaelic
# 'grc': # Ancient Greek
'grc_ibycus': 'ibycus', # Ibycus encoding
'gl': 'galician',
'he': 'hebrew',
'hr': 'croatian',
'hsb': 'uppersorbian',
'hu': 'magyar',
'ia': 'interlingua',
'id': 'bahasai', # Bahasa (Indonesian)
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'kk': 'kazakh',
'la': 'latin',
'lt': 'lithuanian',
'lv': 'latvian',
'mn': 'mongolian', # Mongolian, Cyrillic script (mn-cyrl)
'ms': 'bahasam', # Bahasa (Malay)
'nb': 'norsk', # Norwegian Bokmal
'nl': 'dutch',
'nn': 'nynorsk', # Norwegian Nynorsk
'no': 'norsk', # Norwegian Bokmal
'pl': 'polish',
'pt': 'portuges',
'pt_br': 'brazil',
'ro': 'romanian',
'ru': 'russian', # '"' is active
'se': 'samin', # North Sami
# sh-cyrl: Serbo-Croatian, Cyrillic script
'sh-latn': 'serbian', # Serbo-Croatian, Latin script
'sk': 'slovak',
'sl': 'slovene',
'sq': 'albanian',
# 'sr-cyrl': Serbian, Cyrillic script (sr-cyrl)
'sr-latn': 'serbian', # Serbian, Latin script, " active.
'sv': 'swedish',
# 'th': 'thai',
'tr': 'turkish',
'uk': 'ukrainian',
'vi': 'vietnam',
# zh-latn: Chinese Pinyin
}
warn_msg = 'Language "%s" not supported by LaTeX (babel)'
def __init__(self, language_code, reporter=None):
self.reporter = reporter
self.language = self.language_name(language_code)
self.otherlanguages = {}
self.quote_index = 0
self.quotes = ('``', "''")
# language dependent configuration:
# double quotes are "active" in some languages (e.g. German).
self.literal_double_quote = u'"'
if self.language in ('ngerman', 'german', 'austrian', 'naustrian',
'russian'):
self.quotes = (r'\glqq{}', r'\grqq{}')
self.literal_double_quote = ur'\dq{}'
if self.language == 'french':
self.quotes = (r'\og{}', r'\fg{}')
if self.language == 'italian':
self.literal_double_quote = ur'{\char`\"}'
def __call__(self):
"""Return the babel call with correct options and settings"""
languages = self.otherlanguages.keys()
languages.append(self.language or 'english')
self.setup = [r'\usepackage[%s]{babel}' % ','.join(languages)]
if 'spanish' in languages:
# reset active chars to the original meaning:
self.setup.append(
r'\addto\shorthandsspanish{\spanishdeactivate{."~<>}}')
# or prepend r'\def\spanishoptions{es-noshorthands}'
if (languages[-1] == 'english' and
'french' in self.otherlanguages.keys()):
self.setup += ['% Prevent side-effects if French hyphenation '
'patterns are not loaded:',
r'\frenchbsetup{StandardLayout}',
r'\AtBeginDocument{\selectlanguage{%s}'
r'\noextrasfrench}' % self.language]
return '\n'.join(self.setup)
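# Rough illustration of the generated preamble line (no other languages loaded):
#
#     Babel('de')()   # -> r'\usepackage[ngerman]{babel}'
#     Babel('fr')()   # -> r'\usepackage[french]{babel}'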
def next_quote(self):
q = self.quotes[self.quote_index]
self.quote_index = (self.quote_index+1) % 2
return q
def quote_quotes(self,text):
t = None
for part in text.split('"'):
if t is None:
t = part
else:
t += self.next_quote() + part
return t
def language_name(self, language_code):
"""Return TeX language name for `language_code`"""
for tag in utils.normalize_language_tag(language_code):
try:
return self.language_codes[tag]
except KeyError:
pass
if self.reporter is not None:
self.reporter.warning(self.warn_msg % language_code)
return ''
def get_language(self):
"""Return `self.language` (for backwards compatibility with Sphinx).
"""
return self.language
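# Quote-handling sketch: next_quote() alternates opening/closing quotes, so
# quote_quotes() maps plain double quotes to language-specific pairs
# (German configuration from __init__ above):
#
#     Babel('de').quote_quotes(u'ein "Wort"')   # -> u'ein \\glqq{}Wort\\grqq{}'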
# Building blocks for the latex preamble
# --------------------------------------
class SortableDict(dict):
"""Dictionary with additional sorting methods
Tip: use keys starting with '_' for sorting before small letters
and with '~' for sorting after small letters.
"""
def sortedkeys(self):
"""Return sorted list of keys"""
keys = self.keys()
keys.sort()
return keys
def sortedvalues(self):
"""Return list of values sorted by keys"""
return [self[key] for key in self.sortedkeys()]
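# Illustration of the sorting tip above ('_' sorts before lower-case letters,
# '~' after them in ASCII):
#
#     d = SortableDict()
#     d['~last'] = 3; d['graphicx'] = 2; d['_inputenc'] = 1
#     d.sortedkeys()   # -> ['_inputenc', 'graphicx', '~last']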
# PreambleCmds
# `````````````
# A container for LaTeX code snippets that can be
# inserted into the preamble if required in the document.
#
# .. The package 'makecmds' would enable shorter definitions using the
# \providelength and \provideenvironment commands.
# However, it is pretty non-standard (texlive-latex-extra).
class PreambleCmds(object):
"""Building blocks for the latex preamble."""
PreambleCmds.abstract = r"""
% abstract title
\providecommand*{\DUtitleabstract}[1]{\centerline{\textbf{#1}}}"""
PreambleCmds.admonition = r"""
% admonition (specially marked topic)
\providecommand{\DUadmonition}[2][class-arg]{%
% try \DUadmonition#1{#2}:
\ifcsname DUadmonition#1\endcsname%
\csname DUadmonition#1\endcsname{#2}%
\else
\begin{center}
\fbox{\parbox{0.9\textwidth}{#2}}
\end{center}
\fi
}"""
PreambleCmds.align_center = r"""
\makeatletter
\@namedef{DUrolealign-center}{\centering}
\makeatother
"""
## PreambleCmds.caption = r"""% configure caption layout
## \usepackage{caption}
## \captionsetup{singlelinecheck=false}% no exceptions for one-liners"""
PreambleCmds.color = r"""\usepackage{color}"""
PreambleCmds.docinfo = r"""
% docinfo (width of docinfo table)
\DUprovidelength{\DUdocinfowidth}{0.9\textwidth}"""
# PreambleCmds.docinfo._depends = 'providelength'
PreambleCmds.embedded_package_wrapper = r"""\makeatletter
%% embedded stylesheet: %s
%s
\makeatother"""
PreambleCmds.dedication = r"""
% dedication topic
\providecommand{\DUtopicdedication}[1]{\begin{center}#1\end{center}}"""
PreambleCmds.error = r"""
% error admonition title
\providecommand*{\DUtitleerror}[1]{\DUtitle{\color{red}#1}}"""
# PreambleCmds.errortitle._depends = 'color'
PreambleCmds.fieldlist = r"""
% fieldlist environment
\ifthenelse{\isundefined{\DUfieldlist}}{
\newenvironment{DUfieldlist}%
{\quote\description}
{\enddescription\endquote}
}{}"""
PreambleCmds.float_settings = r"""\usepackage{float} % float configuration
\floatplacement{figure}{H} % place figures here definitely"""
PreambleCmds.footnotes = r"""% numeric or symbol footnotes with hyperlinks
\providecommand*{\DUfootnotemark}[3]{%
\raisebox{1em}{\hypertarget{#1}{}}%
\hyperlink{#2}{\textsuperscript{#3}}%
}
\providecommand{\DUfootnotetext}[4]{%
\begingroup%
\renewcommand{\thefootnote}{%
\protect\raisebox{1em}{\protect\hypertarget{#1}{}}%
\protect\hyperlink{#2}{#3}}%
\footnotetext{#4}%
\endgroup%
}"""
PreambleCmds.footnote_floats = r"""% settings for footnotes as floats:
\setlength{\floatsep}{0.5em}
\setlength{\textfloatsep}{\fill}
\addtolength{\textfloatsep}{3em}
\renewcommand{\textfraction}{0.5}
\renewcommand{\topfraction}{0.5}
\renewcommand{\bottomfraction}{0.5}
\setcounter{totalnumber}{50}
\setcounter{topnumber}{50}
\setcounter{bottomnumber}{50}"""
PreambleCmds.graphicx_auto = r"""% Check output format
\ifx\pdftexversion\undefined
\usepackage{graphicx}
\else
\usepackage[pdftex]{graphicx}
\fi"""
PreambleCmds.inline = r"""
% inline markup (custom roles)
% \DUrole{#1}{#2} tries \DUrole#1{#2}
\providecommand*{\DUrole}[2]{%
\ifcsname DUrole#1\endcsname%
\csname DUrole#1\endcsname{#2}%
\else% backwards compatibility: try \docutilsrole#1{#2}
\ifcsname docutilsrole#1\endcsname%
\csname docutilsrole#1\endcsname{#2}%
\else%
#2%
\fi%
\fi%
}"""
PreambleCmds.legend = r"""
% legend environment
\ifthenelse{\isundefined{\DUlegend}}{
\newenvironment{DUlegend}{\small}{}
}{}"""
PreambleCmds.lineblock = r"""
% lineblock environment
\DUprovidelength{\DUlineblockindent}{2.5em}
\ifthenelse{\isundefined{\DUlineblock}}{
\newenvironment{DUlineblock}[1]{%
\list{}{\setlength{\partopsep}{\parskip}
\addtolength{\partopsep}{\baselineskip}
\setlength{\topsep}{0pt}
\setlength{\itemsep}{0.15\baselineskip}
\setlength{\parsep}{0pt}
\setlength{\leftmargin}{#1}}
\raggedright
}
{\endlist}
}{}"""
# PreambleCmds.lineblock._depends = 'providelength'
PreambleCmds.linking = r"""
%% hyperlinks:
\ifthenelse{\isundefined{\hypersetup}}{
\usepackage[%s]{hyperref}
\urlstyle{same} %% normal text font (alternatives: tt, rm, sf)
}{}"""
PreambleCmds.minitoc = r"""%% local table of contents
\usepackage{minitoc}"""
PreambleCmds.optionlist = r"""
% optionlist environment
\providecommand*{\DUoptionlistlabel}[1]{\bf #1 \hfill}
\DUprovidelength{\DUoptionlistindent}{3cm}
\ifthenelse{\isundefined{\DUoptionlist}}{
\newenvironment{DUoptionlist}{%
\list{}{\setlength{\labelwidth}{\DUoptionlistindent}
\setlength{\rightmargin}{1cm}
\setlength{\leftmargin}{\rightmargin}
\addtolength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\renewcommand{\makelabel}{\DUoptionlistlabel}}
}
{\endlist}
}{}"""
# PreambleCmds.optionlist._depends = 'providelength'
PreambleCmds.providelength = r"""
% providelength (provide a length variable and set default, if it is new)
\providecommand*{\DUprovidelength}[2]{
\ifthenelse{\isundefined{#1}}{\newlength{#1}\setlength{#1}{#2}}{}
}"""
PreambleCmds.rubric = r"""
% rubric (informal heading)
\providecommand*{\DUrubric}[2][class-arg]{%
\subsubsection*{\centering\textit{\textmd{#2}}}}"""
PreambleCmds.sidebar = r"""
% sidebar (text outside the main text flow)
\providecommand{\DUsidebar}[2][class-arg]{%
\begin{center}
\colorbox[gray]{0.80}{\parbox{0.9\textwidth}{#2}}
\end{center}
}"""
PreambleCmds.subtitle = r"""
% subtitle (for topic/sidebar)
\providecommand*{\DUsubtitle}[2][class-arg]{\par\emph{#2}\smallskip}"""
PreambleCmds.table = r"""\usepackage{longtable,ltcaption,array}
\setlength{\extrarowheight}{2pt}
\newlength{\DUtablewidth} % internal use in tables"""
# Options [force,almostfull] prevent spurious error messages, see
# de.comp.text.tex/2005-12/msg01855
PreambleCmds.textcomp = """\
\\usepackage{textcomp} % text symbol macros"""
PreambleCmds.titlereference = r"""
% titlereference role
\providecommand*{\DUroletitlereference}[1]{\textsl{#1}}"""
PreambleCmds.title = r"""
% title for topics, admonitions and sidebar
\providecommand*{\DUtitle}[2][class-arg]{%
% call \DUtitle#1{#2} if it exists:
\ifcsname DUtitle#1\endcsname%
\csname DUtitle#1\endcsname{#2}%
\else
\smallskip\noindent\textbf{#2}\smallskip%
\fi
}"""
PreambleCmds.topic = r"""
% topic (quote with heading)
\providecommand{\DUtopic}[2][class-arg]{%
\ifcsname DUtopic#1\endcsname%
\csname DUtopic#1\endcsname{#2}%
\else
\begin{quote}#2\end{quote}
\fi
}"""
PreambleCmds.transition = r"""
% transition (break, fancybreak, anonymous section)
\providecommand*{\DUtransition}[1][class-arg]{%
\hspace*{\fill}\hrulefill\hspace*{\fill}
\vskip 0.5\baselineskip
}"""
class DocumentClass(object):
"""Details of a LaTeX document class."""
def __init__(self, document_class, with_part=False):
self.document_class = document_class
self._with_part = with_part
self.sections = ['section', 'subsection', 'subsubsection',
'paragraph', 'subparagraph']
if self.document_class in ('book', 'memoir', 'report',
'scrbook', 'scrreprt'):
self.sections.insert(0, 'chapter')
if self._with_part:
self.sections.insert(0, 'part')
def section(self, level):
"""Return the LaTeX section name for section `level`.
The name depends on the specific document class.
Level is 1,2,3..., as level 0 is the title.
"""
if level <= len(self.sections):
return self.sections[level-1]
else:
return self.sections[-1]
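# Examples of the level mapping (these follow directly from the lists built above):
#
#     DocumentClass('article').section(1)   # -> 'section'
#     DocumentClass('book').section(1)      # -> 'chapter'
#     DocumentClass('article').section(9)   # -> 'subparagraph' (clamped to the deepest level)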
class Table(object):
"""Manage a table while traversing.
Maybe change to a mixin defining the visit/departs, but then
class Table internal variables are in the Translator.
Table style might be
:standard: horizontal and vertical lines
:booktabs: only horizontal lines (requires "booktabs" LaTeX package)
:borderless: no borders around table cells
:nolines: alias for borderless
"""
def __init__(self,translator,latex_type,table_style):
self._translator = translator
self._latex_type = latex_type
self._table_style = table_style
self._open = 0
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
self.stubs = []
self._in_thead = 0
def open(self):
self._open = True
self._col_specs = []
self.caption = []
self._attrs = {}
self._in_head = False # maybe context with search
def close(self):
self._open = False
self._col_specs = None
self.caption = []
self._attrs = {}
self.stubs = []
def is_open(self):
return self._open
def set_table_style(self, table_style):
if table_style not in ('standard','booktabs','borderless','nolines'):
return
self._table_style = table_style
def get_latex_type(self):
if self._latex_type == 'longtable' and not self.caption:
# do not advance the "table" counter (requires "ltcaption" package)
return('longtable*')
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if attr in self._attrs:
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self._table_style == 'standard':
return '|'
return ''
# horizontal lines are drawn below a row,
def get_opening(self):
return '\n'.join([r'\setlength{\DUtablewidth}{\linewidth}',
r'\begin{%s}[c]' % self.get_latex_type()])
def get_closing(self):
closing = []
if self._table_style == 'booktabs':
closing.append(r'\bottomrule')
# elif self._table_style == 'standard':
# closing.append(r'\hline')
closing.append(r'\end{%s}' % self.get_latex_type())
return '\n'.join(closing)
def visit_colspec(self, node):
self._col_specs.append(node)
# "stubs" list is an attribute of the tgroup element:
self.stubs.append(node.attributes.get('stub'))
def get_colspecs(self):
"""Return column specification for longtable.
Assumes reST line length being 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
usually gets too narrow, therefore we add 1 (fiddle factor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
self._col_width = []
self._rowspan = []
# do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
bar = self.get_vertical_bar()
latex_table_spec = ''
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
self._rowspan.append(0)
latex_table_spec += '%sp{%.3f\\DUtablewidth}' % (bar, colwidth+0.005)
return latex_table_spec+bar
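# Worked example of the width arithmetic above (assumed input: two colspecs with
# colwidth 3 and 5, 'standard' table style, the assumed 80-character line length):
#   total_width = (3+1)/80. + (5+1)/80. = 0.125  (< 1.0, so factor stays 0.93)
#   raw widths  = 0.93*4/80. = 0.0465 and 0.93*6/80. = 0.06975
#   result is roughly '|p{0.052\DUtablewidth}|p{0.075\DUtablewidth}|'
#   (each spec gets the extra 0.005 fiddle factor added before formatting)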
def get_column_width(self):
"""Return columnwidth for current cell (not multicell)."""
return '%.2f\\DUtablewidth' % self._col_width[self._cell_in_row-1]
def get_multicolumn_width(self, start, len_):
"""Return sum of columnwidths for multicell."""
mc_width = sum([width
for width in ([self._col_width[start + co - 1]
for co in range (len_)])])
return '%.2f\\DUtablewidth' % mc_width
def get_caption(self):
if not self.caption:
return ''
caption = ''.join(self.caption)
if 1 == self._translator.thead_depth():
return r'\caption{%s}\\' '\n' % caption
return r'\caption[]{%s (... continued)}\\' '\n' % caption
def need_recurse(self):
if self._latex_type == 'longtable':
return 1 == self._translator.thead_depth()
return 0
def visit_thead(self):
self._in_thead += 1
if self._table_style == 'standard':
return ['\\hline\n']
elif self._table_style == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self._table_style == 'standard':
# a.append('\\hline\n')
if self._table_style == 'booktabs':
a.append('\\midrule\n')
if self._latex_type == 'longtable':
if 1 == self._translator.thead_depth():
a.append('\\endfirsthead\n')
else:
a.append('\\endhead\n')
a.append(r'\multicolumn{%d}{c}' % len(self._col_specs) +
r'{\hfill ... continued on next page} \\')
a.append('\n\\endfoot\n\\endlastfoot\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead -= 1
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self._table_style == 'standard':
rowspans = [i+1 for i in range(len(self._rowspan))
if (self._rowspan[i]<=0)]
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while 1:
try:
c_start = rowspans.pop()
except IndexError:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
except:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
except:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
def is_stub_column(self):
if len(self.stubs) >= self._cell_in_row:
return self.stubs[self._cell_in_row-1]
return False
class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them
# to other packages, as done with babel.
# Dummy settings might be taken from document settings
# Config setting defaults
# -----------------------
# TODO: use mixins for different implementations.
# list environment for docinfo. else tabularx
## use_optionlist_for_docinfo = False # TODO: NOT YET IN USE
# Use compound enumerations (1.A.1.)
compound_enumerators = 0
# If using compound enumerations, include section information.
section_prefix_for_enumerators = 0
# This is the character that separates the section ("." subsection ...)
# prefix from the regular list enumerator.
section_enumerator_separator = '-'
# Auxiliary variables
# -------------------
has_latex_toc = False # is there a toc in the doc? (needed by minitoc)
is_toc_list = False # is the current bullet_list a ToC?
section_level = 0
# Flags to encode():
# inside citation reference labels underscores don't need to be escaped
inside_citation_reference_label = False
verbatim = False # do not encode
insert_non_breaking_blanks = False # replace blanks by "~"
insert_newline = False # add latex newline commands
literal = False # literal text (block or inline)
def __init__(self, document, babel_class=Babel):
nodes.NodeVisitor.__init__(self, document)
# Reporter
# ~~~~~~~~
self.warn = self.document.reporter.warning
self.error = self.document.reporter.error
# Settings
# ~~~~~~~~
self.settings = settings = document.settings
self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo
self._use_latex_citations = settings.use_latex_citations
self.embed_stylesheet = settings.embed_stylesheet
self._reference_label = settings.reference_label
self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators
self.font_encoding = getattr(settings, 'font_encoding', '')
self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators)
self.section_enumerator_separator = (
settings.section_enumerator_separator.replace('_', r'\_'))
# literal blocks:
self.literal_block_env = ''
self.literal_block_options = ''
if settings.literal_block_env != '':
(none,
self.literal_block_env,
self.literal_block_options,
none ) = re.split('(\w+)(.*)', settings.literal_block_env)
elif settings.use_verbatim_when_possible:
self.literal_block_env = 'verbatim'
#
if self.settings.use_bibtex:
self.bibtex = self.settings.use_bibtex.split(',',1)
# TODO avoid errors on not declared citations.
else:
self.bibtex = None
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
if self.babel.language not in ('english', ''):
d_options.append(self.babel.language)
self.documentoptions = ','.join(filter(None, d_options))
self.d_class = DocumentClass(settings.documentclass,
settings.use_part_section)
# graphic package options:
if self.settings.graphicx_option == '':
self.graphicx_package = r'\usepackage{graphicx}'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = PreambleCmds.graphicx_auto
else:
self.graphicx_package = (r'\usepackage[%s]{graphicx}' %
self.settings.graphicx_option)
# footnotes:
self.docutils_footnotes = settings.docutils_footnotes
if settings.use_latex_footnotes:
self.docutils_footnotes = True
self.warn('`use_latex_footnotes` is deprecated. '
'The setting has been renamed to `docutils_footnotes` '
'and the alias will be removed in a future version.')
self.figure_footnotes = settings.figure_footnotes
if self.figure_footnotes:
self.docutils_footnotes = True
self.warn('The "figure footnotes" workaround/setting is strongly '
'deprecated and will be removed in a future version.')
# Output collection stacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
(self.documentoptions, self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
self.stylesheet = []
self.fallbacks = SortableDict() # made a list in depart_document()
self.pdfsetup = [] # PDF properties (hyperref package)
self.title = []
self.subtitle = []
self.titledata = [] # \title, \author, \date
## self.body_prefix = ['\\begin{document}\n']
self.body_pre_docinfo = [] # \maketitle
self.docinfo = []
self.dedication = []
self.abstract = []
self.body = []
## self.body_suffix = ['\\end{document}\n']
# A heterogeneous stack used in conjunction with the tree traversal.
# Make sure that the pops correspond to the pushes:
self.context = []
# Title metadata:
self.title_labels = []
self.subtitle_labels = []
# (if use_latex_docinfo: collects lists of
# author/organization/contact/address lines)
self.author_stack = []
self.date = []
# PDF properties: pdftitle, pdfauthor
# TODO?: pdfcreator, pdfproducer, pdfsubject, pdfkeywords
self.pdfinfo = []
self.pdfauthor = []
# Stack of section counters so that we don't have to use_latex_toc.
# This will grow and shrink as processing occurs.
# Initialized for potential first-level sections.
self._section_number = [0]
# The current stack of enumerations so that we can expand
# them into a compound enumeration.
self._enumeration_counters = []
# The maximum number of enumeration counters we've used.
# If we go beyond this number, we need to create a new
# counter; otherwise, just reuse an old one.
self._max_enumeration_counters = 0
self._bibitems = []
# object for a table while processing.
self.table_stack = []
self.active_table = Table(self, 'longtable', settings.table_style)
# Where to collect the output of visitor methods (default: body)
self.out = self.body
self.out_stack = [] # stack of output collectors
# Process settings
# ~~~~~~~~~~~~~~~~
# Encodings:
# Docutils' output-encoding => TeX input encoding
if self.latex_encoding != 'ascii':
self.requirements['_inputenc'] = (r'\usepackage[%s]{inputenc}'
% self.latex_encoding)
# TeX font encoding
if self.font_encoding:
self.requirements['_fontenc'] = (r'\usepackage[%s]{fontenc}' %
self.font_encoding)
# page layout with typearea (if there are relevant document options)
if (settings.documentclass.find('scr') == -1 and
(self.documentoptions.find('DIV') != -1 or
self.documentoptions.find('BCOR') != -1)):
self.requirements['typearea'] = r'\usepackage{typearea}'
# Stylesheets
# get list of style sheets from settings
styles = utils.get_stylesheet_list(settings)
# adapt path if --stylesheet_path is used
if settings.stylesheet_path and not(self.embed_stylesheet):
styles = [utils.relative_path(settings._destination, sheet)
for sheet in styles]
for sheet in styles:
(base, ext) = os.path.splitext(sheet)
is_package = ext in ['.sty', '']
if self.embed_stylesheet:
if is_package:
sheet = base + '.sty' # adapt package name
# wrap in \makeatletter, \makeatother
wrapper = PreambleCmds.embedded_package_wrapper
else:
wrapper = '%% embedded stylesheet: %s\n%s'
settings.record_dependencies.add(sheet)
self.stylesheet.append(wrapper %
(sheet, io.FileInput(source_path=sheet, encoding='utf-8').read()))
else: # link to style sheet
if is_package:
self.stylesheet.append(r'\usepackage{%s}' % base)
else:
self.stylesheet.append(r'\input{%s}' % sheet)
# PDF setup
if self.hyperlink_color in ('0', 'false', 'False', ''):
self.hyperref_options = ''
else:
self.hyperref_options = 'colorlinks=true,linkcolor=%s,urlcolor=%s' % (
self.hyperlink_color, self.hyperlink_color)
if settings.hyperref_options:
self.hyperref_options += ',' + settings.hyperref_options
# LaTeX Toc
# include all supported sections in toc and PDF bookmarks
# (or use documentclass-default (as currently))?
## if self.use_latex_toc:
## self.requirements['tocdepth'] = (r'\setcounter{tocdepth}{%d}' %
## len(self.d_class.sections))
# LaTeX section numbering
if not self.settings.sectnum_xform: # section numbering by LaTeX:
# sectnum_depth:
# None "sectnum" directive without depth arg -> LaTeX default
# 0 no "sectnum" directive -> no section numbers
# else value of the "depth" argument: translate to LaTeX level
# -1 part (0 with "article" document class)
# 0 chapter (missing in "article" document class)
# 1 section
# 2 subsection
# 3 subsubsection
# 4 paragraph
# 5 subparagraph
if settings.sectnum_depth is not None:
# limit to supported levels
sectnum_depth = min(settings.sectnum_depth,
len(self.d_class.sections))
# adjust to document class and use_part_section settings
if 'chapter' in self.d_class.sections:
sectnum_depth -= 1
if self.d_class.sections[0] == 'part':
sectnum_depth -= 1
self.requirements['sectnum_depth'] = (
r'\setcounter{secnumdepth}{%d}' % sectnum_depth)
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
self.requirements['sectnum_start'] = (
r'\setcounter{%s}{%d}' % (self.d_class.sections[0],
settings.sectnum_start-1))
# currently ignored (configure in a stylesheet):
## settings.sectnum_prefix
## settings.sectnum_suffix
# Auxiliary Methods
# -----------------
def to_latex_encoding(self,docutils_encoding):
"""Translate docutils encoding name into LaTeX's.
Default method is remove "-" and "_" chars from docutils_encoding.
"""
tr = { 'iso-8859-1': 'latin1', # west european
'iso-8859-2': 'latin2', # east european
'iso-8859-3': 'latin3', # esperanto, maltese
'iso-8859-4': 'latin4', # north european, scandinavian, baltic
'iso-8859-5': 'iso88595', # cyrillic (ISO)
'iso-8859-9': 'latin5', # turkish
'iso-8859-15': 'latin9', # latin9, update to latin1.
'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
'windows-1251': 'cp1251', # cyrillic (on Windows)
'koi8-r': 'koi8-r', # cyrillic (Russian)
'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
'windows-1250': 'cp1250', #
'windows-1252': 'cp1252', #
'us-ascii': 'ascii', # ASCII (US)
# unmatched encodings
#'': 'applemac',
#'': 'ansinew', # windows 3.1 ansi
#'': 'ascii', # ASCII encoding for the range 32--127.
#'': 'cp437', # dos latin us
#'': 'cp850', # dos latin 1
#'': 'cp852', # dos latin 2
#'': 'decmulti',
#'': 'latin10',
#'iso-8859-6': '' # arabic
#'iso-8859-7': '' # greek
#'iso-8859-8': '' # hebrew
#'iso-8859-10': '' # latin6, more complete iso-8859-4
}
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
# convert: latin-1, latin_1, utf-8 and similar things
encoding = encoding.replace('_', '').replace('-', '')
# strip the error handler
return encoding.split(':')[0]
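# Examples of the mapping (first from the table, then the fallback rule):
#
#     self.to_latex_encoding('iso-8859-1')     # -> 'latin1'
#     self.to_latex_encoding('utf-8')          # -> 'utf8'  (dashes/underscores dropped)
#     self.to_latex_encoding('utf-8:replace')  # -> 'utf8'  (error handler stripped)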
def language_label(self, docutil_label):
return self.language_module.labels[docutil_label]
def ensure_math(self, text):
if not hasattr(self, 'ensure_math_re'):
chars = { # lnot,pm,twosuperior,threesuperior,mu,onesuperior,times,div
'latin1' : '\xac\xb1\xb2\xb3\xb5\xb9\xd7\xf7' , # ¬±²³µ¹×÷
# TODO?: use texcomp instead.
}
self.ensure_math_re = re.compile('([%s])' % chars['latin1'])
text = self.ensure_math_re.sub(r'\\ensuremath{\1}', text)
return text
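# Example: latin-1 math characters get wrapped in \ensuremath, e.g.
# ensure_math(u'2 \xb1 1') returns u'2 \\ensuremath{\xb1} 1'.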
def encode(self, text):
"""Return text with 'problematic' characters escaped.
Escape the ten special printing characters ``# $ % & ~ _ ^ \ { }``,
square brackets ``[ ]``, double quotes and (in OT1) ``< | >``.
Separate ``-`` (and more in literal text) to prevent input ligatures.
Translate non-supported Unicode characters.
"""
if self.verbatim:
return text
# Separate compound characters, e.g. '--' to '-{}-'.
separate_chars = '-'
# In monospace-font, we also separate ',,', '``' and "''" and some
# other characters which can't occur in non-literal text.
if self.literal:
separate_chars += ',`\'"<>'
# LaTeX encoding maps:
special_chars = {
ord('#'): ur'\#',
ord('$'): ur'\$',
ord('%'): ur'\%',
ord('&'): ur'\&',
ord('~'): ur'\textasciitilde{}',
ord('_'): ur'\_',
ord('^'): ur'\textasciicircum{}',
ord('\\'): ur'\textbackslash{}',
ord('{'): ur'\{',
ord('}'): ur'\}',
# Square brackets are ordinary chars and cannot be escaped with '\',
# so we put them in a group '{[}'. (Alternative: ensure that all
# macros with optional arguments are terminated with {} and text
# inside any optional argument is put in a group ``[{text}]``).
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
ord('['): ur'{[}',
ord(']'): ur'{]}'
}
# Unicode chars that are not recognized by LaTeX's utf8 encoding
unsupported_unicode_chars = {
0x00A0: ur'~', # NO-BREAK SPACE
0x00AD: ur'\-', # SOFT HYPHEN
#
0x2008: ur'\,', # PUNCTUATION SPACE
0x2011: ur'\hbox{-}', # NON-BREAKING HYPHEN
0x202F: ur'\,', # NARROW NO-BREAK SPACE
0x21d4: ur'$\Leftrightarrow$',
# Docutils footnote symbols:
0x2660: ur'$\spadesuit$',
0x2663: ur'$\clubsuit$',
}
# Unicode chars that are recognized by LaTeX's utf8 encoding
unicode_chars = {
0x200C: ur'\textcompwordmark', # ZERO WIDTH NON-JOINER
0x2013: ur'\textendash{}',
0x2014: ur'\textemdash{}',
0x2018: ur'\textquoteleft{}',
0x2019: ur'\textquoteright{}',
0x201A: ur'\quotesinglbase{}', # SINGLE LOW-9 QUOTATION MARK
0x201C: ur'\textquotedblleft{}',
0x201D: ur'\textquotedblright{}',
0x201E: ur'\quotedblbase{}', # DOUBLE LOW-9 QUOTATION MARK
0x2030: ur'\textperthousand{}', # PER MILLE SIGN
0x2031: ur'\textpertenthousand{}', # PER TEN THOUSAND SIGN
0x2039: ur'\guilsinglleft{}',
0x203A: ur'\guilsinglright{}',
0x2423: ur'\textvisiblespace{}', # OPEN BOX
0x2020: ur'\dag{}',
0x2021: ur'\ddag{}',
0x2026: ur'\dots{}',
0x2122: ur'\texttrademark{}',
}
# Unicode chars that require a feature/package to render
pifont_chars = {
0x2665: ur'\ding{170}', # black heartsuit
0x2666: ur'\ding{169}', # black diamondsuit
0x2713: ur'\ding{51}', # check mark
0x2717: ur'\ding{55}', # ballot X (cross mark)
}
# recognized with 'utf8', if textcomp is loaded
textcomp_chars = {
# Latin-1 Supplement
0x00a2: ur'\textcent{}', # ¢ CENT SIGN
0x00a4: ur'\textcurrency{}', # ¤ CURRENCY SYMBOL
0x00a5: ur'\textyen{}', # ¥ YEN SIGN
0x00a6: ur'\textbrokenbar{}', # ¦ BROKEN BAR
0x00a7: ur'\textsection{}', # § SECTION SIGN
0x00a8: ur'\textasciidieresis{}', # ¨ DIAERESIS
0x00a9: ur'\textcopyright{}', # © COPYRIGHT SIGN
0x00aa: ur'\textordfeminine{}', # ª FEMININE ORDINAL INDICATOR
0x00ac: ur'\textlnot{}', # ¬ NOT SIGN
0x00ae: ur'\textregistered{}', # ® REGISTERED SIGN
0x00af: ur'\textasciimacron{}', # ¯ MACRON
0x00b0: ur'\textdegree{}', # ° DEGREE SIGN
0x00b1: ur'\textpm{}', # ± PLUS-MINUS SIGN
0x00b2: ur'\texttwosuperior{}', # ² SUPERSCRIPT TWO
0x00b3: ur'\textthreesuperior{}', # ³ SUPERSCRIPT THREE
0x00b4: ur'\textasciiacute{}', # ´ ACUTE ACCENT
0x00b5: ur'\textmu{}', # µ MICRO SIGN
0x00b6: ur'\textparagraph{}', # ¶ PILCROW SIGN # not equal to \textpilcrow
0x00b9: ur'\textonesuperior{}', # ¹ SUPERSCRIPT ONE
0x00ba: ur'\textordmasculine{}', # º MASCULINE ORDINAL INDICATOR
0x00bc: ur'\textonequarter{}', # 1/4 FRACTION
0x00bd: ur'\textonehalf{}', # 1/2 FRACTION
0x00be: ur'\textthreequarters{}', # 3/4 FRACTION
0x00d7: ur'\texttimes{}', # × MULTIPLICATION SIGN
0x00f7: ur'\textdiv{}', # ÷ DIVISION SIGN
#
0x0192: ur'\textflorin{}', # LATIN SMALL LETTER F WITH HOOK
0x02b9: ur'\textasciiacute{}', # MODIFIER LETTER PRIME
0x02ba: ur'\textacutedbl{}', # MODIFIER LETTER DOUBLE PRIME
0x2016: ur'\textbardbl{}', # DOUBLE VERTICAL LINE
0x2022: ur'\textbullet{}', # BULLET
0x2032: ur'\textasciiacute{}', # PRIME
0x2033: ur'\textacutedbl{}', # DOUBLE PRIME
0x2035: ur'\textasciigrave{}', # REVERSED PRIME
0x2036: ur'\textgravedbl{}', # REVERSED DOUBLE PRIME
0x203b: ur'\textreferencemark{}', # REFERENCE MARK
0x203d: ur'\textinterrobang{}', # INTERROBANG
0x2044: ur'\textfractionsolidus{}', # FRACTION SLASH
0x2045: ur'\textlquill{}', # LEFT SQUARE BRACKET WITH QUILL
0x2046: ur'\textrquill{}', # RIGHT SQUARE BRACKET WITH QUILL
0x2052: ur'\textdiscount{}', # COMMERCIAL MINUS SIGN
0x20a1: ur'\textcolonmonetary{}', # COLON SIGN
0x20a3: ur'\textfrenchfranc{}', # FRENCH FRANC SIGN
0x20a4: ur'\textlira{}', # LIRA SIGN
0x20a6: ur'\textnaira{}', # NAIRA SIGN
0x20a9: ur'\textwon{}', # WON SIGN
0x20ab: ur'\textdong{}', # DONG SIGN
0x20ac: ur'\texteuro{}', # EURO SIGN
0x20b1: ur'\textpeso{}', # PESO SIGN
0x20b2: ur'\textguarani{}', # GUARANI SIGN
0x2103: ur'\textcelsius{}', # DEGREE CELSIUS
0x2116: ur'\textnumero{}', # NUMERO SIGN
0x2117: ur'\textcircledP{}', # SOUND RECORDING COPYRIGHT
0x211e: ur'\textrecipe{}', # PRESCRIPTION TAKE
0x2120: ur'\textservicemark{}', # SERVICE MARK
0x2122: ur'\texttrademark{}', # TRADE MARK SIGN
0x2126: ur'\textohm{}', # OHM SIGN
0x2127: ur'\textmho{}', # INVERTED OHM SIGN
0x212e: ur'\textestimated{}', # ESTIMATED SYMBOL
0x2190: ur'\textleftarrow{}', # LEFTWARDS ARROW
0x2191: ur'\textuparrow{}', # UPWARDS ARROW
0x2192: ur'\textrightarrow{}', # RIGHTWARDS ARROW
0x2193: ur'\textdownarrow{}', # DOWNWARDS ARROW
0x2212: ur'\textminus{}', # MINUS SIGN
0x2217: ur'\textasteriskcentered{}', # ASTERISK OPERATOR
0x221a: ur'\textsurd{}', # SQUARE ROOT
0x2422: ur'\textblank{}', # BLANK SYMBOL
0x25e6: ur'\textopenbullet{}', # WHITE BULLET
0x25ef: ur'\textbigcircle{}', # LARGE CIRCLE
0x266a: ur'\textmusicalnote{}', # EIGHTH NOTE
0x26ad: ur'\textmarried{}', # MARRIAGE SYMBOL
0x26ae: ur'\textdivorced{}', # DIVORCE SYMBOL
0x27e8: ur'\textlangle{}', # MATHEMATICAL LEFT ANGLE BRACKET
0x27e9: ur'\textrangle{}', # MATHEMATICAL RIGHT ANGLE BRACKET
}
# TODO: greek alphabet ... ?
# see also LaTeX codec
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
# and unimap.py from TeXML
# set up the translation table:
table = special_chars
# keep the underscore in citation references
if self.inside_citation_reference_label:
del(table[ord('_')])
# Workarounds for OT1 font-encoding
if self.font_encoding in ['OT1', '']:
# * out-of-order characters in cmtt
if self.literal:
# replace underscore by underlined blank,
# because this has correct width.
table[ord('_')] = u'\\underline{~}'
# the backslash doesn't work, so we use a mirrored slash.
# \reflectbox is provided by graphicx:
self.requirements['graphicx'] = self.graphicx_package
table[ord('\\')] = ur'\reflectbox{/}'
# * ``< | >`` come out as different chars (except for cmtt):
else:
table[ord('|')] = ur'\textbar{}'
table[ord('<')] = ur'\textless{}'
table[ord('>')] = ur'\textgreater{}'
if self.insert_non_breaking_blanks:
table[ord(' ')] = ur'~'
if self.literal:
# double quotes are 'active' in some languages
# TODO: use \textquotedbl if font encoding starts with T?
table[ord('"')] = self.babel.literal_double_quote
# Unicode chars:
table.update(unsupported_unicode_chars)
table.update(pifont_chars)
if not self.latex_encoding.startswith('utf8'):
table.update(unicode_chars)
table.update(textcomp_chars)
# Characters that require a feature/package to render
for ch in text:
if ord(ch) in pifont_chars:
self.requirements['pifont'] = '\\usepackage{pifont}'
if ord(ch) in textcomp_chars:
self.requirements['textcomp'] = PreambleCmds.textcomp
text = text.translate(table)
# Break up input ligatures
for char in separate_chars * 2:
# Do it twice ("* 2") because otherwise we would replace
# '---' by '-{}--'.
text = text.replace(char + char, char + '{}' + char)
# Literal line breaks (in address or literal blocks):
if self.insert_newline:
lines = text.split('\n')
# for blank lines, insert a protected space, to avoid
# ! LaTeX Error: There's no line here to end.
lines = [line + '~'*(not line.lstrip())
for line in lines[:-1]] + lines[-1:]
text = '\\\\\n'.join(lines)
if not self.literal:
text = self.babel.quote_quotes(text)
if self.literal and not self.insert_non_breaking_blanks:
# preserve runs of spaces but allow wrapping
text = text.replace('  ', ' ~')
if not self.latex_encoding.startswith('utf8'):
text = self.ensure_math(text)
return text
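# Illustrative calls (assuming default settings: neither literal nor verbatim
# mode, T1 font encoding, not inside a citation reference label):
#
#     self.encode(u'50% of $amount_1')   # -> u'50\\% of \\$amount\\_1'
#     self.encode(u'x -- y')             # -> u'x -{}- y'  (input ligature broken up)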
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, encode, and return attribute value text."""
return self.encode(whitespace.sub(' ', text))
# TODO: is this used anywhere? -> update (use template) or delete
## def astext(self):
## """Assemble document parts and return as string."""
## head = '\n'.join(self.head_prefix + self.stylesheet + self.head)
## body = ''.join(self.body_prefix + self.body + self.body_suffix)
## return head + '\n' + body
def is_inline(self, node):
"""Check whether a node represents an inline element"""
return isinstance(node.parent, nodes.TextElement)
def append_hypertargets(self, node):
"""Append hypertargets for all ids of `node`"""
# hypertarget places the anchor at the target's baseline,
# so we raise it explicitly
self.out.append('%\n'.join(['\\raisebox{1em}{\\hypertarget{%s}{}}' %
id for id in node['ids']]))
def ids_to_labels(self, node, set_anchor=True):
"""Return list of label definitions for all ids of `node`
If `set_anchor` is True, an anchor is set with \phantomsection.
"""
labels = ['\\label{%s}' % id for id in node.get('ids', [])]
if set_anchor and labels:
labels.insert(0, '\\phantomsection')
return labels
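# Example: for a node with ids ['intro', 'sec-1'] and set_anchor=True this
# returns ['\\phantomsection', '\\label{intro}', '\\label{sec-1}'].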
def push_output_collector(self, new_out):
self.out_stack.append(self.out)
self.out = new_out
def pop_output_collector(self):
self.out = self.out_stack.pop()
# Visitor methods
# ---------------
def visit_Text(self, node):
self.out.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_address(self, node):
self.visit_docinfo_item(node, 'address')
def depart_address(self, node):
self.depart_docinfo_item(node)
def visit_admonition(self, node):
self.fallbacks['admonition'] = PreambleCmds.admonition
if 'error' in node['classes']:
self.fallbacks['error'] = PreambleCmds.error
# strip the generic 'admonition' from the list of classes
node['classes'] = [cls for cls in node['classes']
if cls != 'admonition']
self.out.append('\n\\DUadmonition[%s]{\n' % ','.join(node['classes']))
def depart_admonition(self, node=None):
self.out.append('}\n')
def visit_author(self, node):
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
self.depart_docinfo_item(node)
def visit_authors(self, node):
# not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
pass
def visit_block_quote(self, node):
self.out.append( '%\n\\begin{quote}\n')
if node['classes']:
self.visit_inline(node)
def depart_block_quote(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '\n\\end{quote}\n')
def visit_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '%\n\\begin{list}{}{}\n' )
else:
self.out.append( '%\n\\begin{itemize}\n' )
def depart_bullet_list(self, node):
if self.is_toc_list:
self.out.append( '\n\\end{list}\n' )
else:
self.out.append( '\n\\end{itemize}\n' )
def visit_superscript(self, node):
self.out.append(r'\textsuperscript{')
if node['classes']:
self.visit_inline(node)
def depart_superscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_subscript(self, node):
self.out.append(r'\textsubscript{') # requires `fixltx2e`
if node['classes']:
self.visit_inline(node)
def depart_subscript(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_caption(self, node):
self.out.append( '\\caption{' )
def depart_caption(self, node):
self.out.append('}\n')
def visit_title_reference(self, node):
self.fallbacks['titlereference'] = PreambleCmds.titlereference
self.out.append(r'\DUroletitlereference{')
if node['classes']:
self.visit_inline(node)
def depart_title_reference(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.push_output_collector([])
else:
# TODO: do we need these?
## self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append(r'\begin{figure}[b]')
self.append_hypertargets(node)
def depart_citation(self, node):
if self._use_latex_citations:
label = self.out[0]
text = ''.join(self.out[1:])
self._bibitems.append([label, text])
self.pop_output_collector()
else:
self.out.append('\\end{figure}\n')
def visit_citation_reference(self, node):
if self._use_latex_citations:
if not self.inside_citation_reference_label:
self.out.append(r'\cite{')
self.inside_citation_reference_label = 1
else:
assert self.body[-1] in (' ', '\n'),\
'unexpected non-whitespace while in reference label'
del self.body[-1]
else:
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
self.out.append('\\hyperlink{%s}{[' % href)
def depart_citation_reference(self, node):
if self._use_latex_citations:
followup_citation = False
# check for a following citation separated by a space or newline
next_siblings = node.traverse(descend=0, siblings=1,
include_self=0)
if len(next_siblings) > 1:
next = next_siblings[0]
if (isinstance(next, nodes.Text) and
next.astext() in (' ', '\n')):
if next_siblings[1].__class__ == node.__class__:
followup_citation = True
if followup_citation:
self.out.append(',')
else:
self.out.append('}')
self.inside_citation_reference_label = False
else:
self.out.append(']}')
def visit_classifier(self, node):
self.out.append( '(\\textbf{' )
def depart_classifier(self, node):
self.out.append( '})\n' )
def visit_colspec(self, node):
self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
def visit_comment(self, node):
# Precede every line with a comment sign, wrap in newlines
self.out.append('\n%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
def depart_comment(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
def depart_contact(self, node):
self.depart_docinfo_item(node)
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_copyright(self, node):
self.visit_docinfo_item(node, 'copyright')
def depart_copyright(self, node):
self.depart_docinfo_item(node)
def visit_date(self, node):
self.visit_docinfo_item(node, 'date')
def depart_date(self, node):
self.depart_docinfo_item(node)
def visit_decoration(self, node):
# header and footer
pass
def depart_decoration(self, node):
pass
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.out.append('\n')
def visit_definition_list(self, node):
self.out.append( '%\n\\begin{description}\n' )
def depart_definition_list(self, node):
self.out.append( '\\end{description}\n' )
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_description(self, node):
self.out.append(' ')
def depart_description(self, node):
pass
def visit_docinfo(self, node):
self.push_output_collector(self.docinfo)
def depart_docinfo(self, node):
self.pop_output_collector()
        # Some items (e.g. author) end up at other places
if self.docinfo:
# tabularx: automatic width of columns, no page breaks allowed.
self.requirements['tabularx'] = r'\usepackage{tabularx}'
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['docinfo'] = PreambleCmds.docinfo
#
self.docinfo.insert(0, '\n% Docinfo\n'
'\\begin{center}\n'
'\\begin{tabularx}{\\DUdocinfowidth}{lX}\n')
self.docinfo.append('\\end{tabularx}\n'
'\\end{center}\n')
def visit_docinfo_item(self, node, name):
if name == 'author':
self.pdfauthor.append(self.attval(node.astext()))
if self.use_latex_docinfo:
if name in ('author', 'organization', 'contact', 'address'):
                # We attach these to the last author. If any of them precedes
                # the first author, put them in a separate "author" group
                # (for lack of better semantics).
if name == 'author' or not self.author_stack:
self.author_stack.append([])
if name == 'address': # newlines are meaningful
self.insert_newline = True
text = self.encode(node.astext())
self.insert_newline = False
else:
text = self.attval(node.astext())
self.author_stack[-1].append(text)
raise nodes.SkipNode
elif name == 'date':
self.date.append(self.attval(node.astext()))
raise nodes.SkipNode
self.out.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
self.insert_newline = 1
self.out.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
else:
self.context.append(' \\\\\n')
def depart_docinfo_item(self, node):
self.out.append(self.context.pop())
# for address we did set insert_newline
self.insert_newline = False
def visit_doctest_block(self, node):
self.visit_literal_block(node)
def depart_doctest_block(self, node):
self.depart_literal_block(node)
def visit_document(self, node):
# titled document?
if (self.use_latex_docinfo or len(node) and
isinstance(node[0], nodes.title)):
self.title_labels += self.ids_to_labels(node, set_anchor=False)
def depart_document(self, node):
# Complete header with information gained from walkabout
# * language setup
if (self.babel.otherlanguages or
self.babel.language not in ('', 'english')):
self.requirements['babel'] = self.babel()
# * conditional requirements (before style sheet)
self.requirements = self.requirements.sortedvalues()
        # * conditional fallback definitions (after style sheet)
self.fallbacks = self.fallbacks.sortedvalues()
# * PDF properties
self.pdfsetup.append(PreambleCmds.linking % self.hyperref_options)
if self.pdfauthor:
authors = self.author_separator.join(self.pdfauthor)
self.pdfinfo.append(' pdfauthor={%s}' % authors)
if self.pdfinfo:
self.pdfsetup += [r'\hypersetup{'] + self.pdfinfo + ['}']
# Complete body
# * document title (with "use_latex_docinfo" also
# 'author', 'organization', 'contact', 'address' and 'date')
if self.title or (
self.use_latex_docinfo and (self.author_stack or self.date)):
# with the default template, titledata is written to the preamble
self.titledata.append('%%% Title Data')
# \title (empty \title prevents error with \maketitle)
if self.title:
self.title.insert(0, '\phantomsection%\n ')
title = [''.join(self.title)] + self.title_labels
if self.subtitle:
title += [r'\\ % subtitle',
r'\large{%s}' % ''.join(self.subtitle)
] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
self.body_pre_docinfo.append('\\maketitle\n')
# * bibliography
# TODO insertion point of bibliography should be configurable.
if self._use_latex_citations and len(self._bibitems)>0:
if not self.bibtex:
widest_label = ''
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_','_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
(bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
self.bibtex[0])
self.out.append('\\bibliography{%s}\n' % self.bibtex[1])
# * make sure to generate a toc file if needed for local contents:
if 'minitoc' in self.requirements and not self.has_latex_toc:
self.out.append('\n\\faketableofcontents % for local ToCs\n')
def visit_emphasis(self, node):
self.out.append('\\emph{')
if node['classes']:
self.visit_inline(node)
def depart_emphasis(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation
# BUG: the following fails, with more than one multirow
# starting in the second column (or later) see
# ../../../test/functional/input/data/latex.txt
if self.active_table.get_entry_number() == 1:
# if the first row is a multirow, this actually is the second row.
# this gets hairy if rowspans follow each other.
if self.active_table.get_rowspan(0):
count = 0
while self.active_table.get_rowspan(count):
count += 1
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.out.append(' & ')
# multirow, multicolumn
# IN WORK BUG TODO HACK continues here
# multirow in LaTeX simply will enlarge the cell over several rows
# (the following n if n is positive, the former if negative).
if 'morerows' in node and 'morecols' in node:
raise NotImplementedError('Cells that '
'span multiple rows *and* columns are not supported, sorry.')
if 'morerows' in node:
self.requirements['multirow'] = r'\usepackage{multirow}'
count = node['morerows'] + 1
self.active_table.set_rowspan(
self.active_table.get_entry_number()-1,count)
self.out.append('\\multirow{%d}{%s}{%%' %
(count,self.active_table.get_column_width()))
self.context.append('}')
elif 'morecols' in node:
            # the vertical bar before the column is omitted if it is the
            # first column; the bar after it is always added.
if self.active_table.get_entry_number() == 1:
bar1 = self.active_table.get_vertical_bar()
else:
bar1 = ''
count = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%sp{%s}%s}{' %
(count, bar1,
self.active_table.get_multicolumn_width(
self.active_table.get_entry_number(),
count),
self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
# header / not header
if isinstance(node.parent.parent, nodes.thead):
self.out.append('\\textbf{%')
self.context.append('}')
elif self.active_table.is_stub_column():
self.out.append('\\textbf{')
self.context.append('}')
else:
self.context.append('')
def depart_entry(self, node):
self.out.append(self.context.pop()) # header / not header
self.out.append(self.context.pop()) # multirow/column
# if following row is spanned from above.
if self.active_table.get_rowspan(self.active_table.get_entry_number()):
self.out.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.out.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node):
        # We create our own enumeration list environment.
        # This allows setting the style and the starting value
        # and supports unlimited nesting.
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ''
if 'suffix' in node:
enum_suffix = node['suffix']
enum_prefix = ''
if 'prefix' in node:
enum_prefix = node['prefix']
if self.compound_enumerators:
pref = ''
if self.section_prefix_for_enumerators and self.section_level:
for i in range(self.section_level):
pref += '%d.' % self._section_number[i]
pref = pref[:-1] + self.section_enumerator_separator
enum_prefix += pref
for ctype, cname in self._enumeration_counters:
enum_prefix += '\\%s{%s}.' % (ctype, cname)
enum_type = 'arabic'
if 'enumtype' in node:
enum_type = node['enumtype']
if enum_type in enum_style:
enum_type = enum_style[enum_type]
counter_name = 'listcnt%d' % len(self._enumeration_counters)
self._enumeration_counters.append((enum_type, counter_name))
# If we haven't used this counter name before, then create a
# new counter; otherwise, reset & reuse the old counter.
if len(self._enumeration_counters) > self._max_enumeration_counters:
self._max_enumeration_counters = len(self._enumeration_counters)
self.out.append('\\newcounter{%s}\n' % counter_name)
else:
self.out.append('\\setcounter{%s}{0}\n' % counter_name)
self.out.append('\\begin{list}{%s\\%s{%s}%s}\n' %
(enum_prefix,enum_type,counter_name,enum_suffix))
self.out.append('{\n')
self.out.append('\\usecounter{%s}\n' % counter_name)
# set start after usecounter, because it initializes to zero.
if 'start' in node:
self.out.append('\\addtocounter{%s}{%d}\n' %
(counter_name,node['start']-1))
## set rightmargin equal to leftmargin
self.out.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
self.out.append('}\n')
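        # Sketch of the emitted LaTeX for a plain 'arabic' list with the
        # default suffix '.' (illustrative only):
        #   \newcounter{listcnt0}
        #   \begin{list}{\arabic{listcnt0}.}
        #   {
        #   \usecounter{listcnt0}
        #   \setlength{\rightmargin}{\leftmargin}
        #   }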
def depart_enumerated_list(self, node):
self.out.append('\\end{list}\n')
self._enumeration_counters.pop()
def visit_field(self, node):
# real output is done in siblings: _argument, _body, _name
pass
def depart_field(self, node):
self.out.append('\n')
##self.out.append('%[depart_field]\n')
def visit_field_argument(self, node):
self.out.append('%[visit_field_argument]\n')
def depart_field_argument(self, node):
self.out.append('%[depart_field_argument]\n')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
if self.out is self.docinfo:
self.out.append(r'\\')
def visit_field_list(self, node):
if self.out is not self.docinfo:
self.fallbacks['fieldlist'] = PreambleCmds.fieldlist
self.out.append('%\n\\begin{DUfieldlist}\n')
def depart_field_list(self, node):
if self.out is not self.docinfo:
self.out.append('\\end{DUfieldlist}\n')
def visit_field_name(self, node):
if self.out is self.docinfo:
self.out.append('\\textbf{')
else:
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_field_name(self, node):
if self.out is self.docinfo:
self.out.append('}: &')
else:
self.out.append(':}]')
def visit_figure(self, node):
self.requirements['float_settings'] = PreambleCmds.float_settings
# ! the 'align' attribute should set "outer alignment" !
# For "inner alignment" use LaTeX default alignment (similar to HTML)
## if ('align' not in node.attributes or
## node.attributes['align'] == 'center'):
## align = '\n\\centering'
## align_end = ''
## else:
## # TODO non vertical space for other alignments.
## align = '\\begin{flush%s}' % node.attributes['align']
## align_end = '\\end{flush%s}' % node.attributes['align']
## self.out.append( '\\begin{figure}%s\n' % align )
## self.context.append( '%s\\end{figure}\n' % align_end )
self.out.append('\\begin{figure}')
if node.get('ids'):
self.out += ['\n'] + self.ids_to_labels(node)
def depart_figure(self, node):
self.out.append('\\end{figure}\n')
def visit_footer(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUfooter}{')
def depart_footer(self, node):
self.out.append('}')
self.requirements['~footer'] = ''.join(self.out)
self.pop_output_collector()
def visit_footnote(self, node):
try:
backref = node['backrefs'][0]
except IndexError:
backref = node['ids'][0] # no backref, use self-ref instead
if self.settings.figure_footnotes:
self.requirements['~fnt_floats'] = PreambleCmds.footnote_floats
self.out.append('\\begin{figure}[b]')
self.append_hypertargets(node)
            if node.get('id') == node.get('name'): # explicit label
self.out += self.ids_to_labels(node)
elif self.docutils_footnotes:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
num,text = node.astext().split(None,1)
if self.settings.footnote_references == 'brackets':
num = '[%s]' % num
self.out.append('%%\n\\DUfootnotetext{%s}{%s}{%s}{' %
(node['ids'][0], backref, self.encode(num)))
if node['ids'] == node['names']:
self.out += self.ids_to_labels(node)
# mask newline to prevent spurious whitespace:
self.out.append('%')
## else: # TODO: "real" LaTeX \footnote{}s
def depart_footnote(self, node):
if self.figure_footnotes:
self.out.append('\\end{figure}\n')
else:
self.out.append('}\n')
def visit_footnote_reference(self, node):
href = ''
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
# if not self.docutils_footnotes:
# TODO: insert footnote content at (or near) this place
# print "footnote-ref to", node['refid']
# footnotes = (self.document.footnotes +
# self.document.autofootnotes +
# self.document.symbol_footnotes)
# for footnote in footnotes:
# # print footnote['ids']
# if node.get('refid', '') in footnote['ids']:
# print 'matches', footnote['ids']
format = self.settings.footnote_references
if format == 'brackets':
self.append_hypertargets(node)
self.out.append('\\hyperlink{%s}{[' % href)
self.context.append(']}')
else:
self.fallbacks['footnotes'] = PreambleCmds.footnotes
self.out.append(r'\DUfootnotemark{%s}{%s}{' %
(node['ids'][0], href))
self.context.append('}')
def depart_footnote_reference(self, node):
self.out.append(self.context.pop())
# footnote/citation label
def label_delim(self, node, bracket, superscript):
if isinstance(node.parent, nodes.footnote):
if not self.figure_footnotes:
raise nodes.SkipNode
if self.settings.footnote_references == 'brackets':
self.out.append(bracket)
else:
self.out.append(superscript)
else:
assert isinstance(node.parent, nodes.citation)
if not self._use_latex_citations:
self.out.append(bracket)
def visit_label(self, node):
"""footnote or citation label: in brackets or as superscript"""
self.label_delim(node, '[', '\\textsuperscript{')
def depart_label(self, node):
self.label_delim(node, ']', '}')
# elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_header(self, node):
self.push_output_collector([])
self.out.append(r'\newcommand{\DUheader}{')
def depart_header(self, node):
self.out.append('}')
self.requirements['~header'] = ''.join(self.out)
self.pop_output_collector()
def to_latex_length(self, length_str, pxunit='px'):
"""Convert `length_str` with rst lenght to LaTeX length
"""
match = re.match('(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
value, unit = match.groups()[:2]
# no unit or "DTP" points (called 'bp' in TeX):
if unit in ('', 'pt'):
length_str = '%sbp' % value
# percentage: relate to current line width
elif unit == '%':
length_str = '%.3f\\linewidth' % (float(value)/100.0)
elif (unit == 'px') and (pxunit != 'px'):
# length unit px not defined in some tex variants (e.g. XeTeX)
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['px'] = '\n\\DUprovidelength{%s}{1bp}\n' % pxunit
length_str = '%s%s' % (value, pxunit)
return length_str
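        # Illustrative conversions (with the default pxunit):
        #   to_latex_length('12pt') -> '12bp'             (DTP points map to TeX 'bp')
        #   to_latex_length('50%')  -> '0.500\linewidth'  (percentages relate to \linewidth)
        # Unrecognized units are passed through unchanged.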
def visit_image(self, node):
self.requirements['graphicx'] = self.graphicx_package
attrs = node.attributes
# Convert image URI to a local file path and add to dependency list
imagepath = urllib.url2pathname(attrs['uri']).replace('\\', '/')
self.settings.record_dependencies.add(imagepath)
# alignment defaults:
if not 'align' in attrs:
# Set default align of image in a figure to 'center'
if isinstance(node.parent, nodes.figure):
attrs['align'] = 'center'
# query 'align-*' class argument
for cls in node['classes']:
if cls.startswith('align-'):
attrs['align'] = cls.split('-')[1]
# pre- and postfix (prefix inserted in reverse order)
pre = []
post = []
include_graphics_options = []
display_style = ('block-', 'inline-')[self.is_inline(node)]
align_codes = {
# inline images: by default latex aligns the bottom.
'bottom': ('', ''),
'middle': (r'\raisebox{-0.5\height}{', '}'),
'top': (r'\raisebox{-\height}{', '}'),
# block level images:
'center': (r'\noindent\makebox[\textwidth][c]{', '}'),
'left': (r'\noindent{', r'\hfill}'),
'right': (r'\noindent{\hfill', '}'),}
if 'align' in attrs:
try:
align_code = align_codes[attrs['align']]
pre.append(align_code[0])
post.append(align_code[1])
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
include_graphics_options.append('height=%s' %
self.to_latex_length(attrs['height']))
if 'scale' in attrs:
include_graphics_options.append('scale=%f' %
(attrs['scale'] / 100.0))
if 'width' in attrs:
include_graphics_options.append('width=%s' %
self.to_latex_length(attrs['width']))
if not self.is_inline(node):
pre.append('\n')
post.append('\n')
pre.reverse()
self.out.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % (','.join(include_graphics_options))
self.out.append('\\includegraphics%s{%s}' % (options, imagepath))
self.out.extend(post)
def depart_image(self, node):
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
def visit_inline(self, node): # <span>, i.e. custom roles
        # Make a copy so that ``node['classes']`` stays truthy if a
        # language argument is popped (it is used in conditional calls of
        # depart_inline()):
classes = node['classes'][:]
self.context.append('}' * len(classes))
# handle language specification:
language_tags = [cls for cls in classes
if cls.startswith('language-')]
if language_tags:
language = self.babel.language_name(language_tags[0][9:])
if language:
self.babel.otherlanguages[language] = True
self.out.append(r'\otherlanguage{%s}{' % language)
classes.pop(classes.index(language_tags[0]))
if not classes:
return
# mark up for styling with custom macros
if 'align-center' in classes:
self.fallbacks['align-center'] = PreambleCmds.align_center
self.fallbacks['inline'] = PreambleCmds.inline
self.out += [r'\DUrole{%s}{' % cls for cls in classes]
def depart_inline(self, node):
self.out.append(self.context.pop())
def visit_interpreted(self, node):
# @@@ Incomplete, pending a proper implementation on the
# Parser/Reader end.
self.visit_literal(node)
def depart_interpreted(self, node):
self.depart_literal(node)
def visit_legend(self, node):
self.fallbacks['legend'] = PreambleCmds.legend
self.out.append('\\begin{DUlegend}')
def depart_legend(self, node):
self.out.append('\\end{DUlegend}\n')
def visit_line(self, node):
self.out.append('\item[] ')
def depart_line(self, node):
self.out.append('\n')
def visit_line_block(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['lineblock'] = PreambleCmds.lineblock
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.out.append('\n\\begin{DUlineblock}{0em}\n')
if node['classes']:
self.visit_inline(node)
self.out.append('\n')
def depart_line_block(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
self.out.append('\\end{DUlineblock}\n')
def visit_list_item(self, node):
self.out.append('\n\\item ')
def depart_list_item(self, node):
pass
def visit_literal(self, node):
self.literal = True
self.out.append('\\texttt{')
if node['classes']:
self.visit_inline(node)
def depart_literal(self, node):
self.literal = False
if node['classes']:
self.depart_inline(node)
self.out.append('}')
# Literal blocks are used for '::'-prefixed literal-indented
# blocks of text, where the inline markup is not recognized,
# but are also the product of the "parsed-literal" directive,
# where the markup is respected.
#
# In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all
# the others we must use \mbox or \alltt.
#
# We can distinguish between the two kinds by the number of
# siblings that compose this node: if it is composed by a
# single element, it's either
# * a real one,
# * a parsed-literal that does not contain any markup, or
# * a parsed-literal containing just one markup construct.
def is_plaintext(self, node):
"""Check whether a node can be typeset verbatim"""
return (len(node) == 1) and isinstance(node[0], nodes.Text)
def visit_literal_block(self, node):
"""Render a literal block."""
# environments and packages to typeset literal blocks
packages = {'listing': r'\usepackage{moreverb}',
'lstlisting': r'\usepackage{listings}',
'Verbatim': r'\usepackage{fancyvrb}',
# 'verbatim': '',
'verbatimtab': r'\usepackage{moreverb}'}
if not self.active_table.is_open():
# no quote inside tables, to avoid vertical space between
# table border and literal block.
            # BUG: fails if normal text precedes the literal block.
self.out.append('%\n\\begin{quote}')
self.context.append('\n\\end{quote}\n')
else:
self.out.append('\n')
self.context.append('\n')
if self.literal_block_env != '' and self.is_plaintext(node):
self.requirements['literal_block'] = packages.get(
self.literal_block_env, '')
self.verbatim = True
self.out.append('\\begin{%s}%s\n' % (self.literal_block_env,
self.literal_block_options))
else:
self.literal = True
self.insert_newline = True
self.insert_non_breaking_blanks = True
self.out.append('{\\ttfamily \\raggedright \\noindent\n')
def depart_literal_block(self, node):
if self.verbatim:
self.out.append('\n\\end{%s}\n' % self.literal_block_env)
self.verbatim = False
else:
self.out.append('\n}')
self.insert_non_breaking_blanks = False
self.insert_newline = False
self.literal = False
self.out.append(self.context.pop())
## def visit_meta(self, node):
## self.out.append('[visit_meta]\n')
# TODO: set keywords for pdf?
# But:
# The reStructuredText "meta" directive creates a "pending" node,
# which contains knowledge that the embedded "meta" node can only
# be handled by HTML-compatible writers. The "pending" node is
# resolved by the docutils.transforms.components.Filter transform,
# which checks that the calling writer supports HTML; if it doesn't,
# the "pending" node (and enclosed "meta" node) is removed from the
# document.
# --- docutils/docs/peps/pep-0258.html#transformer
## def depart_meta(self, node):
## self.out.append('[depart_meta]\n')
def visit_math(self, node, math_env='$'):
"""math role"""
if node['classes']:
self.visit_inline(node)
self.requirements['amsmath'] = r'\usepackage{amsmath}'
math_code = node.astext().translate(unimathsymbols2tex.uni2tex_table)
if node.get('ids'):
math_code = '\n'.join([math_code] + self.ids_to_labels(node))
if math_env == '$':
wrapper = u'$%s$'
else:
wrapper = u'\n'.join(['%%',
r'\begin{%s}' % math_env,
'%s',
r'\end{%s}' % math_env])
# print repr(wrapper), repr(math_code)
self.out.append(wrapper % math_code)
if node['classes']:
self.depart_inline(node)
# Content already processed:
raise nodes.SkipNode
def depart_math(self, node):
pass # never reached
def visit_math_block(self, node):
math_env = pick_math_environment(node.astext())
self.visit_math(node, math_env=math_env)
def depart_math_block(self, node):
pass # never reached
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.out.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""Append the delimiter betweeen an option and its argument to body."""
self.out.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.out.append('\n\\item[')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.out.append('] ')
def visit_option_list(self, node):
self.fallbacks['_providelength'] = PreambleCmds.providelength
self.fallbacks['optionlist'] = PreambleCmds.optionlist
self.out.append('%\n\\begin{DUoptionlist}\n')
def depart_option_list(self, node):
self.out.append('\n\\end{DUoptionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
##self.out.append(self.starttag(node, 'span', '', CLASS='option'))
pass
def depart_option_string(self, node):
##self.out.append('</span>')
pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
def depart_organization(self, node):
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
        # insert a blank line unless the paragraph is the first node in a
        # list item / description or follows a non-paragraph node in a compound
index = node.parent.index(node)
if (index == 0 and (isinstance(node.parent, nodes.list_item) or
isinstance(node.parent, nodes.description))):
pass
elif (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
pass
else:
self.out.append('\n')
if node.get('ids'):
self.out += self.ids_to_labels(node) + ['\n']
if node['classes']:
self.visit_inline(node)
def depart_paragraph(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('\n')
def visit_problematic(self, node):
self.requirements['color'] = PreambleCmds.color
self.out.append('%\n')
self.append_hypertargets(node)
self.out.append(r'\hyperlink{%s}{\textbf{\color{red}' % node['refid'])
def depart_problematic(self, node):
self.out.append('}}')
def visit_raw(self, node):
if not 'latex' in node.get('format', '').split():
raise nodes.SkipNode
if not self.is_inline(node):
self.out.append('\n')
if node['classes']:
self.visit_inline(node)
# append "as-is" skipping any LaTeX-encoding
self.verbatim = True
def depart_raw(self, node):
self.verbatim = False
if node['classes']:
self.depart_inline(node)
if not self.is_inline(node):
self.out.append('\n')
def has_unbalanced_braces(self, string):
"""Test whether there are unmatched '{' or '}' characters."""
level = 0
for ch in string:
if ch == '{':
level += 1
if ch == '}':
level -= 1
if level < 0:
return True
return level != 0
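        # Illustrative checks:
        #   has_unbalanced_braces('{a}{b}') -> False
        #   has_unbalanced_braces('{{a}')   -> True   (one '{' left open)
        #   has_unbalanced_braces('}{')     -> True   ('}' seen before any '{')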
def visit_reference(self, node):
# We need to escape #, \, and % if we use the URL in a command.
special_chars = {ord('#'): ur'\#',
ord('%'): ur'\%',
ord('\\'): ur'\\',
}
# external reference (URL)
if 'refuri' in node:
href = unicode(node['refuri']).translate(special_chars)
# problematic chars double caret and unbalanced braces:
if href.find('^^') != -1 or self.has_unbalanced_braces(href):
self.error(
'External link "%s" not supported by LaTeX.\n'
' (Must not contain "^^" or unbalanced braces.)' % href)
if node['refuri'] == node.astext():
self.out.append(r'\url{%s}' % href)
raise nodes.SkipNode
self.out.append(r'\href{%s}{' % href)
return
# internal reference
if 'refid' in node:
href = node['refid']
elif 'refname' in node:
href = self.document.nameids[node['refname']]
else:
raise AssertionError('Unknown reference.')
if not self.is_inline(node):
self.out.append('\n')
self.out.append('\\hyperref[%s]{' % href)
if self._reference_label:
self.out.append('\\%s{%s}}' %
(self._reference_label, href.replace('#', '')))
raise nodes.SkipNode
def depart_reference(self, node):
self.out.append('}')
if not self.is_inline(node):
self.out.append('\n')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
def depart_revision(self, node):
self.depart_docinfo_item(node)
def visit_section(self, node):
self.section_level += 1
# Initialize counter for potential subsections:
self._section_number.append(0)
# Counter for this section's level (initialized by parent section):
self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
# Remove counter for potential subsections:
self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['sidebar'] = PreambleCmds.sidebar
self.out.append('\n\\DUsidebar{\n')
def depart_sidebar(self, node):
self.out.append('}\n')
attribution_formats = {'dash': (u'—', ''), # EM DASH
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
def visit_attribution(self, node):
prefix, suffix = self.attribution_formats[self.settings.attribution]
self.out.append('\\nopagebreak\n\n\\raggedleft ')
self.out.append(prefix)
self.context.append(suffix)
def depart_attribution(self, node):
self.out.append(self.context.pop() + '\n')
def visit_status(self, node):
self.visit_docinfo_item(node, 'status')
def depart_status(self, node):
self.depart_docinfo_item(node)
def visit_strong(self, node):
self.out.append('\\textbf{')
if node['classes']:
self.visit_inline(node)
def depart_strong(self, node):
if node['classes']:
self.depart_inline(node)
self.out.append('}')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
self.unimplemented_visit(node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.push_output_collector(self.subtitle)
self.subtitle_labels += self.ids_to_labels(node, set_anchor=False)
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
self.d_class.section(self.section_level + 1))
else:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
self.out.append('\n\\DUsubtitle[%s]{' % node.parent.tagname)
def depart_subtitle(self, node):
if isinstance(node.parent, nodes.document):
self.pop_output_collector()
else:
self.out.append('}\n')
def visit_system_message(self, node):
self.requirements['color'] = PreambleCmds.color
self.fallbacks['title'] = PreambleCmds.title
node['classes'] = ['system-message']
self.visit_admonition(node)
self.out.append('\\DUtitle[system-message]{system-message}\n')
self.append_hypertargets(node)
try:
line = ', line~%s' % node['line']
except KeyError:
line = ''
self.out.append('\n\n{\color{red}%s/%s} in \\texttt{%s}%s\n' %
(node['type'], node['level'],
self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
else:
backrefs = ['\\hyperlink{%s}{%d}' % (href, i+1)
for (i, href) in enumerate(node['backrefs'])]
self.context.append('backrefs: ' + ' '.join(backrefs))
def depart_system_message(self, node):
self.out.append(self.context.pop())
self.depart_admonition()
def visit_table(self, node):
self.requirements['table'] = PreambleCmds.table
if self.active_table.is_open():
self.table_stack.append(self.active_table)
# nesting longtable does not work (e.g. 2007-04-18)
self.active_table = Table(self,'tabular',self.settings.table_style)
# A longtable moves before \paragraph and \subparagraph
# section titles if it immediately follows them:
if (self.active_table._latex_type == 'longtable' and
isinstance(node.parent, nodes.section) and
node.parent.index(node) == 1 and
self.d_class.section(self.section_level).find('paragraph') != -1):
self.out.append('\\leavevmode')
self.active_table.open()
for cls in node['classes']:
self.active_table.set_table_style(cls)
if self.active_table._table_style == 'booktabs':
self.requirements['booktabs'] = r'\usepackage{booktabs}'
self.push_output_collector([])
def depart_table(self, node):
# wrap content in the right environment:
content = self.out
self.pop_output_collector()
self.out.append('\n' + self.active_table.get_opening())
self.out += content
self.out.append(self.active_table.get_closing() + '\n')
self.active_table.close()
if len(self.table_stack)>0:
self.active_table = self.table_stack.pop()
else:
self.active_table.set_table_style(self.settings.table_style)
# Insert hyperlabel after (long)table, as
# other places (beginning, caption) result in LaTeX errors.
if node.get('ids'):
self.out += self.ids_to_labels(node, set_anchor=False) + ['\n']
def visit_target(self, node):
# Skip indirect targets:
if ('refuri' in node # external hyperlink
or 'refid' in node # resolved internal link
or 'refname' in node): # unresolved internal link
## self.out.append('%% %s\n' % node) # for debugging
return
self.out.append('%\n')
# do we need an anchor (\phantomsection)?
set_anchor = not(isinstance(node.parent, nodes.caption) or
isinstance(node.parent, nodes.title))
# TODO: where else can/must we omit the \phantomsection?
self.out += self.ids_to_labels(node, set_anchor)
def depart_target(self, node):
pass
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
if not self.active_table.get('preamble written'):
self.visit_thead(None)
self.depart_thead(None)
def depart_tbody(self, node):
pass
def visit_term(self, node):
"""definition list term"""
# Commands with optional args inside an optional arg must be put
# in a group, e.g. ``\item[{\hyperref[label]{text}}]``.
self.out.append('\\item[{')
def depart_term(self, node):
# \leavevmode results in a line break if the
# term is followed by an item list.
self.out.append('}] \leavevmode ')
def visit_tgroup(self, node):
#self.out.append(self.starttag(node, 'colgroup'))
#self.context.append('</colgroup>\n')
pass
def depart_tgroup(self, node):
pass
_thead_depth = 0
def thead_depth (self):
return self._thead_depth
def visit_thead(self, node):
self._thead_depth += 1
if 1 == self.thead_depth():
self.out.append('{%s}\n' % self.active_table.get_colspecs())
self.active_table.set('preamble written',1)
self.out.append(self.active_table.get_caption())
self.out.extend(self.active_table.visit_thead())
def depart_thead(self, node):
if node is not None:
self.out.extend(self.active_table.depart_thead())
if self.active_table.need_recurse():
node.walkabout(self)
self._thead_depth -= 1
def bookmark(self, node):
"""Return label and pdfbookmark string for titles."""
result = ['']
if self.settings.sectnum_xform: # "starred" section cmd
# add to the toc and pdfbookmarks
section_name = self.d_class.section(max(self.section_level, 1))
section_title = self.encode(node.astext())
result.append(r'\addcontentsline{toc}{%s}{%s}' %
(section_name, section_title))
result += self.ids_to_labels(node.parent, set_anchor=False)
return '%\n '.join(result) + '%\n'
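        # The returned string looks roughly like (with sectnum_xform enabled;
        # title and label are illustrative):
        #   %
        #    \addcontentsline{toc}{section}{Some Title}%
        #    \label{some-title}%
        # where the \label lines come from ids_to_labels() of the parent section.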
def visit_title(self, node):
"""Append section and other titles."""
# Document title
if node.parent.tagname == 'document':
self.push_output_collector(self.title)
self.context.append('')
self.pdfinfo.append(' pdftitle={%s},' %
self.encode(node.astext()))
# Topic titles (topic, admonition, sidebar)
elif (isinstance(node.parent, nodes.topic) or
isinstance(node.parent, nodes.admonition) or
isinstance(node.parent, nodes.sidebar)):
self.fallbacks['title'] = PreambleCmds.title
classes = ','.join(node.parent['classes'])
if not classes:
classes = node.tagname
self.out.append('\\DUtitle[%s]{' % classes)
self.context.append('}\n')
# Table caption
elif isinstance(node.parent, nodes.table):
self.push_output_collector(self.active_table.caption)
self.context.append('')
# Section title
else:
self.out.append('\n\n')
self.out.append('%' + '_' * 75)
self.out.append('\n\n')
#
section_name = self.d_class.section(self.section_level)
section_star = ''
pdfanchor = ''
# number sections?
if (self.settings.sectnum_xform # numbering by Docutils
or (self.section_level > len(self.d_class.sections))):
section_star = '*'
pdfanchor = '\\phantomsection%\n '
self.out.append(r'\%s%s{%s' %
(section_name, section_star, pdfanchor))
# System messages heading in red:
if ('system-messages' in node.parent['classes']):
self.requirements['color'] = PreambleCmds.color
self.out.append('\color{red}')
# label and ToC entry:
self.context.append(self.bookmark(node) + '}\n')
            # MAYBE postfix paragraph and subparagraph with \leavevmode to
            # ensure floats stay in the section and text starts on a new line.
def depart_title(self, node):
self.out.append(self.context.pop())
if (isinstance(node.parent, nodes.table) or
node.parent.tagname == 'document'):
self.pop_output_collector()
def minitoc(self, node, title, depth):
"""Generate a local table of contents with LaTeX package minitoc"""
section_name = self.d_class.section(self.section_level)
# name-prefix for current section level
minitoc_names = {'part': 'part', 'chapter': 'mini'}
if 'chapter' not in self.d_class.sections:
minitoc_names['section'] = 'sect'
try:
minitoc_name = minitoc_names[section_name]
except KeyError: # minitoc only supports part- and toplevel
self.warn('Skipping local ToC at %s level.\n' % section_name +
' Feature not supported with option "use-latex-toc"',
base_node=node)
return
# Requirements/Setup
self.requirements['minitoc'] = PreambleCmds.minitoc
self.requirements['minitoc-'+minitoc_name] = (r'\do%stoc' %
minitoc_name)
# depth: (Docutils defaults to unlimited depth)
maxdepth = len(self.d_class.sections)
self.requirements['minitoc-%s-depth' % minitoc_name] = (
r'\mtcsetdepth{%stoc}{%d}' % (minitoc_name, maxdepth))
# Process 'depth' argument (!Docutils stores a relative depth while
# minitoc expects an absolute depth!):
offset = {'sect': 1, 'mini': 0, 'part': 0}
if 'chapter' in self.d_class.sections:
offset['part'] = -1
if depth:
self.out.append('\\setcounter{%stocdepth}{%d}' %
(minitoc_name, depth + offset[minitoc_name]))
# title:
self.out.append('\\mtcsettitle{%stoc}{%s}\n' % (minitoc_name, title))
# the toc-generating command:
self.out.append('\\%stoc\n' % minitoc_name)
def visit_topic(self, node):
# Topic nodes can be generic topic, abstract, dedication, or ToC.
# table of contents:
if 'contents' in node['classes']:
self.out.append('\n')
self.out += self.ids_to_labels(node)
# add contents to PDF bookmarks sidebar
if isinstance(node.next_node(), nodes.title):
self.out.append('\n\\pdfbookmark[%d]{%s}{%s}\n' %
(self.section_level+1,
node.next_node().astext(),
node.get('ids', ['contents'])[0]
))
if self.use_latex_toc:
title = ''
if isinstance(node.next_node(), nodes.title):
title = self.encode(node.pop(0).astext())
depth = node.get('depth', 0)
if 'local' in node['classes']:
self.minitoc(node, title, depth)
self.context.append('')
return
if depth:
self.out.append('\\setcounter{tocdepth}{%d}\n' % depth)
if title != 'Contents':
self.out.append('\\renewcommand{\\contentsname}{%s}\n' %
title)
self.out.append('\\tableofcontents\n\n')
self.has_latex_toc = True
else: # Docutils generated contents list
# set flag for visit_bullet_list() and visit_title()
self.is_toc_list = True
self.context.append('')
elif ('abstract' in node['classes'] and
self.settings.use_latex_abstract):
self.push_output_collector(self.abstract)
self.out.append('\\begin{abstract}')
self.context.append('\\end{abstract}\n')
if isinstance(node.next_node(), nodes.title):
node.pop(0) # LaTeX provides its own title
else:
self.fallbacks['topic'] = PreambleCmds.topic
# special topics:
if 'abstract' in node['classes']:
self.fallbacks['abstract'] = PreambleCmds.abstract
self.push_output_collector(self.abstract)
if 'dedication' in node['classes']:
self.fallbacks['dedication'] = PreambleCmds.dedication
self.push_output_collector(self.dedication)
self.out.append('\n\\DUtopic[%s]{\n' % ','.join(node['classes']))
self.context.append('}\n')
def depart_topic(self, node):
self.out.append(self.context.pop())
self.is_toc_list = False
if ('abstract' in node['classes'] or
'dedication' in node['classes']):
self.pop_output_collector()
def visit_rubric(self, node):
self.fallbacks['rubric'] = PreambleCmds.rubric
self.out.append('\n\\DUrubric{')
self.context.append('}\n')
def depart_rubric(self, node):
self.out.append(self.context.pop())
def visit_transition(self, node):
self.fallbacks['transition'] = PreambleCmds.transition
self.out.append('\n\n')
self.out.append('%' + '_' * 75 + '\n')
self.out.append(r'\DUtransition')
self.out.append('\n\n')
def depart_transition(self, node):
pass
def visit_version(self, node):
self.visit_docinfo_item(node, 'version')
def depart_version(self, node):
self.depart_docinfo_item(node)
def unimplemented_visit(self, node):
raise NotImplementedError('visiting unimplemented node type: %s' %
node.__class__.__name__)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
|
chirilo/remo
|
vendor-local/lib/python/docutils/writers/latex2e/__init__.py
|
Python
|
bsd-3-clause
| 120,566
|
[
"VisIt"
] |
61ad27d7598b91abfa21bb410612c7e5733d631ef309344762f89ee3e47c8a35
|
from builtins import object
import json
import os
import os.path as op
import time
import traceback
from abc import abstractmethod
from ..splunktacollectorlib.common import log as stulog
from ..splunktalib import kv_client as kvc
from ..splunktalib.common import util
def get_state_store(meta_configs,
appname,
collection_name="talib_states",
use_kv_store=False,
use_cache_file=True,
max_cache_seconds=5):
if util.is_true(use_kv_store):
# KV store based checkpoint
return StateStore(appname, meta_configs['server_uri'], meta_configs['session_key'], collection_name)
checkpoint_dir = meta_configs['checkpoint_dir']
if util.is_true(use_cache_file):
return CachedFileStateStore(appname, checkpoint_dir, max_cache_seconds)
return FileStateStore(appname, checkpoint_dir)
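# Illustrative usage (app name, path and URI are hypothetical):
#   meta = {'server_uri': 'https://localhost:8089', 'session_key': '<key>',
#           'checkpoint_dir': '/opt/splunk/var/lib/splunk/modinputs/my_input'}
#   store = get_state_store(meta, appname='Splunk_TA_example')
#   store.update_state('last_run', {'timestamp': 0})
#   store.get_state('last_run')   # -> {'timestamp': 0}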
class BaseStateStore(object):
def __init__(self, app_name):
self._app_name = app_name
@abstractmethod
def update_state(self, key, states):
pass
@abstractmethod
def get_state(self, key):
pass
@abstractmethod
def delete_state(self, key):
pass
def close(self, key=None):
pass
class StateStore(BaseStateStore):
def __init__(self, app_name, server_uri, session_key, collection_name="talib_states"):
"""
        :app_name: the name of the app
        :server_uri: splunkd management URI
        :session_key: splunkd session key used to access the KV store
        :collection_name: the collection name to be used.
        Don't use other methods to access the collection if you are using
        StateStore to access it.
"""
super(StateStore, self).__init__(app_name)
# State cache is a dict from _key to value
self._states_cache = {}
self._kv_client = None
self._collection = collection_name
self._kv_client = kvc.KVClient(
splunkd_host=server_uri,
session_key=session_key
)
kvc.create_collection(self._kv_client, self._collection, self._app_name)
self._load_states_cache()
def update_state(self, key, states):
"""
:state: Any JSON serializable
:return: None if successful, otherwise throws exception
"""
data = {'value': json.dumps(states)}
if key not in self._states_cache:
data['_key'] = key
self._kv_client.insert_collection_data(
collection=self._collection, data=data, app=self._app_name
)
else:
self._kv_client.update_collection_data(
collection=self._collection, key_id=key, data=data, app=self._app_name
)
self._states_cache[key] = states
def get_state(self, key=None):
if key:
return self._states_cache.get(key, None)
return self._states_cache
def delete_state(self, key=None):
if key:
self._delete_state(key)
else:
for key in list(self._states_cache.keys()):
self._delete_state(key)
def _delete_state(self, key):
if key not in self._states_cache:
return
self._kv_client.delete_collection_data(
self._collection, key, self._app_name)
del self._states_cache[key]
def _load_states_cache(self):
states = self._kv_client.get_collection_data(
self._collection, None, self._app_name)
if not states:
return
for state in states:
value = state['value'] if 'value' in state else state
key = state['_key']
try:
value = json.loads(value)
except Exception:
stulog.logger.warning(
'Unable to load state from cache, key=%s, error=%s',
key, traceback.format_exc())
pass
self._states_cache[key] = value
def _create_checkpoint_dir_if_needed(checkpoint_dir):
if os.path.isdir(checkpoint_dir):
return
stulog.logger.info(
"Checkpoint dir '%s' doesn't exist, try to create it",
checkpoint_dir)
try:
os.mkdir(checkpoint_dir)
except OSError:
stulog.logger.exception(
"Failure creating checkpoint dir '%s'", checkpoint_dir
)
raise Exception(
"Unable to create checkpoint dir '{}'".format(checkpoint_dir)
)
class FileStateStore(BaseStateStore):
def __init__(self, app_name, checkpoint_dir):
super(FileStateStore, self).__init__(app_name)
self._checkpoint_dir = checkpoint_dir
def _get_checkpoint_file(self, filename):
return op.join(self._checkpoint_dir, filename)
@staticmethod
def _remove_if_exist(filename):
if op.exists(filename):
os.remove(filename)
def update_state(self, key, states):
"""
:state: Any JSON serializable
:return: None if successful, otherwise throws exception
"""
_create_checkpoint_dir_if_needed(self._checkpoint_dir)
filename = self._get_checkpoint_file(key)
with open(filename + ".new", "w") as json_file:
json.dump(states, json_file)
self._remove_if_exist(filename)
os.rename(filename + ".new", filename)
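        # Writing to '<key>.new' first and renaming afterwards keeps the
        # checkpoint from being truncated if the process dies mid-write; the
        # old file is removed beforehand because os.rename() refuses to
        # overwrite an existing target on Windows.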
def get_state(self, key):
filename = self._get_checkpoint_file(key)
if op.exists(filename):
with open(filename) as json_file:
state = json.load(json_file)
return state
else:
return None
def delete_state(self, key):
self._remove_if_exist(self._get_checkpoint_file(key))
class CachedFileStateStore(FileStateStore):
def __init__(self, app_name, checkpoint_dir, max_cache_seconds=5):
"""
        :app_name: the name of the app
        :checkpoint_dir: directory in which checkpoint files are written
        :max_cache_seconds: maximum age of an in-memory state before it is
        flushed to disk
"""
super(CachedFileStateStore, self).__init__(app_name, checkpoint_dir)
self._states_cache = {} # item: time, dict
self._states_cache_lmd = {} # item: time, dict
self.max_cache_seconds = max_cache_seconds
def update_state(self, key, states):
now = time.time()
if key in self._states_cache:
last = self._states_cache_lmd[key][0]
if now - last >= self.max_cache_seconds:
self._update_and_flush_state(now, key, states)
else:
self._update_and_flush_state(now, key, states)
self._states_cache[key] = (now, states)
def _update_and_flush_state(self, now, key, states):
"""
:state: Any JSON serializable
:return: None if successful, otherwise throws exception
"""
self._states_cache_lmd[key] = (now, states)
super(CachedFileStateStore, self).update_state(key, states)
def get_state(self, key):
if key in self._states_cache:
return self._states_cache[key][1]
filename = self._get_checkpoint_file(key)
if op.exists(filename):
with open(filename) as json_file:
state = json.load(json_file)
now = time.time()
self._states_cache[key] = now, state
self._states_cache_lmd[key] = now, state
return state
else:
return None
def delete_state(self, key):
super(CachedFileStateStore, self).delete_state(key)
if self._states_cache.get(key):
del self._states_cache[key]
if self._states_cache_lmd.get(key):
del self._states_cache_lmd[key]
def close(self, key=None):
if not key:
for k, (t, s) in self._states_cache.items():
self._update_and_flush_state(t, k, s)
self._states_cache.clear()
self._states_cache_lmd.clear()
elif key in self._states_cache:
self._update_and_flush_state(self._states_cache[key][0], key,
self._states_cache[key][1])
del self._states_cache[key]
del self._states_cache_lmd[key]
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/cloudconnectlib/splunktalib/state_store.py
|
Python
|
isc
| 8,194
|
[
"VisIt"
] |
ae9abab48b992ee71efc1110743c07972f66828d0b6172928ee27b082cfcff6d
|
# -*- coding:utf-8 -*-
"""
Provides different functions to analyse the neuronal network activity.
Synchronization indexes:
- Spike Train Synchrony (STS) index
- Membrane Potential Synchrony (MPS) index
- FFT peak
"""
import numpy as np
from model import PARAMETERS as ps
from scipy.misc import comb
from scipy.signal import resample
from scipy import argmax
from matplotlib.mlab import psd
from pylab import detrend_mean
from brian.stdunits import *
from brian.units import *
if ps:
PSIN = ps['Input']
TAU = PSIN['tau_Ein']
def sts(netw_act, spikes, neur_start, neur_stop, sig_start, time_start):
"""
Returns the STS index [1] for the given network activity.
Parameters
----------
netw_act : brian.StateMonitor.values
signal that represents the network activity in one variable
spikes : brian.SpikeMonitor
set of spikes during the simulation
neur_start : int
neuron index left border for the slice of neuron we want
neur_stop : int
neuron index right border
sig_start : int
        index of the signal where we want to start (e.g. for burn-in)
References
----------
[1] Brunel & Wang, 2003
"""
cut_sig = netw_act[sig_start:]
# Then compute the autocorrelation at zero time.
autocorr = autocorr_zero(cut_sig)
# Finally, normalize it by nu*tau
nspikes = get_nspikes(spikes, time_start, neur_start, neur_stop)
if nspikes == 0:
return 0 # No spikes to compute STS, so return the minimum
else:
nu = nspikes/(spikes.clock.end - time_start)
return float(autocorr/(nu*TAU))
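# In effect sts() computes Var(netw_act)/(nu*tau): autocorr_zero() below is the
# zero-lag autocovariance (i.e. the variance) of the population signal, nu is
# the mean firing rate of the selected neurons over the analysed window, and
# tau is the input time constant ``tau_Ein`` taken from the model parameters.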
def autocorr_zero(signal):
"""Returns the autocorrelation of the signal at zero time."""
mean_sig = np.mean(signal)
return np.mean((signal - mean_sig)*(signal - mean_sig))
def get_nspikes(spikes, time_treshold, start, stop):
"""Returns the number of spikes, keeping only the last portion of the
simulation."""
spike_neurons = spikes.it[0]
spike_times = spikes.it[1]
neurons_mask = (spike_neurons >= start) & (spike_neurons < stop)
times_mask = (spike_times > time_treshold)
good_spikes = neurons_mask & times_mask
return good_spikes.sum()
def mps(memb_pot, start, stop, sig_start):
"""
Returns the MPS index [1] of the given network.
Parameters
----------
memb_pot : StateMonitor
Membrane potential for a whole category (eg. mitral) of neurons.
start, stop : int
indices of the first and last neuron to take
sig_start : int
index of the signal where we want to start
References
----------
[1] Brunel & Wang, 2003
"""
all_corr = np.corrcoef(memb_pot.values[start:stop, sig_start:])
nneur = stop - start
ncomb = comb(nneur, 2, exact=True)
assert ncomb > 0, \
"No mitral combination are possible, are you using 1 mitral?"
# Compute the sum of all neuron combinations, that is, get the lower
# triangle of all_corr without the diagonal (k=-1)
sum_comb_corr = np.tril(all_corr, k=-1).sum()
return sum_comb_corr/ncomb
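# mps() is thus the average pairwise Pearson correlation of the membrane
# potentials: the lower triangle of the correlation matrix (diagonal excluded)
# is summed and divided by the number of neuron pairs C(nneur, 2).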
def fftmax(signal, n_subpop, signal_dt, sig_start):
"""Return the peak in the FFT frequency of the signal values."""
res = {}
ntimes = int(len(signal.times[sig_start:]))
cut_signal = signal.values[:, sig_start:]
# Compute FFT for each subpopulation
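    # The PSD below uses ~0.5 s windows (NFFT = 0.5/dt) with 50 % overlap
    # (noverlap = 0.25/dt), giving a frequency resolution of about 2 Hz.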
for unit in xrange(n_subpop):
power, freqs = psd(cut_signal[unit], Fs=int(1/signal_dt),
NFFT=int(0.5/signal_dt), noverlap=int(0.25/signal_dt),
detrend=detrend_mean)
res[unit] = freqs[argmax(power)]
# Compute FFT for the whole population by the mean of activities
mean_signal = np.mean(cut_signal, axis=0)
power, freqs = psd(mean_signal, Fs=int(1/signal_dt),
NFFT=int(0.5/signal_dt), noverlap=int(0.25/signal_dt),
detrend=detrend_mean)
res['mean'] = freqs[argmax(power)]
return res
def crosscorr_phase_angle(sig1, sig2, x, max_length=10000):
"""Return the cross correlation phase angle between 2 signals
Parameters
----------
sig1 : array
signal of length L
sig2 : array
another signal of length L
x : array
time axis for the signals sig1 and sig2
max_length : int, optional
Maximum length for the signals, signals are resampled otherwise.
Default is 10 000.
"""
assert len(sig1) == len(sig2) == len(x), \
"The signals don't have the same length."
sig_length = len(sig1)
# Resample if signal is too big thus slowing down correlation computation
if sig_length > max_length:
sig1, x = resample(sig1, max_length, x)
sig2 = resample(sig2, max_length)
sig_length = max_length
corr = np.correlate(sig1, sig2, mode="same")
xmean = sig_length/2
return float(argmax(corr) - xmean)/sig_length*x[-1] # *x[-1] to scale
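# crosscorr_phase_angle() thus returns the lag (in units of the x axis) at
# which the two signals are maximally correlated: the argmax of the
# 'same'-mode cross-correlation, re-centred on zero and rescaled by x[-1].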
def peak_dist_index(sig1, sig2, xaxis=None):
"""Return the mean and std of peak-distances between the signals"""
peak_dist = get_dist(sig1, sig2, xaxis)
return np.mean(peak_dist), np.std(peak_dist)
def peak_dist_circ_index(sig1, sig2):
"""Return the *circular* mean and std of the distances between peaks"""
# Make the distances directional
peak_dists = get_directional_distances(sig1, sig2)
    # Apply circular statistics to the directional distances
return circ_mean(peak_dists), circ_disp(peak_dists)
def get_directional_distances(sig1, sig2):
"""Return the directional distances between peaks of `sig1` and `sig2`"""
first_sig, _ = get_ordered_sig([sig1, sig2])
n_peaks = len(get_ind_local_max(first_sig))
mean_peak_dist = len(first_sig)/n_peaks
peak_dists = np.array(get_dist(sig1, sig2))
peak_dists = peak_dists*2.*np.pi/mean_peak_dist
return peak_dists
def circ_disp(sig):
"""Statistical circular dispertion
References
----------
[1] http://cran.r-project.org/web/packages/CircStats/
"""
n = len(sig)
scos = np.sum(np.cos(sig))
ssin = np.sum(np.sin(sig))
root = np.sqrt(scos*scos + ssin*ssin)
rbar = root/n
var = 1 - rbar
return var
def circ_mean(sig):
"""Statistical circular mean
References
----------
[1] http://cran.r-project.org/web/packages/CircStats/
"""
sinr = np.sum(np.sin(sig))
cosr = np.sum(np.cos(sig))
return np.arctan2(sinr, cosr)
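# Both functions follow the standard circular-statistics definitions: with
# R = sqrt((sum cos x)^2 + (sum sin x)^2) / n, circ_disp() returns 1 - R
# (the circular variance) and circ_mean() returns atan2(sum sin x, sum cos x).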
def get_dist(sig1, sig2, xaxis=None):
"""Return the distances between the peaks of two signals"""
distances = []
max_sig1 = get_ind_local_max(sig1)
max_sig2 = get_ind_local_max(sig2)
first_sig, second_sig = get_ordered_sig((max_sig1, max_sig2))
ind_peak_fs = 0
ind_peak_ss = 0
last_peak = False
while ind_peak_fs < len(first_sig) and ind_peak_ss < len(second_sig):
# Take the indexes of the peaks
peak_fs = first_sig[ind_peak_fs]
peak_ss = second_sig[ind_peak_ss]
if ind_peak_fs == len(first_sig) - 1: # if this is the last peak,
peak_fs_next = sig1[-1] # make up a last peak at the very end
last_peak = True
else:
peak_fs_next = first_sig[ind_peak_fs + 1]
# If an x-axis is given, get the peak x values from it
if xaxis is not None:
peak_fs = xaxis[peak_fs]
peak_fs_next = xaxis[peak_fs_next]
peak_ss = xaxis[peak_ss]
dist_intra_fs = peak_fs_next - peak_fs
dist_inter = peak_ss - peak_fs
# No SS peak in between two FS peaks
if dist_intra_fs < dist_inter and not last_peak:
ind_peak_fs += 1
# There is one or more SS peaks in between two FS peaks, or it's the last peak
else:
dist_left = peak_ss - peak_fs
dist_right = peak_ss - peak_fs_next
if abs(dist_left) < abs(dist_right):
distances.append(dist_left)
else:
distances.append(dist_right)
ind_peak_ss += 1
return distances
def get_ind_local_max(sig, treshold_ratio=0.25):
"""Return indexes of the local maxima of sig.
Parameters
----------
sig : list or np.array
signal to get the local maxima from
treshold_ratio : float
ratio relative to the distance median to keep the peaks,
e.g. if the distance median is 10 and the ratio is 0.25 then only
peak distances above 2.5 will be kept.
"""
peak_indexes = np.nonzero((np.diff(sig[:-1]) > 0) & (np.diff(sig[1:]) < 0))[0] + 1
peak_distances = np.diff(peak_indexes)
peak_dist_median = np.median(peak_distances)
suptreshold_peaks = (peak_distances > treshold_ratio*peak_dist_median)
return peak_indexes[suptreshold_peaks]
def get_ordered_sig(sig_list):
"""Return the signals ordered according to their first value"""
first_values = []
# Get the first values of each signal to sort them
for i, sig in enumerate(sig_list):
first_values.append((sig[0], i))
first_values.sort()
ordered_list = []
for fv in first_values:
sig = sig_list[fv[1]]
ordered_list.append(sig)
return ordered_list
def slope(x1, y1, x2, y2):
"""Return the slope between (x1, y1) and (x2, y2)."""
return (y2 - y1)/(x2 - x1)
def sign(val):
"""Return the sign of the value: -1 if negative, +1 if positive or null."""
if val < 0:
return -1
elif val >= 0:
return 1
|
neuro-lyon/multiglom-model
|
src/analysis.py
|
Python
|
mit
| 9,474
|
[
"Brian",
"NEURON"
] |
8406812f64e0d7bc693361232e3b0567faa3ba0593f35e5117d4bf1b0999a339
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Subpackage containing extraction methods for vasp parameters from vasp output.
Extraction objects are implemented as a mix and match of base classes. The
reason for this is that we want to isolate functionality specific to DFT and GW,
and specific to reading *real* OUTCAR files and *database* OUTCAR files.
"""
__docformat__ = 'restructuredtext en'
__all__ = ['Extract', 'MassExtract']
from ...tools.extract import AbstractExtractBase
from .base import ExtractBase
from .mixin import IOMixin
from ...jobfolder import AbstractMassExtract
class Extract(AbstractExtractBase, IOMixin, ExtractBase):
""" Extracts DFT data from an OUTCAR. """
def __init__(self, directory=None, **kwargs):
""" Initializes extraction object.
:param directory:
Directory where the OUTCAR resides.
It may also be the path to an OUTCAR itself, if the file is not
actually called OUTCAR.
"""
from os.path import exists, isdir, basename, dirname
from ...misc import RelativePath
outcar = None
if directory is not None:
directory = RelativePath(directory).path
if exists(directory) and not isdir(directory):
outcar = basename(directory)
directory = dirname(directory)
AbstractExtractBase.__init__(self, directory)
ExtractBase.__init__(self)
IOMixin.__init__(self, directory, OUTCAR=outcar, **kwargs)
@property
def success(self):
""" True if calculation was successfull. """
return ExtractBase.success.__get__(self)
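# Example sketch (hypothetical paths): the constructor accepts either a
# directory containing an OUTCAR or a path to the output file itself.
#   extract = Extract('/path/to/calc')           # directory with an OUTCAR
#   extract = Extract('/path/to/calc/OUTCAR.0')  # or the output file directly
#   if extract.success:
#       ...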
class MassExtract(AbstractMassExtract):
""" Extracts all Vasp calculations in directory and sub-directories.
Trolls through all subdirectories for vasp calculations, and organises
results as a dictionary where keys are the names of the directories.
Usage is simply:
>>> from pylada.vasp import MassExtract
>>> a = MassExtract('path') # or nothing if path is current directory.
>>> a.success
{
'/some/path/': True,
'/some/other/path': True
}
>>> a.eigenvalues
"""
def __init__(self, path=None, **kwargs):
""" Initializes MassExtract.
:Parameters:
path : str or None
Root directory for which to investigate all subdirectories.
If None, uses current working directory.
kwargs : dict
Keyword parameters passed on to AbstractMassExtract.
:kwarg naked_end: True if should return value rather than dict when only one item.
:kwarg unix_re: converts regex patterns from unix-like expression.
"""
from os import getcwd
if path is None:
path = getcwd()
# this will throw on unknown kwargs arguments.
super(MassExtract, self).__init__(path=path, **kwargs)
def __iter_alljobs__(self):
""" Goes through all directories with an OUTVAR. """
from os import walk
from os.path import relpath, join
from . import Extract as VaspExtract
from ..relax import RelaxExtract
for dirpath, dirnames, filenames in walk(self.rootpath, topdown=True, followlinks=True):
if 'OUTCAR' not in filenames:
continue
if 'relax_cellshape' in dirnames or 'relax_ions' in dirnames:
dirnames[:] = [u for u in dirnames if u not in ['relax_cellshape', 'relax_ions']]
try:
result = RelaxExtract(join(self.rootpath, dirpath))
except:
try:
result = VaspExtract(join(self.rootpath, dirpath))
except:
continue
else:
try:
result = VaspExtract(join(self.rootpath, dirpath))
except:
continue
yield join('/', relpath(dirpath, self.rootpath)), result
def __copy__(self):
""" Returns a shallow copy. """
result = self.__class__(self.rootpath)
result.__dict__.update(self.__dict__)
return result
@property
def _attributes(self):
""" Returns __dir__ set special to the extraction itself. """
from . import Extract as VaspExtract
return list(set([u for u in dir(VaspExtract) if u[0] != '_'] + ['details']))
|
pylada/pylada-light
|
src/pylada/vasp/extract/__init__.py
|
Python
|
gpl-3.0
| 5,629
|
[
"CRYSTAL",
"VASP"
] |
80dac1f8243d2c27eba26331971004f368058ad93e421c840a7e4a2f54ad688d
|
# _
# |\ | / |_ _ ._ _
# | \| \/\/ \_ | | (/_ | | |
#
import json
def extract_basis_nwchem(data, name):
"""Extract atomic orbital, charge density fitting, or exchange
correlation functional basis data from a text region passed in as
data. The charge density fitting and exchange correlation functional
basis set data are employed for density functional calculations.
@param data: text region containing basis set data
@type data : str
@param name: name of basis type: "ao basis", "cd basis", or "xc basis"
@type name : str
@return: per-element basis set chunks
@rtype : list
"""
begin_marker = """BASIS "{0}" PRINT""".format(name)
end_marker = "END"
# search for the basis set data begin marker
# calling "upper" on data because original data has inconsistent
# capitalization
begin = data.upper().find(begin_marker.upper())
end = data.upper().find(end_marker, begin)
# No basis data found
if begin == -1:
return []
trimmed = data[begin + len(begin_marker): end - len(end_marker)].strip()
chunks = []
lines = []
# group lines of data delimited by #BASIS SET... into per-element chunks
for line in trimmed.split("\n"):
if line.upper().startswith("#BASIS SET"):
if lines:
chunks.append(lines)
lines = [line]
else:
lines.append(line)
# handle trailing chunk that is not followed by another #BASIS SET...
if lines and (not chunks or lines != chunks[-1]):
chunks.append(lines)
# join lines back into solid text blocks
chunks = ["\n".join(c) for c in chunks]
return chunks
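# Example sketch (synthetic input, not taken from real BSE pages):
#   sample = ('BASIS "ao basis" PRINT\n'
#             '#BASIS SET: (3s) -> [2s]\n'
#             'H    S\n'
#             '      3.42525091  0.15432897\n\n\n'
#             'END\n')
#   extract_basis_nwchem(sample, "ao basis")  # -> a single per-element chunk for H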
def extract_ecp_nwchem(data):
"""Extract the effective core potential basis data from a text region
passed in as data.
@param data: text region containing ECP data
@type data : str
@return: per-element effective core potential chunks
@rtype : list
"""
ecp_begin_mark = "ECP\n"
ecp_end_mark = "END"
ecp_begin = data.upper().find(ecp_begin_mark)
ecp_end = data.upper().find(ecp_end_mark, ecp_begin)
ecp_region = ""
if ecp_begin > -1 and ecp_end > -1:
ecp_region = data[
ecp_begin +
len(ecp_begin_mark): ecp_end -
len(ecp_end_mark)].strip()
# No ECP data, so return empty list
else:
return []
chunks = []
lines = []
# group lines of data delimited by XX nelec YY into chunks, e.g.
# "Zn nelec 18" begins a zinc ECP
for line in ecp_region.split("\n"):
if line.lower().find(" nelec ") > -1:
if lines:
chunks.append(lines)
lines = [line]
else:
lines.append(line)
# handle trailing chunk that is not followed by another XX nelec YY..
if lines and (not chunks or lines != chunks[-1]):
chunks.append(lines)
# join lines back into solid text blocks
chunks = ["\n".join(c) for c in chunks]
return chunks
def unpack_nwchem_basis_block(data):
"""Unserialize a NWChem basis data block and extract components
@param data: a JSON of basis set data, perhaps containing many types
@type data : str
@return: unpacked data
@rtype : dict
"""
unpacked = json.loads(data)
return unpacked
def parse_basis_data_nwchem(data, name, description, elements, debug=True):
"""Parse the NWChem basis data raw html to get a nice tuple.
The data-pairs item is actually expected to be a 2 item list:
[symbol, data]
e.g. ["Ca", "#BASIS SET..."]
@param data: raw HTML from BSE
@type data : unicode
@param name: basis set name
@type name : str
@param description: basis set description
@type description : str
@param elements: element symbols e.g. ['H', 'C', 'N', 'O', 'Cl']
@type elements : list
@return: (name, description, data-pairs)
@rtype : tuple
"""
unused_elements = set([e.upper() for e in elements])
def extract_symbol(txt):
for sline in txt.split("\n"):
if not sline.startswith("#"):
try:
symbol = sline[:3].strip().split()[0]
return symbol
except IndexError:
continue
raise ValueError("Can't find element symbol in {0}".format(txt))
ao_chunks = extract_basis_nwchem(data, "ao basis")
cd_chunks = extract_basis_nwchem(data, "cd basis")
xc_chunks = extract_basis_nwchem(data, "xc basis")
ecp_chunks = extract_ecp_nwchem(data)
if not any([ao_chunks, cd_chunks, xc_chunks, ecp_chunks]):
str_ = "No basis set data found while attempting to process {0} ({1})"
raise ValueError(str_.format(name, description))
# Tag all used elements, whether from ordinary AO basis or ECP section
for chunk in ao_chunks + cd_chunks + xc_chunks + ecp_chunks:
try:
symbol = extract_symbol(chunk)
unused_elements.remove(symbol.upper())
except KeyError:
pass
if unused_elements:
msg = "Warning: elements {0} left over for {1}"
print msg.format(list(unused_elements), name)
# Form packed chunks, turn packed chunks into pairs
used_elements = set()
packed = {}
for cgroup, gname in [(ao_chunks, "ao basis"), (cd_chunks, "cd basis"),
(xc_chunks, "xc basis"), (ecp_chunks, "ecp")]:
for chunk in cgroup:
symbol = extract_symbol(chunk)
# Expand entry, e.g. add ecp data for Na after it has ao basis
try:
idx, ch = packed[symbol]
ch[gname] = chunk
chunk_dict = ch.copy()
# Create fresh entry, e.g. add Na with initial ao basis
except KeyError:
chunk_dict = {gname: chunk}
idx = len(used_elements)
used_elements.add(symbol)
packed[symbol] = (idx, chunk_dict)
"""
for chunk in ao_chunks:
symbol = extract_symbol(chunk)
chunk_dict = {"ao basis" : chunk}
idx = len(used_elements)
used_elements.add(symbol)
packed[symbol] = (idx, chunk_dict)
for chunk in ecp_chunks:
symbol = extract_symbol(chunk)
#add ECP data if existing chunk, else create fresh chunk
try:
idx, ch = packed[symbol]
ch["ecp"] = chunk
chunk_dict = ch.copy()
except KeyError:
chunk_dict = {"ecp" : chunk}
idx = len(used_elements)
used_elements.add(symbol)
packed[symbol] = (idx, chunk_dict)
"""
values = sorted(packed.values())
# Assign (Symbol, Serialized) to final pairs
pairs = []
for idx, chunk in values:
symbol = extract_symbol(chunk.get("ao basis")
or chunk.get("cd basis")
or chunk.get("xc basis")
or chunk.get("ecp"))
serialized = json.dumps(chunk)
pairs.append([symbol, serialized])
return [name, description, pairs]
def check_NWChem(str_type):
"""Check is the orbital type is handle by gamess"""
assert len(str_type) == 1
if str_type in "S P D".split():
return True
elif str_type > "I" or str_type in "K L M".split():
raise BaseException
else:
return True
|
TApplencourt/EMSL_Basis_Set_Exchange_Local
|
src/parser/nwchem.py
|
Python
|
mit
| 7,502
|
[
"GAMESS",
"NWChem"
] |
0f8058d4a11634381954620775aa2864fe5914efb96d6c00540e6d28913103bc
|
from __future__ import unicode_literals
from __future__ import absolute_import
from functools import reduce
import logging
from docker.errors import APIError
from .config import get_service_name_from_net, ConfigurationError
from .const import DEFAULT_TIMEOUT, LABEL_PROJECT, LABEL_SERVICE, LABEL_ONE_OFF
from .container import Container
from .legacy import check_for_legacy_containers
from .service import Service
from .utils import parallel_execute
log = logging.getLogger(__name__)
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def get_service_names(links):
return [link.split(':')[0] for link in links]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in service.get('volumes_from', []) or
name == get_service_name_from_net(service.get('net')))
]
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
raise DependencyError('A service can not link to itself: %s' % n['name'])
if n['name'] in n.get('volumes_from', []):
raise DependencyError('A service can not mount itself as volume: %s' % n['name'])
else:
raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
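# Example sketch (hypothetical minimal service dicts with no 'net' or
# 'volumes_from' keys): services come back dependency-first.
#   services = [
#       {'name': 'web', 'links': ['db:database']},
#       {'name': 'db'},
#   ]
#   [s['name'] for s in sort_service_dicts(services)]  # -> ['db', 'web']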
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client):
self.name = name
self.services = services
self.client = client
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@classmethod
def from_dicts(cls, name, service_dicts, client):
"""
Construct a ServiceCollection from a list of dicts representing services.
"""
project = cls(name, [], client)
for service_dict in sort_service_dicts(service_dicts):
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(Service(client=client, project=name, links=links, net=net,
volumes_from=volumes_from, **service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_name in service_dict.get('volumes_from', []):
try:
service = self.get_service(volume_name)
volumes_from.append(service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_name)
volumes_from.append(container)
except APIError:
raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' % (service_dict['name'], volume_name))
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
if 'net' in service_dict:
net_name = get_service_name_from_net(service_dict.get('net'))
if net_name:
try:
net = self.get_service(net_name)
except NoSuchService:
try:
net = Container.from_id(self.client, net_name)
except APIError:
raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
else:
net = service_dict['net']
del service_dict['net']
else:
net = None
return net
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.stop(**options),
msg_index=lambda c: c.name,
msg="Stopping"
)
def pause(self, service_names=None, **options):
for service in reversed(self.get_services(service_names)):
service.pause(**options)
def unpause(self, service_names=None, **options):
for service in self.get_services(service_names):
service.unpause(**options)
def kill(self, service_names=None, **options):
parallel_execute(
objects=self.containers(service_names),
obj_callable=lambda c: c.kill(**options),
msg_index=lambda c: c.name,
msg="Killing"
)
def remove_stopped(self, service_names=None, **options):
all_containers = self.containers(service_names, stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
parallel_execute(
objects=stopped_containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def restart(self, service_names=None, **options):
for service in self.get_services(service_names):
service.restart(**options)
def build(self, service_names=None, no_cache=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
allow_recreate=True,
force_recreate=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(
services,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout
)
]
def _get_convergence_plans(self,
services,
allow_recreate=True,
force_recreate=False):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action == 'recreate'
]
if updated_dependencies and allow_recreate:
log.debug(
'%s has upstream changes (%s)',
service.name, ", ".join(updated_dependencies),
)
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=True,
)
else:
plan = service.convergence_plan(
allow_recreate=allow_recreate,
force_recreate=force_recreate,
)
plans[service.name] = plan
return plans
def pull(self, service_names=None):
for service in self.get_services(service_names, include_deps=True):
service.pull()
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
if not containers:
check_for_legacy_containers(
self.client,
self.name,
self.service_names,
)
return filter(matches_service_names, containers)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class DependencyError(ConfigurationError):
pass
|
bbirand/compose
|
compose/project.py
|
Python
|
apache-2.0
| 12,598
|
[
"VisIt"
] |
b51fa6c974e996ac1c77ad0d9762a955d05d12e8e511e9a188567343fa57d6e9
|
# Copyright (C) 2014 The ESPResSo project
# Copyright (C) 2012,2013 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Check whether all features used in the code are defined
#
from __future__ import print_function
import sys, os, re, fileinput
sys.path.append(os.path.join(sys.path[0], '..', '..', 'config'))
import featuredefs
if len(sys.argv) < 3:
print("Usage: %s DEFFILE [FILE...]" % sys.argv[0])
exit(2)
print("Checking for completeness of features in test configurations...")
fdefs = featuredefs.defs(sys.argv[1])
featurefound = set()
featurere = re.compile('^#define (\w+)')
for line in fileinput.input(sys.argv[2:]):
res = featurere.match(line)
if res is not None:
feature = res.group(1)
featurefound.add(feature)
unused = fdefs.features.difference(featurefound)
unused = unused.difference(fdefs.notestfeatures)
if len(unused) > 0:
for feature in unused:
print("check_myconfig_complete: %s is not used" % feature)
else:
print("check_myconfig_complete: All features are used!")
|
olenz/espresso
|
config/check_myconfig_complete.py
|
Python
|
gpl-3.0
| 1,676
|
[
"ESPResSo"
] |
27607a105355a73c589c70ec69ed5fc695050f15ec0b859b7a36a01a940a24bc
|
# -*- coding: utf-8 -*-
"""
CyNetwork class is a simple wrapper for network-related cyREST raw REST API.
It does not hold the actual network data. It's a reference to a network in current Cytoscape session.
With CyNetwork API, you can access Cytoscape data objects in more Pythonista-friendly way.
"""
import json
import pandas as pd
import requests
from py2cytoscape.data.network_view import CyNetworkView
from ..util import util_networkx as nx_util
from ..util import dataframe as df_util
from . import BASE_URL, HEADERS
BASE_URL_NETWORK = BASE_URL + 'networks'
class CyNetwork(object):
"""
"""
def __init__(self, suid=None, session=None, url=None):
if pd.isnull(url):
raise ValueError("URL is missing.")
# Validate required argument
if pd.isnull(suid):
raise ValueError("SUID is missing.")
else:
self.__id = suid
self.__url = url + '/' + str(self.__id) + '/'
self.session = session if session is not None else requests.Session()
def get_id(self):
"""
Get session-unique ID of this network
:return: SUID as integer
"""
return self.__id
def to_json(self):
"""
Return this network in Cytoscape.js format.
:return: Cytoscape.js Style JSON as dictionary.
"""
return self.session.get(self.__url).json()
def to_networkx(self):
"""
Return this network in NetworkX graph object.
:return: Network as NetworkX graph object
"""
return nx_util.to_networkx(self.session.get(self.__url).json())
def to_dataframe(self):
"""
Return this network in pandas DataFrame.
:return: Network as DataFrame. This is equivalent to SIF.
"""
return df_util.to_dataframe(self.session.get(self.__url).json())
def get_nodes(self):
"""
Get all nodes as a list of SUIDs
:return:
"""
return self.session.get(self.__url + 'nodes').json()
def get_edges(self, format='suid'):
"""
Get edges in JSON format.
:param format: Currently only the 'suid' format is available.
:return: If format is 'suid', the edges are returned as JSON.
"""
if format is 'suid':
return self.session.get(self.__url + 'edges').json()
elif format is 'edgelist':
# TODO: implement this
pass
else:
raise ValueError(format + ' is not supported for edge format.')
def add_node(self, node_name, dataframe=False):
"""
Add a single node to the network.
:param node_name: the name of the node that you want to add.
:param dataframe: The default value is False. If True, return a pandas dataframe instead of a dict.
:return : A dict mapping names to SUIDs for the newly-created nodes. If node_name is 'None', the return is 'None'.
"""
if node_name is None:
return None
return self.add_nodes([node_name], dataframe=dataframe)
def add_nodes(self, node_name_list, dataframe=False):
"""
Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes.
"""
res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS)
check_response(res)
nodes = res.json()
if dataframe:
return pd.DataFrame(nodes).set_index(['SUID'])
else:
return {node['name']: node['SUID'] for node in nodes}
def add_edge(self, source, target, interaction='-', directed=True, dataframe=True):
"""
Add a single edge from source to target.
:param source: String. This is the source node name.
:param target: String. This is the target node name.
:param interaction:
:param directed: You can choose this edge is directed or not. The default value is True.
:param dataframe: If dataframe is True (default), return a Pandas DataFrame.
If dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.
:return : If parameter:dataframe is True (default), return a Pandas DataFrame.
If parameter:dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.
"""
new_edge = {
'source': source,
'target': target,
'interaction': interaction,
'directed': directed
}
return self.add_edges([new_edge], dataframe=dataframe)
def add_edges(self, edge_list, dataframe=True):
"""
Add all edges in edge_list.
:return: A data structure with Cytoscape SUIDs for the newly-created edges.
:param edge_list: List of (source, target, interaction) tuples *or*
list of dicts with 'source', 'target', 'interaction', 'direction' keys.
:param dataframe: If dataframe is True (default), return a Pandas DataFrame.
If dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.
:return : If parameter:dataframe is True (default), return a Pandas DataFrame.
If parameter:dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.
"""
# It might be nice to have an option to pass a list of dicts instead of a list of tuples
if not isinstance(edge_list[0], dict):
edge_list = [{'source': edge_tuple[0],
'target': edge_tuple[1],
'interaction': edge_tuple[2]}
for edge_tuple in edge_list]
res = self.session.post(self.__url + 'edges', data=json.dumps(edge_list), headers=HEADERS)
check_response(res)
edges = res.json()
if dataframe:
return pd.DataFrame(edges).set_index(['SUID'])
else:
return edges
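# Example sketch (hypothetical names; assumes a running Cytoscape instance with
# cyREST and an existing CyNetwork object `net`):
#   suids = net.add_nodes(['a', 'b', 'c'], dataframe=False)
#   net.add_edges([(suids['a'], suids['b'], 'interacts with'),
#                  (suids['b'], suids['c'], 'interacts with')])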
def delete_node(self, id):
"""
Delete node.
:param id: the node id.
"""
url = self.__url + 'nodes/' + str(id)
self.session.delete(url)
def delete_edge(self, id):
"""
Delete edge.
:param id: the edge id.
"""
url = self.__url + 'edges/' + str(id)
self.session.delete(url)
def __get_table(self, type, format=None):
"""
This method return the table data. You can get node or edge table data by using this.
Cytoscape has two main data types: Network and Table.
Network is the graph topology, and Tables are properties for those graphs.
For simplicity, this method has access to three basic table objects.
:param type: If the value is 'node', this method returns the node table.
On the other hand, if the value is 'edge', this method returns the edge table.
:param format: You can choose data format from these: TSV,CSV,cytoscapejs.
If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
:return : If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
"""
url = self.__url + 'tables/default' + type
if format is None or format is 'dataframe':
uri = url + '.tsv'
return pd.read_csv(uri, sep='\t', index_col=0, header=0)
elif format is 'csv' or format is 'tsv':
return self.session.get(url + '.' + format).content
elif format is 'cytoscapejs':
return self.session.get(url).json()['rows']
else:
raise ValueError('Unsupported format: ' + format)
def get_node_table(self, format=None):
"""
Get node table.
Cytoscape has two main data types: Network and Table.
Network is the graph topology, and Tables are properties for those graphs.
For simplicity, this method has access to three basic node table objects.
:param format: You can choose data format from these: TSV,CSV,cytoscapejs.
If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
:return : If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
"""
return self.__get_table('node', format)
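# Example sketch (assumes a live Cytoscape session and a CyNetwork object `net`):
#   node_df = net.get_node_table()                # DataFrame indexed by the first
#                                                 # table column (typically SUID)
#   csv_bytes = net.get_node_table(format='csv')  # raw CSV bytes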
def get_edge_table(self, format=None):
"""
Get edge table.
Cytoscape has two main data types: Network and Table.
Network is the graph topology, and Tables are properties for those graphs.
For simplicity, this method has access to three basic edge table objects.
:param format: You can choose data format from these: TSV,CSV,cytoscapejs.
If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
:return : If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
"""
return self.__get_table('edge', format)
def get_network_table(self, format=None):
"""
Get network table.
Cytoscape has two main data types: Network and Table.
Network is the graph topology, and Tables are properties for those graphs.
For simplicity, this method has access to three basic network table objects.
:param format: You can choose data format from these: TSV,CSV,cytoscapejs.
If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
:return : If the value of format is None, dataframe or tsv, the return format is TSV.
If the value of format is csv, the return value is csv.
If the value of format is cytoscapejs, the return value is Cytoscape.js style JSON.
"""
return self.__get_table('network', format)
def __get_columns(self, type=None):
url = self.__url + 'tables/default' + type + '/columns'
df = pd.DataFrame(self.session.get(url).json())
return df.set_index(['name'])
def get_node_columns(self):
"""
Get node table columns information as DataFrame
:return: Node columns information as DataFrame
"""
return self.__get_columns('node')
def get_edge_columns(self):
"""
Get edge table columns information as DataFrame
:return: Edge columns information as DataFrame
"""
return self.__get_columns('edge')
def get_network_columns(self):
"""
Get network table columns information as DataFrame
:return: Network columns information as DataFrame
"""
return self.__get_columns('networks')
def __get_column(self, type=None, column=None):
url = self.__url + 'tables/default' + type + '/columns/' + column
result = self.session.get(url).json()
return pd.Series(result['values'])
def get_node_column(self, column):
"""
Get node table column information as DataFrame
:param column: the column that you want to get.
:return : Node column information as DataFrame
"""
return self.__get_column('node', column=column)
def get_edge_column(self, column):
"""
Get edge table column information as DataFrame
:param column: the column that you want to get.
:return : Edge column information as DataFrame
"""
return self.__get_column('edge', column=column)
def __get_value(self, type=None, id=None, column=None):
if column is None and id is not None:
# Extract a row in table
url = self.__url + 'tables/default' + type + '/rows/' + str(id)
return pd.Series(self.session.get(url).json())
elif column is not None and id is not None:
url = self.__url + 'tables/default' + type + '/rows/' + str(id) + '/' + column
return self.session.get(url).content
else:
raise ValueError('ID is required.')
def get_node_value(self, id, column=None):
"""
Get node value information.
:param id: the node id.
:param column: the column that you want to get. If this parameter is omitted (None),
all columns' information for the node is returned.
:return : node value information
"""
return self.__get_value(type='node', id=id, column=column)
def get_edge_value(self, id, column=None):
"""
Get edge value information.
:param id: the edge id.
:param column: the column that you want to get. If this parameter is omitted (None),
all columns' information for the edge is returned.
:return : edge value information
"""
return self.__get_value(type='edge', id=id, column=column)
def get_network_value(self, column):
"""
Get network value information.
:param column: the column that you want to get for the network row.
:return : network value information
"""
return self.__get_value(type='network', id=self.__id, column=column)
def update_node_table(self, df=None, network_key_col='name',
data_key_col=None):
"""
We can update node table by using this method.
:param df: pandas data frame.
:param network_key_col: the column in the network's node table used to match existing rows against the incoming data.
:param data_key_col: the column in df that serves as the matching key. If None, the DataFrame index is used.
:return :
"""
return self.__update_table('node', df=df, network_key_col=network_key_col, data_key_col=data_key_col)
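# Example sketch (hypothetical column and node names; assumes a live session and
# a CyNetwork object `net`):
#   df = pd.DataFrame({'degree': [2, 5]}, index=['node1', 'node2'])
#   net.update_node_table(df, network_key_col='name')  # match rows on node name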
def __update_table(self, type, df, network_key_col='name',
data_key_col=None):
is_index_col = False
if data_key_col is None:
# Use index
data_key = network_key_col
is_index_col = True
else:
data_key = data_key_col
table = {
'key': network_key_col,
'dataKey': data_key
}
if is_index_col:
# Use DataFrame's index as the mapping key
df2 = pd.DataFrame(df)
df2[network_key_col] = df.index
data = df2.to_json(orient='records')
del df2
else:
data = df.to_json(orient='records')
table['data'] = json.loads(data)
url = self.__url + 'tables/default' + type
self.session.put(url, json=table, headers=HEADERS)
def __delete_column(self, type, column):
url = self.__url + 'tables/default' + type + '/columns/' + column
self.session.delete(url)
def delete_node_table_column(self, column):
"""
Delete node table column that you want to delete.
:param column: the column that you want to delete.
"""
self.__delete_column('node', column=column)
def delete_edge_table_column(self, column):
"""
Delete edge table column that you want to delete.
:param column: the column that you want to delete.
"""
self.__delete_column('edge', column=column)
def delete_network_table_column(self, column):
"""
Delete network table column that you want to delete.
:param column: the column that you want to delete.
"""
self.__delete_column('network', column=column)
def __create_column(self, type, name, data_type, immutable, list):
url = self.__url + 'tables/default' + type + '/columns'
new_column = {
'name': name,
'type': data_type,
'immutable': immutable,
'list': list
}
self.session.post(url, data=json.dumps(new_column), headers=HEADERS)
def create_node_column(self, name, data_type='String', is_immutable=False, is_list=False):
"""
Create new node column.
:param name: This is the column name.
:param data_type: This is the column data type. The default value is 'String'. If you want to change type, you put type.
:param is_immutable: The default value is 'False'. If you want to make this column immutable, pass 'True' in this parameter.
:param is_list: The default value is 'False'
"""
self.__create_column('node', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
def create_edge_column(self, name, data_type='String', is_immutable=False, is_list=False):
"""
Create new edge column.
:param name: This is the column name.
:param data_type: This is the column data type. The default value is 'String'. If you want to change type, you put type.
:param is_immutable: The default value is 'False'. If you want to make this column immutable, pass 'True' in this parameter.
:param is_list: The default value is 'False'
"""
self.__create_column('edge', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
def create_network_column(self, name, data_type='String', is_immutable=False, is_list=False):
"""
Create new network column.
:param name: This is the column name.
:param data_type: This is the column data type. The default value is 'String'. If you want to change type, you put type.
:param is_immutable: The default value is 'False'. If you want to make this column immutable, pass 'True' in this parameter.
:param is_list: The default value is 'False'
"""
self.__create_column('network', name=name, data_type=data_type, immutable=is_immutable, list=is_list)
# Utility functions
def get_neighbours(self, node_id):
"""
Get the node's neighbours.
:param node_id: the id of the node of interest.
:return : the neighbouring nodes as JSON.
"""
url = self.__url + 'nodes/' + str(node_id) + '/neighbors'
return self.session.get(url).json()
def get_adjacent_edges(self, node_id):
"""
Get the edges adjacent to a node.
:param node_id: the id of the node of interest.
:return : the adjacent edges as JSON.
"""
url = self.__url + 'nodes/' + str(node_id) + '/adjEdges'
return self.session.get(url).json()
# Views
def get_views(self):
"""
Get views as a list of SUIDs
:return:
"""
url = self.__url + 'views'
return self.session.get(url).json()
def get_png(self, height=1200):
"""
Get the graph as png image.
:param height: The default height is 1200.
:return : the PNG image as bytes.
"""
url = self.__url + 'views/first.png?h=' + str(height)
return self.session.get(url).content
def get_svg(self, height=1200):
"""
Get the graph as svg image.
:param height: The default height is 1200.
:return : the SVG image as bytes.
"""
url = self.__url + 'views/first.svg?h=' + str(height)
return self.session.get(url).content
def get_pdf(self):
"""
Get the graph as pdf image.
:return : the PDF document as bytes.
"""
url = self.__url + 'views/first.pdf'
return self.session.get(url).content
def get_first_view(self, format='json'):
"""
Get a first view model as dict
:return:
"""
url = self.__url + 'views/first'
return self.session.get(url).json()
def get_view(self, view_id, format='json'):
"""
:param view_id:
:param format:
:return :
"""
if format is 'json':
url = self.__url + 'views/' + str(view_id)
return self.session.get(url).json()
elif format is 'view':
return self.__get_view_object(view_id)
else:
return None
def __get_view_object(self, view_id):
"""
Create a new CyNetworkView object for the given ID.
:param view_id:
:return:
"""
view = CyNetworkView(self, view_id)
return view
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__id == other.__id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def check_response(res):
"""
Check HTTP response and raise exception if response is not OK.
:param res:
"""
try:
res.raise_for_status()  # Alternative is res.ok
except Exception as exc:
# Bad response code, e.g. if adding an edge with nodes that don't exist
try:
err_info = res.json()
err_msg = err_info['message'] # or 'localizeMessage'
except ValueError:
err_msg = res.text[:40] # Take the first 40 chars of the response
except KeyError:
err_msg = res.text[:40] + ("(No 'message' in err_info dict: %s"
% list(err_info.keys()))
exc.args += (err_msg,)
raise exc
|
idekerlab/cyrest-examples
|
py2cytoscape_doc/py2cytoscape/data/cynetwork.py
|
Python
|
mit
| 22,908
|
[
"Cytoscape"
] |
6a5235f348979ab7e9a8d36647e7a7f53bffeb8977ad3c6a98ddbcda74458c23
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate impulse responses for several type of filters."""
import math
import tensorflow.compat.v2 as tf
def amplitude(filters: tf.Tensor, i: int) -> float:
return tf.math.abs(tf.signal.fftshift(tf.signal.fft(filters[:, i])))
def gabor_impulse_response(t: tf.Tensor, center: tf.Tensor,
fwhm: tf.Tensor) -> tf.Tensor:
"""Computes the gabor impulse response."""
denominator = 1.0 / (tf.math.sqrt(2.0 * math.pi) * fwhm)
gaussian = tf.exp(tf.tensordot(1.0 / (2. * fwhm**2), -t**2, axes=0))
center_frequency_complex = tf.cast(center, tf.complex64)
t_complex = tf.cast(t, tf.complex64)
sinusoid = tf.math.exp(
1j * tf.tensordot(center_frequency_complex, t_complex, axes=0))
denominator = tf.cast(denominator, dtype=tf.complex64)[:, tf.newaxis]
gaussian = tf.cast(gaussian, dtype=tf.complex64)
return denominator * sinusoid * gaussian
def gabor_filters(kernel, size: int = 401) -> tf.Tensor:
"""Computes the gabor filters from its parameters for a given size.
Args:
kernel: tf.Tensor<float>[filters, 2] the parameters of the Gabor kernels.
size: the size of the output tensor.
Returns:
A tf.Tensor<float>[filters, size].
"""
return gabor_impulse_response(
tf.range(-(size // 2), (size + 1) // 2, dtype=tf.float32),
center=kernel[:, 0], fwhm=kernel[:, 1])
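# Example sketch (hypothetical parameter values, not tied to any trained model):
# one Gabor kernel with centre frequency 0.3 rad/sample and width 40 samples.
#   kernel = tf.constant([[0.3, 40.0]])
#   filters = gabor_filters(kernel, size=401)  # complex64 tensor, shape (1, 401)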
def sinc_impulse_response(t: tf.Tensor, frequency: tf.Tensor) -> tf.Tensor:
"""Computes the sinc impulse response."""
return tf.sin(2*math.pi*frequency*t) / (2*math.pi*frequency*t)
def sinc_filters(cutoff_freq_low: tf.Tensor,
cutoff_freq_high: tf.Tensor,
size: int = 401,
sample_rate: int = 16000) -> tf.Tensor:
"""Computes the sinc filters from its parameters for a given size.
Sinc is not defined at zero, so the negative (left_range) and positive
(right_range) parts are computed separately.
Args:
cutoff_freq_low: tf.Tensor<float>[1, filters] the lower cutoff frequencies
of the bandpass.
cutoff_freq_high: tf.Tensor<float>[1, filters] the upper cutoff frequencies
of the bandpass.
size: the size of the output tensor.
sample_rate: audio sampling rate
Returns:
A tf.Tensor<float>[size, filters].
"""
left_range = tf.range(
-(size // 2), 0, dtype=tf.float32)[:, tf.newaxis] / tf.cast(
sample_rate, dtype=tf.float32)
right_range = tf.range(
1, size // 2 + 1, dtype=tf.float32)[:, tf.newaxis] / tf.cast(
sample_rate, dtype=tf.float32)
high_pass_left_range = 2 * cutoff_freq_high * sinc_impulse_response(
left_range, cutoff_freq_high)
high_pass_right_range = 2 * cutoff_freq_high * sinc_impulse_response(
right_range, cutoff_freq_high)
low_pass_left_range = 2 * cutoff_freq_low * sinc_impulse_response(
left_range, cutoff_freq_low)
low_pass_right_range = 2 * cutoff_freq_low * sinc_impulse_response(
right_range, cutoff_freq_low)
high_pass = tf.concat(
[high_pass_left_range, 2 * cutoff_freq_high, high_pass_right_range],
axis=0)
low_pass = tf.concat(
[low_pass_left_range, 2 * cutoff_freq_low, low_pass_right_range], axis=0)
band_pass = high_pass - low_pass
return band_pass / tf.reduce_max(band_pass, axis=0, keepdims=True)
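# Example sketch (hypothetical band edges in Hz): two band-pass filters,
# 100-200 Hz and 300-600 Hz, at a 16 kHz sampling rate.
#   low = tf.constant([[100.0, 300.0]])   # lower cutoffs, shape [1, filters]
#   high = tf.constant([[200.0, 600.0]])  # upper cutoffs, shape [1, filters]
#   bp = sinc_filters(low, high, size=401, sample_rate=16000)  # shape (401, 2)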
def gaussian_lowpass(sigma: tf.Tensor, filter_size: int):
"""Generates gaussian windows centered in zero, of std sigma.
Args:
sigma: tf.Tensor<float>[1, 1, C, 1] for C filters.
filter_size: length of the filter.
Returns:
A tf.Tensor<float>[1, filter_size, C, 1].
"""
sigma = tf.clip_by_value(
sigma, clip_value_min=(2. / filter_size), clip_value_max=0.5)
t = tf.range(0, filter_size, dtype=tf.float32)
t = tf.reshape(t, (1, filter_size, 1, 1))
numerator = t - 0.5 * (filter_size - 1)
denominator = sigma * 0.5 * (filter_size - 1)
return tf.math.exp(-0.5 * (numerator / denominator)**2)
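# Example sketch (hypothetical shapes): 40 Gaussian windows of 401 samples each,
# all with a normalized width of 0.3.
#   sigma = tf.fill([1, 1, 40, 1], 0.3)
#   windows = gaussian_lowpass(sigma, filter_size=401)  # shape (1, 401, 40, 1)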
|
google-research/leaf-audio
|
leaf_audio/impulse_responses.py
|
Python
|
apache-2.0
| 4,492
|
[
"Gaussian"
] |
131ebb18e0a68919ce538c59b8283827210801910a45d550dbbe35fee70273f3
|
# -*- coding: utf-8 -*-
"""
.. _ex-sim-raw-sub:
=======================================
Simulate raw data using subject anatomy
=======================================
This example illustrates how to generate source estimates and simulate raw data
using subject anatomy with the :class:`mne.simulation.SourceSimulator` class.
Once the raw data is simulated, generated source estimates are reconstructed
using dynamic statistical parametric mapping (dSPM) inverse operator.
"""
# Author: Ivana Kojcic <ivana.kojcic@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Kostiantyn Maksymenko <kostiantyn.maksymenko@gmail.com>
# Samuel Deslauriers-Gauthier <sam.deslauriers@gmail.com>
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
print(__doc__)
# In this example, raw data will be simulated for the sample subject, so its
# information needs to be loaded. This step will download the data if it is not
# already on your machine. The subjects directory is also set so it doesn't need
# to be passed to functions.
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
meg_path = op.join(data_path, 'MEG', subject)
# First, we get an info structure from the sample subject.
fname_info = op.join(meg_path, 'sample_audvis_raw.fif')
info = mne.io.read_info(fname_info)
tstep = 1 / info['sfreq']
# To simulate sources, we also need a source space. It can be obtained from the
# forward solution of the sample subject.
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
# To simulate raw data, we need to define when the activity occurs using an
# events matrix and specify the IDs of each event.
# Noise covariance matrix also needs to be defined.
# Here, both are loaded from the sample dataset, but they can also be specified
# by the user.
fname_event = op.join(meg_path, 'sample_audvis_raw-eve.fif')
fname_cov = op.join(meg_path, 'sample_audvis-cov.fif')
events = mne.read_events(fname_event)
noise_cov = mne.read_cov(fname_cov)
# Standard sample event IDs. These values will correspond to the third column
# in the events matrix.
event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'button': 32}
# Take only a few events for speed
events = events[:80]
# %%
# In order to simulate source time courses, labels of desired active regions
# need to be specified for each of the 4 simulation conditions.
# Make a dictionary that maps conditions to activation strengths within
# aparc.a2009s :footcite:`DestrieuxEtAl2010` labels.
# In the aparc.a2009s parcellation:
#
# - 'G_temp_sup-G_T_transv' is the label for primary auditory area
# - 'S_calcarine' is the label for primary visual area
#
# In each of the 4 conditions, only the primary area is activated. This means
# that during the activations of auditory areas, there are no activations in
# visual areas and vice versa.
# Moreover, for each condition, contralateral region is more active (here, 2
# times more) than the ipsilateral.
activations = {
'auditory/left':
[('G_temp_sup-G_T_transv-lh', 30), # label, activation (nAm)
('G_temp_sup-G_T_transv-rh', 60)],
'auditory/right':
[('G_temp_sup-G_T_transv-lh', 60),
('G_temp_sup-G_T_transv-rh', 30)],
'visual/left':
[('S_calcarine-lh', 30),
('S_calcarine-rh', 60)],
'visual/right':
[('S_calcarine-lh', 60),
('S_calcarine-rh', 30)],
}
annot = 'aparc.a2009s'
# Load the 4 necessary label names.
label_names = sorted(set(activation[0]
for activation_list in activations.values()
for activation in activation_list))
region_names = list(activations.keys())
# %%
# Create simulated source activity
# --------------------------------
#
# Generate source time courses for each region. In this example, we want to
# simulate source activity for a single condition at a time. Therefore, each
# evoked response will be parametrized by latency and duration.
def data_fun(times, latency, duration):
"""Function to generate source time courses for evoked responses,
parametrized by latency and duration."""
f = 15 # oscillating frequency, beta band [Hz]
sigma = 0.375 * duration
sinusoid = np.sin(2 * np.pi * f * (times - latency))
gf = np.exp(- (times - latency - (sigma / 4.) * rng.rand(1)) ** 2 /
(2 * (sigma ** 2)))
return 1e-9 * sinusoid * gf
# %%
# Here, :class:`~mne.simulation.SourceSimulator` is used, which allows to
# specify where (label), what (source_time_series), and when (events) event
# type will occur.
#
# We will add data for 4 areas, each of which contains 2 labels. Since add_data
# method accepts 1 label per call, it will be called 2 times per area.
#
# Evoked responses are generated such that the main component peaks at 100ms
# with a duration of around 30ms, which first appears in the contralateral
# cortex. This is followed by a response in the ipsilateral cortex with a peak
# about 15ms after. The amplitude of the activations will be 2 times higher in
# the contralateral region, as explained before.
#
# When the activity occurs is defined using events. In this case, they are
# taken from the original raw data. The first column is the sample of the
# event, the second is not used. The third one is the event id, which is
# different for each of the 4 areas.
times = np.arange(150, dtype=np.float64) / info['sfreq']
duration = 0.03
rng = np.random.RandomState(7)
source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
for region_id, region_name in enumerate(region_names, 1):
events_tmp = events[np.where(events[:, 2] == region_id)[0], :]
for i in range(2):
label_name = activations[region_name][i][0]
label_tmp = mne.read_labels_from_annot(subject, annot,
subjects_dir=subjects_dir,
regexp=label_name,
verbose=False)
label_tmp = label_tmp[0]
amplitude_tmp = activations[region_name][i][1]
if region_name.split('/')[1][0] == label_tmp.hemi[0]:
latency_tmp = 0.115
else:
latency_tmp = 0.1
wf_tmp = data_fun(times, latency_tmp, duration)
source_simulator.add_data(label_tmp,
amplitude_tmp * wf_tmp,
events_tmp)
# To obtain a SourceEstimate object, we need to use `get_stc()` method of
# SourceSimulator class.
stc_data = source_simulator.get_stc()
# %%
# Simulate raw data
# -----------------
#
# Project the source time series to sensor space. Three types of noise will be
# added to the simulated raw data:
#
# - multivariate Gaussian noise obtained from the noise covariance from the
# sample data
# - blink (EOG) noise
# - ECG noise
#
# The :class:`~mne.simulation.SourceSimulator` can be given directly to the
# :func:`~mne.simulation.simulate_raw` function.
raw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
raw_sim.set_eeg_reference(projection=True)
mne.simulation.add_noise(raw_sim, cov=noise_cov, random_state=0)
mne.simulation.add_eog(raw_sim, random_state=0)
mne.simulation.add_ecg(raw_sim, random_state=0)
# Plot original and simulated raw data.
raw_sim.plot(title='Simulated raw data')
# %%
# Extract epochs and compute evoked responses
# --------------------------------------------
#
epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3,
baseline=(None, 0))
evoked_aud_left = epochs['auditory/left'].average()
evoked_vis_right = epochs['visual/right'].average()
# Visualize the evoked data
evoked_aud_left.plot(spatial_colors=True)
evoked_vis_right.plot(spatial_colors=True)
# %%
# Reconstruct simulated source time courses using dSPM inverse operator
# ---------------------------------------------------------------------
#
# Here, source time courses for auditory and visual areas are reconstructed
# separately and their difference is shown. This was done merely for better
# visual representation of source reconstruction.
# As expected, when high activations appear in primary auditory areas, primary
# visual areas will have low activations and vice versa.
method, lambda2 = 'dSPM', 1. / 9.
inv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov)
stc_aud = mne.minimum_norm.apply_inverse(
evoked_aud_left, inv, lambda2, method)
stc_vis = mne.minimum_norm.apply_inverse(
evoked_vis_right, inv, lambda2, method)
stc_diff = stc_aud - stc_vis
brain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1,
hemi='split', views=['lat', 'med'])
# %%
# References
# ----------
# .. footbibliography::
|
mne-tools/mne-python
|
examples/simulation/simulated_raw_data_using_subject_anatomy.py
|
Python
|
bsd-3-clause
| 8,955
|
[
"Gaussian"
] |
5a819a48d63a8fedb5fa3a83f96607b96a989e7a7daa8edb62256450bae081de
|
#!/usr/bin/env python2.7
# Copyright 2014 Mark Chilenski
# This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
__version__ = '1.1.3'
PROG_NAME = 'gpfit'
import collections
# Define the systems that can be selected for each signal:
SYSTEM_OPTIONS = collections.OrderedDict([
('ne', ['CTS', 'ETS', 'TCI', 'reflect']),
('Te', ['CTS', 'ETS', 'GPC', 'GPC2', 'FRCECE', 'Mic']),
('emiss', ['AXA', 'AXJ'])
])
# List of all valid systems:
valid_systems = set()
for sig, sys in SYSTEM_OPTIONS.iteritems():
for s in sys:
valid_systems.add(s)
# Shortcut flag for ETS+CTS:
valid_systems.add('TS')
# Define which systems are excluded by default:
DEFAULT_EXCLUDE = ['TCI', 'reflect', 'Mic']
# Define the coordinates that can be specified:
COORDINATE_OPTIONS = [
'r/a', 'psinorm', 'Rmid', 'volnorm', 'phinorm',
'sqrtpsinorm', 'sqrtr/a', 'sqrtvolnorm', 'sqrtphinorm'
]
# Define averaging methods available. First entry is default.
METHOD_OPTIONS = ['conventional', 'robust', 'all points']
# Define uncertainty methods available. First entry is default.
ERROR_METHOD_OPTIONS = ['sample', 'RMS', 'total', 'of mean', 'of mean sample']
# Define uncertainty fudging methods available. First entry is default.
FUDGE_METHOD_OPTIONS = ['override', 'minimum', 'add']
# Define uncertainty fudging types available. First entry is default.
FUDGE_TYPE_OPTIONS = ['absolute', 'relative']
# Make form suitable for command line entry:
error_method_cl = [s.replace(' ', '_') for s in ERROR_METHOD_OPTIONS]
# Define the kernels supported and their hyperparameters.
# The first entry is the label, the second is the description.
HYPERPARAMETERS = collections.OrderedDict([
(
'gibbstanh',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u21131", "core length scale"),
(u"\u21132", "edge length scale"),
(u"\u2113w", "transition width"),
(u"x0", "transition location")
])
),
(
'gibbsdoubletanh',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u2113c", "core length scale"),
(u"\u2113m", "mid length scale"),
(u"\u2113e", "edge length scale"),
(u"\u2113a", "first transition width"),
(u"\u2113b", "second transition width"),
(u"xa", "first transition"),
(u"xb", "second transition")
])
),
(
'SE',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u2113", "length scale"),
])
),
(
'SEsym1d',
collections.OrderedDict([
(u"1", "VOID"),
(u"2", "VOID"),
(u"\u03C3f", "signal variance"),
(u"\u2113", "length scale")
])
),
(
'SEbeta',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u2113", "length scale"),
(u"\u03B1", "warping alpha"),
(u"\u03B2", "warping beta")
])
),
(
'RQ',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"a", "order"),
(u"\u2113", "length scale")
])
),
(
'matern',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u03BD", "order"),
(u"\u2113", "length scale")
])
),
(
'matern52',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u2113", "length scale")
])
),
(
'matern52beta',
collections.OrderedDict([
(u"\u03C3f", "signal variance"),
(u"\u2113", "length scale"),
(u"\u03B1", "warping alpha"),
(u"\u03B2", "warping beta")
])
),
])
# Define the (univariate) hyperpriors and their (hyperhyper)parameters:
HYPERPRIORS = collections.OrderedDict(
[
('uniform', [u"lb", u"ub"]),
('gamma', [u"\u03b1", u"\u03b2"]),
('alt-gamma', [u"m", u"\u03c3"]),
('normal', [u"\u03bc", u"\u03c3"]),
('log-normal', [u"\u03bc", u"\u03c3"]),
]
)
# Define some (vaguely) sensible defaults for the hyperpriors:
# Key is the (unicode) short name for the hyperparameter (as used as a key in
# the inner dictionaries of HYPERPARAMETERS, above).
# Value is a tuple with ('name', [p1, p2, ...]) (i.e., a key-value pair ordered
# like in HYPERPRIORS above, but with the specific initial values given for the
# hyperhyperparameters).
HYPERPRIOR_DEFAULTS = {
u"\u03C3f": ('uniform', [0.0, 20.0]),
u"\u21131": ('alt-gamma', [1.0, 0.3]),
u"\u21132": ('alt-gamma', [0.5, 0.25]),
u"\u2113w": ('alt-gamma', [0.0, 0.1]),
u"x0": ('alt-gamma', [1.0, 0.1]),
u"\u2113c": ('alt-gamma', [1.0, 0.3]),
u"\u2113m": ('alt-gamma', [1.0, 0.3]),
u"\u2113e": ('alt-gamma', [0.5, 0.25]),
u"\u2113a": ('alt-gamma', [0.0, 0.1]),
u"\u2113b": ('alt-gamma', [0.0, 0.1]),
u"xa": ('uniform', [0.0, 1.0]),
u"xb": ('alt-gamma', [1.0, 0.1]),
u"\u2113": ('alt-gamma', [1.0, 0.3]),
u"1": ('uniform', [0.0, 20.0]),
u"2": ('alt-gamma', [1.0, 0.3]),
u"\u03B1": ('log-normal', [0.0, 0.25]),
u"\u03B2": ('log-normal', [1.0, 1.0]),
u"a": ('uniform', [0.0, 100.0]),
u"\u03BD": ('uniform', [1.0, 50.0])
}
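# For example, the entry for the signal variance,
# HYPERPRIOR_DEFAULTS[u"\u03C3f"] == ('uniform', [0.0, 20.0]),
# means sigma_f defaults to a uniform hyperprior on [0, 20].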
# Define ASCII-only names for the hyperparameters:
HYPERPARAMETER_NAMES = collections.OrderedDict(
[
('sigma_f', u"\u03C3f"),
('l_1', u"\u21131"),
('l_2', u"\u21132"),
('l_w', u"\u2113w"),
('x_0', u"x0"),
('l_c', u"\u2113c"),
('l_m', u"\u2113m"),
('l_e', u"\u2113e"),
('l_a', u"\u2113a"),
('l_b', u"\u2113b"),
('x_a', u"xa"),
('x_b', u"xb"),
('l', u"\u2113"),
('alpha', u"\u03B1"),
('beta', u"\u03B2"),
('a', u"a"),
('nu', u"\u03BD")
]
)
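# e.g., HYPERPARAMETER_NAMES['sigma_f'] == u"\u03C3f"; these ASCII names are what
# the --hyperprior flag accepts on the command line.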
# Configure and parse command line arguments:
import argparse
# class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
# pass
parser = argparse.ArgumentParser(
description="""Fit univariate profile using gptools/profiletools.
BASIC USAGE DETAILS:
Calling without arguments will enter an interactive mode, or you can use the
command line flags to completely specify the options you wish to use. This
program can operate on ne, Te data from the tree, or you can provide data in a
NetCDF or CSV file.
You can choose whether to average over a time window, a specific set of points
or to use a single time slice. Even if providing data in a file, you should
specify the shot number and time window so that appropriate constraints can be
imposed on the fit at the magnetic axis and edge, and so that coordinate
transformations can be performed.
EXAMPLES:
Basic way to fit ne profile from shot 1101014006, averaged over the flat top
from 0.965s to 1.365s, using core and edge TS:
%s --shot 1101014006 --signal ne --t-min 0.965 --t-max 1.365 --system TS
Basic way to fit data from NetCDF file foo.nc, assuming the data are from the
time window 0.965s to 1.365s of shot 1101014006. The abscissa is specified to be
normalized poloidal flux and is stored in the variable psin in the NetCDF file.
The ordinate is stored in the variable q and its uncertainty in err_q:
%s -i foo.nc --coordinate psinorm --t-min 0.965 --t-max 1.365 -x psin -y q --shot 1101014006
FIXING/IMPROVING THE FIT:
Several things can go wrong in the fit. If there are bad points/outliers in your
data you can attempt to remove them by specifying one or both of
--change-threshold and --outlier-threshold, or you can flag specific points by
their indices using --remove-points. Change threshold rejects points that are
too distant from their neighbors, outlier threshold rejects points that are too
distant from the fit.
If there are no apparent outliers but the fit still looks bad, then there is
likely an issue with the estimation of the fit's properties -- namely the
so-called hyperparameters that dictate the spatial correlation between points.
Try increasing the --random-starts flag to at least 8 as a first cut. This may
make the fit take quite a bit longer, but it is parallelized, so the more cores
your computer has, the faster you will have your answer. If this still yields
unsatisfactory fits, try adjusting the bounds for the hyperparameters using
--bounds.
Note that many warnings regarding overflow in cosh and casting complex values
will be emitted -- these are usually benign. You will also see warnings that
the minimizer failed. These indicate that a particular random guess for the
hyperparameters walked the minimizer into a bad state. At the end of the
optimization you will be told how many starts were accepted. Try to increase
--random-starts and/or adjust --bounds until this number is at least 4.
READING FROM FILES:
The support for reading data from NetCDF and CSV files is fairly powerful. With
either type of file, you can specify the column/variable names to be of the form
"name [units]" which will be automatically parsed to generate the right plot
labels. (Though it is better to just set the "units" attribute of each variable
in your NetCDF file, which is the preferred approach there.) The CSV reader is
smart enough to figure out your column names, as long as you put the time column
first when using data you haven't time-averaged yet. In either type of file you
can include the metadata needed to apply core/edge constraints. For CSV files,
start the file with as many lines as needed of the form "name data" or
"name data,data,..." Be sure to either make the first line be "metadata N" where
N is the number of metadata lines used or specify --metadata-lines when doing
this! For NetCDF files, simply place the metadata in the appropriate attributes
of the file. The supported metadata are:
========== =======================================================
shot shot number
times comma-separated list of times included in the data
t_min minimum time included in the data
t_max maximum time included in the data
coordinate the abscissa the data are represented as a function of,
valid choices are:
{psinorm,Rmid,r/a,volnorm,phinorm,sqrtpsinorm,sqrtr/a,
sqrtvolnorm,sqrtphinorm}
========== =======================================================
""" % (PROG_NAME, PROG_NAME,),
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--signal',
choices=SYSTEM_OPTIONS.keys(),
help="Which signal to fit when pulling data from the tree."
)
parser.add_argument(
'--shot',
type=int,
help="Shot number to use. Required when pulling data from the tree. When "
"pulling data from a file, this is needed to specify constraints at the "
"magnetic axis and limiter."
)
parser.add_argument(
'--EFIT-tree',
help="EFIT tree to use. Default is ANALYSIS. Otherwise, give a name like "
"'EFIT20'."
)
parser.add_argument(
'--t-min',
type=float,
help="Starting time of period to average over. If you are reading data from "
"a file, you can set this flag to tell the program what time window to "
"average over when finding the location of the limiter/magnetic axis "
"when applying constraints."
)
parser.add_argument(
'--t-max',
type=float,
help="Ending time of period to average over. If you are reading data from a "
"file, you can set this flag to tell the program what time window to "
"average over when finding the location of the limiter/magnetic axis "
"when applying constraints."
)
parser.add_argument(
'-t', '--t-points',
type=float,
metavar='T_POINT',
nargs='+',
help="Individual time values to use. The nearest time to each will be "
"selected for each channel. You can use this, for instance, to specify "
"the times you have determined are at a particular sawtooth/ELM phase. "
"You must either specify --t-min and --t-max, or -t."
)
parser.add_argument(
'--t-tol',
type=float,
help="Tolerance for how close a point must be to a value in '--t-points' to "
"be included. Default is to allow points to be arbitrarily far away."
)
parser.add_argument(
'--npts',
type=int,
# default=400,
help="Number of evenly-spaced points to evaluate the fit at. Default is 400."
)
parser.add_argument(
'--x-min',
type=float,
# default=0,
help="Starting point for the evenly-spaced points to evaluate the fit at. "
"Default is 0.0."
)
parser.add_argument(
'--x-max',
type=float,
# default=1.2,
help="Ending point for the evenly-spaced points to evaluate the fit at. "
"Default is 1.2."
)
parser.add_argument(
'--x-pts',
type=float,
metavar='X_PT',
nargs='+',
help="Discrete points to evaluate the fit at. If present, this overrides the "
"effect of npts, x-min and x-max."
)
parser.add_argument(
'--system',
nargs='+',
choices=valid_systems,
help="Which system(s) to take data from. If not provided, all applicable "
"systems will be used. The 'TS' option is a shortcut to include both "
"the core (CTS) and edge (ETS) Thomson systems. Note that working with "
"TCI data is rather slow. Also note that the statistics of including "
"the SOL reflectometer are questionable, so your uncertainties should "
"be taken with a grain of salt when using those data."
)
parser.add_argument(
'--TCI-quad-points',
type=int,
# default=100,
help="Number of quadrature points to use when approximating the TCI line "
"integrals. The higher this number is, the more accurate the "
"integration will be, but the slower all operations on the Gaussian "
"process will be. The default of 100 is a preliminary, conservative "
"estimate of the minimum necessary to perform an accurate fit."
)
parser.add_argument(
'--TCI-thin',
type=int,
# default=1,
help="Amount by which the TCI data are thinned. The TCI data taken at a much "
"higher time resolution than most applications need. This will allow "
"you to skip some samples when performing the very "
"computationally-expensive computation of the quadrature weights. Note "
"that this takes effect during the loading of the data, so to reverse "
"this you will have to reload all data. Default is 1 (no thinning)."
)
parser.add_argument(
'--TCI-ds',
type=float,
# default=1e-3,
help="Step size (in m) to use when constructing the TCI quadrature weights. "
"The smaller this is the more accurate the integration will be, but at "
"the expense of making the loading of the TCI data take much longer. "
"The default value of 1e-3 is what is recommended by TRIPPy and is "
"somewhat conservative."
)
parser.add_argument(
'--kernel',
choices=HYPERPARAMETERS.keys(),
# default='gibbstanh',
help="Which covariance kernel to use. This dictates the properties of the "
"fit. "
"* gibbstanh is the Gibbs kernel with tanh warping of the length scale. "
"This kernel allows the entire profile to be fit at once, and should be "
"used if you have edge data. "
"* gibbsdoubletanh is an experimental Gibbs kernel whose warping "
"function is the sum of two hyperbolic tangents. This may be useful for "
"whole profiles with complicated shapes. "
"* SE is the squared exponential kernel, which is good for core data. "
"* SEsym1d is an experimental SE kernel with symmetry constraint "
"imposed by construction. This is primarily useful for core data. "
"* SEbeta is an experimental SE kernel whose arguments are warped using "
"the regularized incomplete beta function. This is good when you have "
"edge data. "
"* RQ is the rational quadratic kernel, good for core data. "
"* matern is the Matern kernel, which is also potentially useful for "
"core data. Note that the matern kernel is VERY SLOW to evaluate, "
"particularly if you need gradients. "
"* matern52 is a task-specific implementation of the Matern kernel with "
"the order fixed at nu=5/2. This is MUCH faster than the basic matern. "
"This is mostly suitable for core data. "
"* matern52beta is the same as matern52, but with the same warping as "
"SEbeta applied. This is potentially suitable for fitting entire "
"profiles. "
"You will typically want to set --no-edge-constraint and/or --core-only "
"if you specify any kernel other gibbstanh and gibbsdoubletanh. See "
"also --core-only. The default is gibbstanh, or SE if --core-only is "
"set."
)
parser.add_argument(
'--coordinate',
choices=COORDINATE_OPTIONS,
# default='',
help="Which coordinate to fit against. Defaults to r/a when pulling data "
"from the tree. Used to determine how to apply core/edge constraints "
"when pulling data from a file."
)
parser.add_argument(
'--no-core-constraint',
action='store_true',
help="Set this flag to disable the slope=0 constraint at the magnetic axis."
)
parser.add_argument(
'--no-edge-constraint',
action='store_true',
help="Set this flag to disable the slope, value=0 constraint at/outside the "
"GH limiter."
)
parser.add_argument(
'--core-constraint-location',
type=float,
metavar='LOC',
nargs='+',
help="Location to impose slope=0 constraint at. Typically this is the "
"magnetic axis. If you specify a shot number and times then this will "
"be found automatically, but you can override it with this flag. Note "
"that you can specify multiple locations if you want to have multiple "
"points where the slope goes to exactly zero."
)
parser.add_argument(
'--edge-constraint-locations',
type=float,
metavar='LOC',
nargs='+',
help="Location to impose slope~0, value~0 constraints at. Typically this is "
"at the location of the GH limiter. If you specify a shot number and "
"times then this will be found automatically, but you can override it "
"with this flag. It helps to specify a couple of points outside the GH "
"limiter, as well."
)
parser.add_argument(
'--core-only',
action='store_true',
help="Set this flag to only fit the data inside the LCFS. This will switch "
"to using a squared exponential kernel, and will disable the edge value, "
"slope constraints."
)
parser.add_argument(
'--robust',
action='store_true',
help="Set this flag to use robust estimators (median, IQR) when performing "
"time-averages. Note that using robust weighted estimators will not "
"work for small numbers of data points."
)
parser.add_argument(
'--uncertainty-method',
choices=error_method_cl,
# default='sample',
help="Method by which the uncertainty should be propagated when "
"time-averaging. "
"* sample (the default) will take the sample standard deviation, and is "
"usually appropriate for cases where you have many points to average "
"over and the data are not completely stationary in time. "
"* RMS uses the root-mean-square standard deviation, and is appropriate "
"for small sample sizes. Note that this is questionable when applied to "
"diagnostics other than TS for which the individual error bars are "
"estimated as some fixed percent of the value. "
"* total uses the law of total variance which is the square root of the "
"sum of the mean square uncertainty and sample variance. This is "
"appropriate when the given points already represent actual sample "
"means/variances. "
"* of_mean uses the uncertainty in the mean using the individual error "
"bars on the points, and is only appropriate if the data are steady in "
"time. It is very questionable to use this with robust estimators."
"* of_mean_sample uses the uncertainty in the mean using the sample "
"standard deviation, and is only appropriate if the data are steady in "
"time. It is very questionable to use this with robust estimators."
)
parser.add_argument(
'--unweighted',
action='store_true',
help="Set this flag to use unweighted estimators when averaging the data. "
"Otherwise the weights used are 1/sigma_i^2. Note that using robust "
"weighted estimators will not work for small numbers of data points. "
"Note that weighting is only ever applied to diagnostics like CTS and "
"ETS for which there are computed error bars in the tree."
)
parser.add_argument(
'--all-points', '--no-average',
action='store_true',
help="Set this flag to keep all points from the time window selected instead "
"of performing a time average. This will make the fit take longer and "
"is statistically questionable, but may be useful in some cases."
)
parser.add_argument(
'--uncertainty-adjust-value',
type=float,
help="The value by which the uncertainty is adjusted (if at all). Use "
"--uncertainty-adjust-method to pick how this value is employed and "
"--uncertainty-adjust-type to indicate whether this is an absolute or "
"relative uncertainty."
)
parser.add_argument(
'--uncertainty-adjust-method',
choices=FUDGE_METHOD_OPTIONS,
help="The method by which the uncertainty is adjusted. "
"* override will override all of the uncertainties with the given value. "
"* minimum will only override uncertainties which are smaller than the "
" given value. "
"* add will add the given uncertainty (in quadrature) to the uncertainty "
"computed in the usual manner. "
"Default is %s." % (FUDGE_METHOD_OPTIONS[0],)
)
parser.add_argument(
'--uncertainty-adjust-type',
choices=FUDGE_TYPE_OPTIONS,
help="The type of uncertainty (relative or absolute) that is specified with "
"--uncertainty-adjust-value. Default is %s." % (FUDGE_TYPE_OPTIONS[0],)
)
parser.add_argument(
'--change-threshold',
type=float,
help="If provided, any points whose differences with respect to either of "
"their neighbors are more than this many times their own error bar will "
"be rejected. This is useful for getting rid of bad channels. A value "
"of 9 is often useful. Note that this does not take into account the "
"uncertainties on the neighbors -- it is primarily designed to catch "
"bad channels that don't get caught by the method employed by "
"--outlier-threshold. This can lead to good data getting thrown out if "
"the threshold is too low."
)
parser.add_argument(
'--outlier-threshold',
type=float,
help="If provided, any points whose values are more than this many times "
"their own error bar outside of the fit will be rejected. A value of 3 "
"is often useful. Note that this can get thrown off by extremely bad "
"channels that drag the whole fit off."
)
parser.add_argument(
'--remove-points',
type=int,
nargs='+',
help="Indices of points to remove. These are the indices in the combined "
"Profile object. These will usually be the same from shot-to-shot, but "
"may change if entire channels are removed during data loading. Use "
"--plot-idxs to see the indices to use."
)
parser.add_argument(
'--plot-idxs',
action='store_true',
help="Set this flag to overplot the indices of the points. These are the "
"indices to use with --remove-points."
)
parser.add_argument(
'--random-starts',
type=int,
help="The number of random starts to use when trying to find the MAP "
"estimate for the hyperparameters. If you are getting bad fits, try "
"increasing this. If not specified, this is set to the number of "
"processors available on your machine or 20, whichever is smaller."
)
parser.add_argument(
'--bounds',
type=float,
nargs='+',
help="Bounds to use for each of the hyperparameters. Specified as pairs of "
"lower, upper bounds. Causes uniform hyperpriors to be used for all "
"hyperparameters. If present, there should be two such pairs for the "
"squared exponential kernel and five such pairs for the Gibbs kernel "
"with tanh length scale warping. If not specified, somewhat intelligent "
"guesses are made. If you are getting bad fits, try tweaking these. "
"Note that this is overridden by --hyperprior if present."
)
parser.add_argument(
'--hyperprior',
nargs='+',
help="Specifies the (hyper)prior to use for some or all of the "
"hyperparameters. This flag should be followed by one or more "
"specifications of the form: '[NAME] [TYPE] [p1] [p2] ...' where [NAME] "
"is the hyperparameter name (one of {{{names}}}), [TYPE] is the type of "
"prior distribution to use for the hyperparameter (one of "
"{{{distributions}}}) and [p1], [p2] and so on are the values for the "
"parameters of the distribution. An example of this is "
"'--hyperprior sigma_f uniform 0 20' which sets the hyperprior on the "
"signal variance to be uniform between 0 and 20. If present, this "
"overrides --bounds. If not present, reasonable guesses will be used.".format(
names=', '.join(map(str, HYPERPARAMETER_NAMES.keys())),
distributions=', '.join(map(str, HYPERPRIORS.keys()))
)
)
parser.add_argument(
'--use-MCMC',
action='store_true',
help="Set this flag to use MCMC integration over the hyperparameters instead "
"of MAP estimation. This is the most rigorous way of capturing all "
"uncertainty, and should always be used if you are interested in "
"gradients and/or the details of the edge. Note that this is very "
"computationally expensive, but benefits strongly from having many "
"cores to run on."
)
parser.add_argument(
'--walkers',
type=int,
# default=200,
help="The number of walkers to use to explore the parameter space. This "
"number should be high, on the order of a few hundred. If you are "
"getting poor mixing of the MCMC integration, try increasing this by a "
"hundred at a time. Default is 200."
)
parser.add_argument(
'--MCMC-samp',
type=int,
# default=200,
help="The number of samples to take with each walker. The default of 200 is "
"a good number to get a look at the sample space and dial in the bounds."
)
parser.add_argument(
'--burn',
type=int,
# default=100,
help="The number of samples to discard at the start of each MCMC chain. This "
"will usually need to be on the order of a few hundred. If your chains "
"are taking too long to mix, try narrowing the bounds on the "
"hyperparameters and/or increasing --sampler-a. Default is 100."
)
parser.add_argument(
'--keep',
type=int,
# default=200,
help="The number of MCMC samples to keep when fitting the profiles. This "
"lets you get a full picture of the parameter space but only fit on the "
"number of profiles needed. Default is 200."
)
parser.add_argument(
'--sampler-a',
type=float,
# default=2.0,
help="The width of the sampler proposal distribution. If you observe "
"multiple modes with no mixing, try doubling this. This should always "
"be greater than unity. Default is 2.0."
)
parser.add_argument(
'--full-monte-carlo',
action='store_true',
help="Set this flag to compute these mean samples using a full Monte Carlo "
"simulation instead of error propagation."
)
parser.add_argument(
'--monte-carlo-samples',
type=int,
# default=500,
help="The number of Monte Carlo samples to use when --full-monte-carlo is "
"set and MAP estimation is used. Default is 500."
)
parser.add_argument(
'--reject-negative',
action='store_true',
help="Set this flag to reject any Monte Carlo samples that go negative "
"during the full Monte Carlo simulation. Only has an effect if "
"--full-monte-carlo is set."
)
parser.add_argument(
'--reject-non-monotonic',
action='store_true',
help="Set this flag to reject any Monte Carlo samples that are not monotonic "
"when performing the full Monte Carlo simulation. Only has an effect if "
"--full-monte-carlo is set."
)
parser.add_argument(
'--no-a-over-L',
action='store_true',
help="Set this flag to turn off the computation of a/L, which can save some "
"time if you don't need gradients/scale lengths."
)
parser.add_argument(
'--compute-vol-avg',
action='store_true',
help="Set this flag to compute the volume average of the profile."
)
parser.add_argument(
'--compute-peaking',
action='store_true',
help="Set this flag to compute the peaking figure of merit of the profile."
)
parser.add_argument(
'--compute-TCI',
action='store_true',
help="Set this flag to compute the integrals along the TCI chords. This will "
"only work if the TCI data are loaded."
)
parser.add_argument(
'-i', '--input-filename',
help="Filename/path to a CSV or NetCDF file containing the profile data to "
"be fit. Note that if you wish to make use of the core/edge value, "
"slope constraints you must provide t-min and t-max bracketing the "
"times used so that the program can find the locations of the magnetic "
"axis and GH limiter in the relevant coordinates. (Though it will "
"always be able to find the magnetic axis if you use a normalized "
"coordinate.) If the extension of the file is .csv it will be treated "
"as a comma-separated values file, all other extensions will be treated "
"as NetCDF files. If using a CSV file, the first row should be a "
"comma-separated list of the field names, as defined with "
"--abscissa-name and --ordinate-name. These columns can be in any order "
"in the actual file."
)
parser.add_argument(
'-o', '--output-filename',
help="Filename/path to write a NetCDF or CSV file to containing the results "
"of the fit. If not specified, you will be prompted for a filename upon "
"completing the fit."
)
parser.add_argument(
'-x', '--abscissa-name',
nargs='+',
help="Name(s) of the variable(s) in the input/output NetCDF/CSV files that "
"contain the values of the abscissa (independent variable(s)). The "
"uncertainty in the abscissa must then be in err_ABSCISSA_NAME, if "
"present. Note that uncertainties in the abscissa are NOT used in the "
"profile fit at present, but will be shown on the plot. If you do not "
"provide this when using a CSV file, the names will automatically be "
"inferred by looking at the order of the header of the CSV file. This "
"argument is required when using a NetCDF file. You must always put "
"your time variable first for this to work properly."
)
parser.add_argument(
'-y', '--ordinate-name',
help="Name of the variable in the input/output NetCDF/CSV files that "
"contains the values of the ordinate (dependent variable). The "
"uncertainty in the ordinate must then be in err_ORDINATE_NAME. If you "
"do not provide this when using a CSV file, the names will "
"automatically be inferred by looking at the order of the header of the "
"CSV file. This argument is required when using a NetCDF file."
)
parser.add_argument(
'--metadata-lines',
type=int,
help="Number of lines of metadata at the start of your CSV file to read. You "
"can include the shot, times and coordinate in the CSV file itself in "
"this manner. See the documentation on "
"profiletools.CMod.read_plasma_csv for more details. If you leave this "
"out, the program will check to see if the first line of your file is "
"of the form 'metadata LINES', where LINES is the number of lines of "
"metadata present."
)
parser.add_argument(
'--no-save-state',
action='store_true',
help="By default, pickle and NetCDF files will contain a representation of "
"the internal state of the program which can be reloaded at a later "
"time. You can set this flag to turn this feature off to make smaller "
"files. Note that there is no way to control this through the GUI."
)
parser.add_argument(
'--cov-in-save-state',
action='store_true',
help="By default, the state information saved (either into a fit result or "
"as a standalone file) will not contain the very large covariance "
"matrix. If you wish to have access to this information, pass this flag. "
"Note that there is no way to control this through the GUI."
)
parser.add_argument(
'--sampler-in-save-state',
action='store_true',
help="By default, the state information saved (either into a fit result or "
"as a standalone file) will not contain the very large MCMC sampler "
"instance. If you wish to have access to this information, pass this "
"flag. Note that there is no way to control this through the GUI."
)
parser.add_argument(
'--full-auto',
action='store_true',
help="Set this flag to disable all prompting for missing/optional arguments "
"and run fully automatically. The program will exit with status 1 if "
"any required parameters are missing. The program will still stop to "
"allow the user to assess the quality of the fit."
)
parser.add_argument(
'--no-interaction',
action='store_true',
help="Set this flag to not let the user interact with the GUI. The fit will "
"be automatically run and saved, along with a picture of the plot, to "
"the output file specified."
)
parser.add_argument(
'--no-mainloop',
action='store_true',
help="Set this flag to disable starting of the Tkinter main loop. This is "
"useful for debugging."
)
parser.add_argument(
'--x-lim',
type=float,
nargs=2,
help="The upper and lower bounds for the horizontal plot axis. If not "
"provided, these will be set from the data."
)
parser.add_argument(
'--y-lim',
type=float,
nargs=2,
help="The upper and lower bounds for the vertical plot axis. If not provided, "
"these will be set from the data."
)
parser.add_argument(
'--dy-lim',
type=float,
nargs=2,
help="The upper and lower bounds for the gradient plot. If not provided, "
"these will be set from the data."
)
parser.add_argument(
'--aLy-lim',
type=float,
nargs=2,
help="The upper and lower bounds for the inverse gradient scale length plot. "
"If not provided, these will be set from the data."
)
parser.add_argument(
'--load',
help="Name of a file to load the settings from. Any command line flags used "
"will override the settings in the file. This can either be a .gpfit "
"file with only settings or a Pickle or NetCDF file that was produced "
"with the 'save fit' button in gpfit."
)
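# Typical invocations (taken from the examples in the parser description above):
#   gpfit --shot 1101014006 --signal ne --t-min 0.965 --t-max 1.365 --system TS
#   gpfit -i foo.nc --coordinate psinorm --t-min 0.965 --t-max 1.365 -x psin -y q --shot 1101014006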
if __name__ == "__main__":
args = parser.parse_args()
### ======================== START OF MAIN PROGRAM ======================== ###
# Set up the GUI:
import sys
import socket
# Hackishly augment the path for now:
hostname = socket.gethostname().lower()
if ('juggernaut' not in hostname and
'sydney' not in hostname and
'cosmonaut' not in hostname):
sys.path.insert(0, "/home/markchil/codes/gptools")
sys.path.insert(0, "/home/markchil/codes/profiletools")
sys.path.insert(0, "/home/markchil/codes/TRIPPy")
sys.path.insert(0, "/home/markchil/codes/efit/development/EqTools")
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.gridspec as mplgs
import Tkinter as tk
import tkFileDialog
import tkFont
import ttk
import time
import multiprocessing
import profiletools
import gptools
import eqtools
import MDSplus
import os.path
import copy
import re
import scipy
import scipy.io
import scipy.linalg
import numpy
import numpy.linalg
import itertools
import getpass
import inspect
import csv
import cPickle as pickle
# What key to use for keyboard shortcuts: command on Mac, control otherwise:
COMMAND_KEY = 'Command' if sys.platform == 'darwin' else 'Control'
# Define the format used to print the date:
DATE_FORMAT = '%d %b %Y %H:%M:%S'
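# e.g., time.strftime(DATE_FORMAT) produces something like '05 Mar 2015 14:30:00'.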
# Define the parameters for the basic Frame elements:
FRAME_PARAMS = {'relief': tk.RAISED, 'borderwidth': 2}
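# Presumably unpacked as keyword arguments when the top-level frames are built,
# e.g., tk.Frame(parent, **FRAME_PARAMS).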
# Regex used to split lists up. This will let the list be delimited by any
# non-numeric characters, where the decimal point and minus sign are considered
# numeric.
LIST_REGEX = r'([-0-9.]+)[^-0-9.]*'
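# For example, re.findall(LIST_REGEX, '0.965, 1.365 2.0') gives
# ['0.965', '1.365', '2.0'].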
# Regex used to split lists which include ranges up. This will let the list be
# delimited by any non-numeric characters, where the decimal point and colon
# are considered numeric.
RANGE_LIST_REGEX = r'(-?[0-9]+[:-]*-?[0-9]+|-?[0-9]+)[^-0-9:]*'
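# For example, re.findall(RANGE_LIST_REGEX, '1:5, 7, 10') gives ['1:5', '7', '10'].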
# Define the JointPrior objects corresponding to each hyperprior:
HYPERPRIOR_MAP = {
'uniform': gptools.UniformJointPrior,
'gamma': gptools.GammaJointPrior,
'normal': gptools.NormalJointPrior,
'log-normal': gptools.LogNormalJointPrior,
'alt-gamma': gptools.GammaJointPriorAlt
}
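# These are instantiated in HyperpriorFrame.get_hyperprior below, e.g.,
# HYPERPRIOR_MAP['uniform']([0.0], [20.0]) for a single hyperparameter.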
class TreeFileFrame(tk.Frame):
"""Frame to hold the buttons to choose between using the tree or a file,
as well as the file specification.
All arguments to the constructor are passed to :py:class:`tk.Frame`.
"""
TREE_MODE = 1
FILE_MODE = 2
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create radio buttons to select tree versus file:
# When the buttons are pressed, they will enable/disable the file
# selection Entry, Button and the variable specification Entries.
self.source_state = tk.IntVar(self)
self.tree_button = tk.Radiobutton(
self,
text="tree",
variable=self.source_state,
value=self.TREE_MODE,
command=self.master.update_source
)
self.file_button = tk.Radiobutton(
self,
text="file:",
variable=self.source_state,
value=self.FILE_MODE,
command=self.master.update_source
)
self.tree_button.grid(row=0, column=0)
self.file_button.grid(row=0, column=1)
# Create text entry to input file path:
self.path_entry = tk.Entry(self)
        self.path_entry.grid(row=0, column=2, sticky=tk.E + tk.W)
# Create button to select file:
self.choose_file_button = tk.Button(
self,
text="choose file",
command=self.choose_file
)
self.choose_file_button.grid(row=0, column=3)
# Set file path entry to expand:
self.grid_columnconfigure(2, weight=1)
def choose_file(self):
"""Create a dialog to let the user choose which file to read data from.
"""
filepath = tkFileDialog.askopenfilename()
if filepath:
self.path_entry.delete(0, tk.END)
self.path_entry.insert(0, filepath)
class VariableNameFrame(tk.Frame):
"""Frame to hold the variable name specification frames.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create label for variables:
self.variables_master_label = tk.Label(self, text="variable/column names:")
self.variables_master_label.grid(row=0, column=0, columnspan=6, sticky=tk.W)
# Create label for time:
self.time_label = tk.Label(self, text="time:")
self.time_label.grid(row=1, column=0, sticky=tk.E)
# Create box for time:
self.time_box = tk.Entry(self, width=4)
self.time_box.grid(row=1, column=1, sticky='EW')
# Create label for space:
self.space_label = tk.Label(self, text="space:")
self.space_label.grid(row=1, column=2, sticky=tk.E)
# Create box for space:
self.space_box = tk.Entry(self, width=4)
self.space_box.grid(row=1, column=3, sticky='EW')
# Create label for data:
self.data_label = tk.Label(self, text="data:")
self.data_label.grid(row=1, column=4, sticky=tk.E)
# Create box for data:
self.data_box = tk.Entry(self, width=4)
self.data_box.grid(row=1, column=5, sticky='EW')
# Create label and box for number of metadata lines:
self.meta_label = tk.Label(self, text="metadata:")
self.meta_label.grid(row=1, column=6, sticky='E')
self.meta_box = tk.Entry(self, width=4)
self.meta_box.grid(row=1, column=7, sticky='EW')
# Configure boxes to grow:
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(5, weight=1)
self.grid_columnconfigure(7, weight=1)
class ShotFrame(tk.Frame):
"""Frame to hold specification of the shot number.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create label for shot:
self.shot_label = tk.Label(self, text="shot:")
self.shot_label.grid(row=0, column=0)
# Create box for shot:
self.shot_box = tk.Entry(self)
self.shot_box.grid(row=0, column=1, sticky=tk.E + tk.W)
# Allow shot entry to expand to fill:
self.grid_columnconfigure(1, weight=1)
class SignalCoordinateFrame(tk.Frame):
"""Frame to hold the specification of which signal and coordinates to use.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create label for signal:
self.signal_label = tk.Label(self, text="signal:")
self.signal_label.grid(row=0, column=0, sticky='E')
# Create option menu for signal:
self.signal_var = tk.StringVar(self)
self.signal_var.set(SYSTEM_OPTIONS.keys()[0])
self.signal_menu = tk.OptionMenu(
self,
self.signal_var,
*SYSTEM_OPTIONS.keys(),
command=self.master.update_signal
)
self.signal_menu.grid(row=0, column=1, sticky='EW')
# Create label for coordinate:
self.coordinate_label = tk.Label(self, text="coordinate:")
self.coordinate_label.grid(row=0, column=2, sticky='E')
# Create option menu for coordinate:
self.coordinate_var = tk.StringVar(self)
self.coordinate_var.set(COORDINATE_OPTIONS[0])
self.coordinate_menu = tk.OptionMenu(
self,
self.coordinate_var,
*COORDINATE_OPTIONS
)
self.coordinate_menu.grid(row=0, column=3, sticky='EW')
class OptionBox(tk.Frame):
"""Frame to hold a Checkbutton corresponding to a given system.
"""
def __init__(self, system, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.system = system
self.state_var = tk.IntVar(self)
self.button = tk.Checkbutton(
self,
text=self.system,
variable=self.state_var,
command=self.invoke_TCI if self.system == 'TCI' else None
)
self.button.grid(row=0, column=0)
# Set default value:
if self.system not in DEFAULT_EXCLUDE:
self.button.select()
if self.system == 'TCI':
self.invoke_TCI()
def invoke_TCI(self):
"""Set the state of the TCI settings accordingly.
"""
self.master.master.set_TCI_state(self.state_var.get())
class SystemFrame(tk.Frame):
"""Frame to handle selection of systems to include.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.signal = None
self.buttons = []
self.update_systems(self.master.signal_coordinate_frame.signal_var.get())
def update_systems(self, signal):
"""Update the list of system options shown to correspond to `signal`.
"""
# Only update the signal if necessary:
if signal != self.signal:
self.signal = signal
# Delete all of the old buttons:
for b in self.buttons:
b.destroy()
# Create the new buttons:
self.buttons = [OptionBox(sys, self) for sys in SYSTEM_OPTIONS[signal]]
for k in xrange(0, len(self.buttons)):
self.buttons[k].grid(row=0, column=k)
class TCIFrame(tk.Frame):
"""Frame to handle selection of the settings for the TCI data.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.TCI_points_label = tk.Label(self, text="TCI quadrature points:")
self.TCI_points_label.grid(row=0, column=0, sticky='E')
self.TCI_points_box = tk.Entry(self, width=3)
self.TCI_points_box.grid(row=0, column=1, sticky='EW')
self.TCI_points_box.insert(0, '100')
self.TCI_thin_label = tk.Label(self, text='thin:')
self.TCI_thin_label.grid(row=0, column=2, sticky='E')
self.TCI_thin_box = tk.Entry(self, width=3)
self.TCI_thin_box.grid(row=0, column=3, sticky='EW')
self.TCI_thin_box.insert(0, '1')
self.TCI_ds_label = tk.Label(self, text='ds:')
self.TCI_ds_label.grid(row=0, column=4, sticky='E')
self.TCI_ds_box = tk.Entry(self, width=3)
self.TCI_ds_box.grid(row=0, column=5, sticky='EW')
self.TCI_ds_box.insert(0, '1e-3')
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(5, weight=1)
class EFITFrame(tk.Frame):
"""Frame to handle selection of the EFIT tree to use.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.EFIT_label = tk.Label(self, text="EFIT tree:")
self.EFIT_label.grid(row=0, column=0, sticky='E')
# Entry to put the tree to use in:
self.EFIT_field = tk.Entry(self)
self.EFIT_field.grid(row=0, column=1, sticky='EW')
self.grid_columnconfigure(1, weight=1)
class DataSourceFrame(tk.Frame):
"""Frame to hold all of the components that dictate where the data come from.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(self, text="Data Source", font=tkFont.Font(weight=tkFont.BOLD))
self.frame_label.grid(row=0, sticky='W')
# Create frame to hold tree/file selector row:
self.tree_file_frame = TreeFileFrame(self)
self.tree_file_frame.grid(row=1, sticky='EW')
# Create frame to hold variable name selector row:
self.variable_name_frame = VariableNameFrame(self)
self.variable_name_frame.grid(row=2, sticky='EW')
# Create frame to hold shot selector row:
self.shot_frame = ShotFrame(self)
self.shot_frame.grid(row=3, sticky='EW')
# Create frame to hold signal/coordinate selection menus:
self.signal_coordinate_frame = SignalCoordinateFrame(self)
self.signal_coordinate_frame.grid(row=4, sticky='EW')
# Create frame to hold TCI settings. This needs to be done BEFORE the
# systems frame so we can set the state of the TCI stuff properly:
self.TCI_frame = TCIFrame(self)
self.TCI_frame.grid(row=6, sticky='EW')
# Create frame to hold signal selection check buttons:
self.system_frame = SystemFrame(self)
self.system_frame.grid(row=5, sticky='W')
# Create frame to hold EFIT tree selection:
self.EFIT_frame = EFITFrame(self)
self.EFIT_frame.grid(row=7, sticky='EW')
# Allow columns to grow:
self.grid_columnconfigure(0, weight=1)
# Set default conditions:
self.tree_file_frame.tree_button.invoke()
def set_TCI_state(self, state):
"""Set the TCI boxes to the indicated state.
"""
if state:
self.TCI_frame.TCI_points_label.config(state=tk.NORMAL)
self.TCI_frame.TCI_points_box.config(state=tk.NORMAL)
self.TCI_frame.TCI_thin_label.config(state=tk.NORMAL)
self.TCI_frame.TCI_thin_box.config(state=tk.NORMAL)
self.TCI_frame.TCI_ds_label.config(state=tk.NORMAL)
self.TCI_frame.TCI_ds_box.config(state=tk.NORMAL)
else:
self.TCI_frame.TCI_points_label.config(state=tk.DISABLED)
self.TCI_frame.TCI_points_box.config(state=tk.DISABLED)
self.TCI_frame.TCI_thin_label.config(state=tk.DISABLED)
self.TCI_frame.TCI_thin_box.config(state=tk.DISABLED)
self.TCI_frame.TCI_ds_label.config(state=tk.DISABLED)
self.TCI_frame.TCI_ds_box.config(state=tk.DISABLED)
def update_source(self):
"""Update changes between tree and file mode.
In tree mode:
* File name, selection and variable name boxes are disabled.
* Signal and system selectors are enabled.
In file mode:
* File name, selection and variable name boxes are enabled.
* Signal and system selectors are disabled.
"""
if self.tree_file_frame.source_state.get() == self.tree_file_frame.TREE_MODE:
self.tree_file_frame.path_entry.config(state=tk.DISABLED)
self.tree_file_frame.choose_file_button.config(state=tk.DISABLED)
for w in self.variable_name_frame.winfo_children():
w.config(state=tk.DISABLED)
self.signal_coordinate_frame.signal_label.config(state=tk.NORMAL)
self.signal_coordinate_frame.signal_menu.config(state=tk.NORMAL)
for b in self.system_frame.buttons:
b.button.config(state=tk.NORMAL)
elif self.tree_file_frame.source_state.get() == self.tree_file_frame.FILE_MODE:
self.tree_file_frame.path_entry.config(state=tk.NORMAL)
self.tree_file_frame.choose_file_button.config(state=tk.NORMAL)
for w in self.variable_name_frame.winfo_children():
w.config(state=tk.NORMAL)
self.signal_coordinate_frame.signal_label.config(state=tk.DISABLED)
self.signal_coordinate_frame.signal_menu.config(state=tk.DISABLED)
for b in self.system_frame.buttons:
b.button.config(state=tk.DISABLED)
def update_signal(self, signal):
"""Updates the available systems when the `signal` changes.
"""
self.set_TCI_state(False)
self.system_frame.update_systems(signal)
class TimeWindowFrame(tk.Frame):
"""Frame to specify time window/points.
"""
WINDOW_MODE = 1
POINT_MODE = 2
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create radio buttons to select between window and points:
self.method_state = tk.IntVar(self)
self.window_button = tk.Radiobutton(self,
text="time window:",
variable=self.method_state,
value=self.WINDOW_MODE,
command=self.update_method)
self.point_button = tk.Radiobutton(self,
text="time points:",
variable=self.method_state,
value=self.POINT_MODE,
command=self.update_method)
self.window_button.grid(row=0, column=0, sticky='W')
self.point_button.grid(row=1, column=0, sticky='W')
# Create labels and fields to hold time window:
self.t_min_box = tk.Entry(self, width=6)
self.t_min_box.grid(row=0, column=1, sticky='EW')
self.t_max_box = tk.Entry(self, width=6)
self.t_max_box.grid(row=0, column=3, sticky='EW')
self.t_min_units = tk.Label(self, text="s to")
self.t_min_units.grid(row=0, column=2)
self.t_max_units = tk.Label(self, text="s")
self.t_max_units.grid(row=0, column=4)
# Create labels and fields to hold time points:
self.times_box = TimePointsFrame(self)
self.times_box.grid(row=1, column=1, columnspan=4, sticky='EW')
# Allow elements to resize:
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
def update_method(self):
"""Update whether the window or points boxes are enabled.
"""
if self.method_state.get() == self.WINDOW_MODE:
self.times_box.set_state(tk.DISABLED)
self.t_min_box.config(state=tk.NORMAL)
self.t_min_units.config(state=tk.NORMAL)
self.t_max_box.config(state=tk.NORMAL)
self.t_max_units.config(state=tk.NORMAL)
else:
self.times_box.set_state(tk.NORMAL)
self.t_min_box.config(state=tk.DISABLED)
self.t_min_units.config(state=tk.DISABLED)
self.t_max_box.config(state=tk.DISABLED)
self.t_max_units.config(state=tk.DISABLED)
class TimePointsFrame(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.times_box = tk.Entry(self)
self.times_box.grid(row=0, column=0, sticky='EW')
self.times_tol_label = tk.Label(self, text='s tol:')
self.times_tol_label.grid(row=0, column=1)
self.times_tol_box = tk.Entry(self, width=3)
self.times_tol_box.grid(row=0, column=2, sticky='EW')
self.times_tol_units_label = tk.Label(self, text='s')
self.times_tol_units_label.grid(row=0, column=3)
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(2, weight=1)
def set_state(self, state):
self.times_box.config(state=state)
self.times_tol_label.config(state=state)
self.times_tol_box.config(state=state)
self.times_tol_units_label.config(state=state)
class MethodFrame(tk.Frame):
"""Frame to select averaging/uncertainty methods.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create label for method:
self.method_label = tk.Label(self, text="averaging method:")
self.method_label.grid(row=0, column=0, sticky='E')
# Create option menu for methods:
self.method_var = tk.StringVar(self)
self.method_var.set(METHOD_OPTIONS[0])
self.method_menu = tk.OptionMenu(self, self.method_var, *METHOD_OPTIONS, command=self.update_method)
self.method_menu.grid(row=0, column=1, sticky='W')
# Create label for error method:
self.error_method_label = tk.Label(self, text="uncertainty method:")
self.error_method_label.grid(row=1, column=0, sticky='E')
# Create option menu for error methods:
self.error_method_var = tk.StringVar(self)
self.error_method_var.set(ERROR_METHOD_OPTIONS[0])
self.error_method_menu = tk.OptionMenu(self, self.error_method_var, *ERROR_METHOD_OPTIONS)
self.error_method_menu.grid(row=1, column=1, sticky='W')
# Create checkbox for weighted averaging:
self.weighted_state = tk.IntVar(self)
self.weighted_button = tk.Checkbutton(self, text="weighted", variable=self.weighted_state)
self.weighted_button.select()
self.weighted_button.grid(row=0, column=2, sticky='W')
def update_method(self, new_meth):
"""Update averaging method.
For 'all points', the ability to specify weighting and uncertainty
method is disabled.
"""
if new_meth == 'all points':
self.error_method_menu.config(state=tk.DISABLED)
self.error_method_label.config(state=tk.DISABLED)
self.weighted_button.config(state=tk.DISABLED)
else:
self.error_method_menu.config(state=tk.NORMAL)
self.error_method_label.config(state=tk.NORMAL)
self.weighted_button.config(state=tk.NORMAL)
class UncertaintyAdjustFrame(tk.Frame):
"""Frame to hold controls to adjust uncertainties.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.fudge_state = tk.IntVar(self)
self.fudge_button = tk.Checkbutton(
self,
text="adjust uncertainty",
command=self.set_state,
variable=self.fudge_state
)
self.fudge_button.grid(row=0, column=0)
self.fudge_method_var = tk.StringVar(self)
self.fudge_method_var.set(FUDGE_METHOD_OPTIONS[0])
self.fudge_method_menu = tk.OptionMenu(self, self.fudge_method_var, *FUDGE_METHOD_OPTIONS)
self.fudge_method_menu.grid(row=0, column=1)
self.fudge_value_label = tk.Label(self, text='value:')
self.fudge_value_label.grid(row=0, column=2)
self.fudge_value_box = tk.Entry(self, width=3)
self.fudge_value_box.grid(row=0, column=3)
self.fudge_type_var = tk.StringVar(self)
self.fudge_type_var.set(FUDGE_TYPE_OPTIONS[0])
self.fudge_type_menu = tk.OptionMenu(self, self.fudge_type_var, *FUDGE_TYPE_OPTIONS)
self.fudge_type_menu.grid(row=0, column=4)
self.set_state()
def set_state(self):
if self.fudge_state.get():
self.fudge_method_menu.config(state=tk.NORMAL)
self.fudge_value_label.config(state=tk.NORMAL)
self.fudge_value_box.config(state=tk.NORMAL)
self.fudge_type_menu.config(state=tk.NORMAL)
else:
self.fudge_method_menu.config(state=tk.DISABLED)
self.fudge_value_label.config(state=tk.DISABLED)
self.fudge_value_box.config(state=tk.DISABLED)
self.fudge_type_menu.config(state=tk.DISABLED)
class AveragingFrame(tk.Frame):
"""Frame to hold the components specifying how averaging is performed.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(self, text="Time Points/Averaging", font=tkFont.Font(weight=tkFont.BOLD))
self.frame_label.grid(row=0, sticky='W')
# Create frame to hold time window selection:
self.time_window_frame = TimeWindowFrame(self)
self.time_window_frame.grid(row=1, sticky='EW')
# Create frame to hold averaging selection:
self.method_frame = MethodFrame(self)
self.method_frame.grid(row=2, sticky='W')
# Create frame to hold fudge selection:
self.fudge_frame = UncertaintyAdjustFrame(self)
self.fudge_frame.grid(row=3, sticky='EW')
# Allow elements to resize:
self.grid_columnconfigure(0, weight=1)
# Set default conditions:
self.time_window_frame.window_button.invoke()
class OutlierFrame(tk.Frame):
"""Frame to control how outlier rejection is performed.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(
self,
text="Outlier Rejection",
font=tkFont.Font(weight=tkFont.BOLD)
)
self.frame_label.grid(row=0, sticky='W')
# Create checkbuttons to select types:
self.extreme_state = tk.IntVar(self)
self.extreme_button = tk.Checkbutton(self,
text="extreme change",
variable=self.extreme_state,
command=self.update_extreme)
self.extreme_button.grid(row=1, column=0, sticky='W')
self.outlier_state = tk.IntVar(self)
self.outlier_button = tk.Checkbutton(self,
text="distance from fit",
variable=self.outlier_state,
command=self.update_outlier)
self.outlier_button.grid(row=2, column=0, sticky='W')
# Create boxes and labels to specify thresholds:
self.extreme_thresh_label = tk.Label(self, text="threshold:")
self.extreme_thresh_label.grid(row=1, column=1)
self.extreme_thresh_box = tk.Entry(self, width=6)
self.extreme_thresh_box.insert(tk.END, '9')
self.extreme_thresh_box.grid(row=1, column=2, sticky='EW')
self.extreme_thresh_unit_label = tk.Label(self, text=u"\u03C3")
self.extreme_thresh_unit_label.grid(row=1, column=3, sticky='W')
self.outlier_thresh_label = tk.Label(self, text="threshold:")
self.outlier_thresh_label.grid(row=2, column=1)
self.outlier_thresh_box = tk.Entry(self, width=6)
self.outlier_thresh_box.insert(tk.END, '3')
self.outlier_thresh_box.grid(row=2, column=2, sticky='EW')
self.outlier_thresh_unit_label = tk.Label(self, text=u"\u03C3")
self.outlier_thresh_unit_label.grid(row=2, column=3, sticky='W')
# Create label, entry to remove specific points:
self.specific_frame = tk.Frame(self)
self.specific_label = tk.Label(self.specific_frame, text="remove points:")
self.specific_label.grid(row=0, column=0, sticky='E')
self.specific_box = tk.Entry(self.specific_frame)
self.specific_box.grid(row=0, column=1, sticky='EW')
# Create checkbutton to show/remove indices:
self.show_idx_state = tk.IntVar(self.specific_frame)
self.show_idx_button = tk.Checkbutton(
self.specific_frame,
text="plot indices",
variable=self.show_idx_state,
command=self.update_show_idx
)
self.show_idx_button.grid(row=0, column=2, sticky='W')
self.specific_frame.grid_columnconfigure(1, weight=1)
self.specific_frame.grid(row=3, column=0, columnspan=4, sticky='EW')
self.grid_columnconfigure(3, weight=1)
self.idx_plotted = None
self.update_extreme()
self.update_outlier()
def update_extreme(self):
"""Update the state of extreme change rejection.
"""
if self.extreme_state.get():
self.extreme_thresh_label.config(state=tk.NORMAL)
self.extreme_thresh_box.config(state=tk.NORMAL)
self.extreme_thresh_unit_label.config(state=tk.NORMAL)
else:
self.extreme_thresh_label.config(state=tk.DISABLED)
self.extreme_thresh_box.config(state=tk.DISABLED)
self.extreme_thresh_unit_label.config(state=tk.DISABLED)
def update_outlier(self):
"""Update the state of outlier rejection.
"""
if self.outlier_state.get():
self.outlier_thresh_label.config(state=tk.NORMAL)
self.outlier_thresh_box.config(state=tk.NORMAL)
self.outlier_thresh_unit_label.config(state=tk.NORMAL)
else:
self.outlier_thresh_label.config(state=tk.DISABLED)
self.outlier_thresh_box.config(state=tk.DISABLED)
self.outlier_thresh_unit_label.config(state=tk.DISABLED)
def update_show_idx(self):
"""Update whether or not indices are shown.
"""
# Remove previous labels, if present.
if self.idx_plotted is not None:
for l in self.idx_plotted:
try:
l.remove()
except ValueError:
pass
self.idx_plotted = None
if self.show_idx_state.get():
# Only do anything if the data have been loaded.
if self.master.master.master.combined_p is not None and self.master.master.master.combined_p.X is not None:
self.idx_plotted = [
self.master.master.master.plot_frame.a_val.text(x, y, str(i))
for i, x, y in zip(
range(0, len(self.master.master.master.combined_p.y)),
self.master.master.master.combined_p.X[:, 0],
self.master.master.master.combined_p.y
)
]
self.master.master.master.plot_frame.canvas.draw()
class KernelTypeFrame(tk.Frame):
"""Frame to handle specification of which kernel to use.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create label for type:
self.k_label = tk.Label(self, text="kernel type:")
self.k_label.grid(row=0, column=0, sticky='E')
# Create option menu for type:
self.k_var = tk.StringVar(self)
self.k_var.set(HYPERPARAMETERS.keys()[0])
self.k_menu = tk.OptionMenu(
self,
self.k_var,
*HYPERPARAMETERS.keys(),
command=self.master.update_kernel
)
self.k_menu.grid(row=0, column=1, sticky='W')
# Create check button to select only core data:
self.core_only_state = tk.IntVar(self)
self.core_only_button = tk.Checkbutton(
self,
text="core only",
variable=self.core_only_state
)
self.core_only_button.grid(row=0, column=2, sticky='W')
class KernelBoundsFrame(tk.Frame):
"""Frame to handle specification of bounds on the hyperparameters.
Parameters
----------
hyperparameters : OrderedDict
Ordered mapping of hyperparameter symbols to their descriptions; one
HyperpriorFrame is created for each entry.
All other parameters/kwargs are passed to :py:class:`tk.Frame`.
"""
def __init__(self, hyperparameters, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.hyperparameters = hyperparameters
self.hyperprior_frames = []
for k, hp in zip(range(0, len(self.hyperparameters)), self.hyperparameters):
self.hyperprior_frames.append(
HyperpriorFrame(
hp,
"%s, %s:" % (self.hyperparameters[hp], hp),
self
)
)
self.hyperprior_frames[-1].grid(row=k, column=0, sticky='EW')
self.grid_columnconfigure(0, weight=1)
class HyperpriorFrame(tk.Frame):
"""Frame to handle the selection of the hyperprior for a given hyperparameter.
Parameters
----------
name : str
The name of the hyperparameter this applies to.
long_name : str
The text of the label shown next to the hyperprior type menu.
"""
def __init__(self, name, long_name, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.name = name
self.row_label = tk.Label(self, text=long_name)
self.row_label.grid(row=0, column=0, sticky='E')
self.hp_type_var = tk.StringVar(self)
self.hp_type_var.set(HYPERPRIOR_DEFAULTS[name][0])
self.hp_type = self.hp_type_var.get()
self.hp_type_menu = tk.OptionMenu(
self,
self.hp_type_var,
*HYPERPRIORS.keys(),
command=self.update_hp_type
)
self.hp_type_menu.grid(row=0, column=1, sticky='W')
self.hyperhyperparameter_frame = HyperhyperparameterFrame(
HYPERPRIORS[self.hp_type],
HYPERPRIOR_DEFAULTS[name][1],
self
)
self.hyperhyperparameter_frame.grid(row=0, column=2, sticky='EW')
self.grid_columnconfigure(2, weight=1)
def update_hp_type(self, hp_type):
if hp_type != self.hp_type:
self.hyperhyperparameter_frame.destroy()
self.hyperhyperparameter_frame = HyperhyperparameterFrame(
HYPERPRIORS[hp_type],
HYPERPRIOR_DEFAULTS[self.name][1],
self
)
self.hyperhyperparameter_frame.grid(row=0, column=2, sticky='EW')
self.hp_type = hp_type
def get_hyperprior(self):
try:
return HYPERPRIOR_MAP[self.hp_type](
[float(self.hyperhyperparameter_frame.boxes[0].get())],
[float(self.hyperhyperparameter_frame.boxes[1].get())]
)
except ValueError:
self.master.master.master.status_frame.add_line(
"Invalid hyperprior for %s!" % (self.name,)
)
return None
except KeyError:
raise ValueError("Unsupported hyperprior type!")
class HyperhyperparameterFrame(tk.Frame):
"""Frame to handle setting of the hyperhyperparameters of a given hyperprior.
Parameters
----------
names : list of str
The names of the hyperhyperparameters, in order.
defaults : list
Default values used to pre-populate the entry boxes, in the same order.
"""
def __init__(self, names, defaults, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.labels = []
self.boxes = []
for k, name, d in zip(range(0, len(names)), names, defaults):
self.labels.append(tk.Label(self, text=name))
self.labels[-1].grid(row=0, column=2 * k, sticky='E')
self.boxes.append(tk.Entry(self, width=3))
self.boxes[-1].insert(0, str(d))
self.boxes[-1].grid(row=0, column=2 * k + 1, sticky='EW')
self.grid_columnconfigure(2 * k + 1, weight=1)
class ConstraintsFrame(tk.Frame):
"""Frame to handle selection of which constraints are applied where.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Checkbox for core constraint:
self.core_state = tk.IntVar(self)
self.core_button = tk.Checkbutton(
self,
text="core constraint",
variable=self.core_state,
command=self.update_core
)
self.core_button.grid(row=0, column=0, sticky='W')
# Checkbox for edge constraint:
self.edge_state = tk.IntVar(self)
self.edge_button = tk.Checkbutton(
self,
text="edge constraint",
variable=self.edge_state,
command=self.update_edge
)
self.edge_button.grid(row=1, column=0, sticky='W')
# Label for core location:
self.core_label = tk.Label(self, text="location:")
self.core_label.grid(row=0, column=1, sticky='E')
# Label for edge locations:
self.edge_label = tk.Label(self, text="locations:")
self.edge_label.grid(row=1, column=1, sticky='E')
# Box for core location:
self.core_loc = tk.Entry(self)
self.core_loc.grid(row=0, column=2, sticky='EW')
# Box for edge location:
self.edge_loc = tk.Entry(self)
self.edge_loc.grid(row=1, column=2, sticky='EW')
# Allow boxes to expand:
self.grid_columnconfigure(2, weight=1)
def update_core(self):
"""Update the core constraint. Enable location box if constraint is on.
"""
if self.core_state.get():
self.core_label.config(state=tk.NORMAL)
self.core_loc.config(state=tk.NORMAL)
else:
self.core_label.config(state=tk.DISABLED)
self.core_loc.config(state=tk.DISABLED)
def update_edge(self):
"""Update the edge constraint. Enable location box if constraint is on.
"""
if self.edge_state.get():
self.edge_label.config(state=tk.NORMAL)
self.edge_loc.config(state=tk.NORMAL)
else:
self.edge_label.config(state=tk.DISABLED)
self.edge_loc.config(state=tk.DISABLED)
class KernelFrame(tk.Frame):
"""Frame to hold components used to specify the covariance kernel.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(self, text="Covariance Kernel", font=tkFont.Font(weight=tkFont.BOLD))
self.frame_label.grid(row=0, sticky='W')
# Create frame to hold kernel type:
self.kernel_type_frame = KernelTypeFrame(self)
self.kernel_type_frame.grid(row=1, sticky='W')
self.k = self.kernel_type_frame.k_var.get()
# Create frame to hold hyperparameter bounds:
self.bounds_label = tk.Label(self, text="hyperparameter priors:")
self.bounds_label.grid(row=2, sticky='W')
self.bounds_frame = KernelBoundsFrame(
HYPERPARAMETERS[self.kernel_type_frame.k_var.get()],
self
)
self.bounds_frame.grid(row=3, sticky='EW')
# Create frame to hold constraint checkboxes:
self.constraints_frame = ConstraintsFrame(self)
self.constraints_frame.grid(row=4, sticky='EW')
# Allow boxes to expand:
self.grid_columnconfigure(0, weight=1)
# Initial settings:
self.constraints_frame.core_button.invoke()
self.constraints_frame.edge_button.invoke()
def update_kernel(self, k):
"""Update the covariance kernel, redraw the bounds selection.
"""
# Only update if necessary:
if k != self.k:
self.bounds_frame.destroy()
self.bounds_frame = KernelBoundsFrame(HYPERPARAMETERS[k], self)
self.bounds_frame.grid(row=3, sticky='EW')
self.k = k
class FittingMethodFrame(tk.Frame):
"""Frame to handle selection between MAP and MCMC.
"""
USE_MAP = 1
USE_MCMC = 2
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create radio buttons to select MAP versus MCMC:
# When state changes, enable/disable MCMC settings.
self.method_state = tk.IntVar(self)
self.MAP_button = tk.Radiobutton(
self,
text="MAP",
variable=self.method_state,
value=self.USE_MAP,
command=self.master.update_method
)
self.MCMC_button = tk.Radiobutton(
self,
text="MCMC",
variable=self.method_state,
value=self.USE_MCMC,
command=self.master.update_method
)
self.MAP_button.grid(row=0, column=0)
self.MCMC_button.grid(row=0, column=3)
# Create label and box to set number of random starts:
self.starts_label = tk.Label(self, text="random starts:")
self.starts_label.grid(row=0, column=1, sticky='E')
self.starts_box = tk.Entry(self, width=6)
self.starts_box.grid(row=0, column=2, sticky='EW')
class MCMCFrame(tk.Frame):
"""Frame to hold selection of MCMC parameters.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.walker_label = tk.Label(self, text="walkers:")
self.walker_label.grid(row=0, column=0, sticky='E')
self.walker_box = tk.Entry(self, width=3)
self.walker_box.insert(tk.END, '200')
self.walker_box.grid(row=0, column=1, sticky='EW')
self.sample_label = tk.Label(self, text="samples:")
self.sample_label.grid(row=0, column=2, sticky='E')
self.sample_box = tk.Entry(self, width=3)
self.sample_box.insert(tk.END, '200')
self.sample_box.grid(row=0, column=3, sticky='EW')
self.burn_label = tk.Label(self, text="burn:")
self.burn_label.grid(row=0, column=4, sticky='E')
self.burn_box = tk.Entry(self, width=3)
self.burn_box.insert(tk.END, '100')
self.burn_box.grid(row=0, column=5, sticky='EW')
self.keep_label = tk.Label(self, text="keep:")
self.keep_label.grid(row=0, column=6, sticky='E')
self.keep_box = tk.Entry(self, width=3)
self.keep_box.insert(tk.END, '200')
self.keep_box.grid(row=0, column=7, sticky='EW')
self.a_label = tk.Label(self, text="a:")
self.a_label.grid(row=0, column=8, sticky='E')
self.a_box = tk.Entry(self, width=3)
self.a_box.insert(tk.END, '2')
self.a_box.grid(row=0, column=9, sticky='EW')
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(5, weight=1)
self.grid_columnconfigure(7, weight=1)
self.grid_columnconfigure(9, weight=1)
class MCMCConstraintFrame(tk.Frame):
"""Frame to hold selection of properties of full Monte Carlo sampling.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create button to select full Monte Carlo:
# Enable additional options when selected.
self.full_MC_state = tk.IntVar(self)
self.full_MC_button = tk.Checkbutton(
self,
text="use full Monte Carlo",
variable=self.full_MC_state,
command=self.update_full_MC
)
self.full_MC_button.grid(row=0, column=0, sticky='W')
# Create label and box to specify the number of samples used when using
# MAP estimation:
self.samples_label = tk.Label(self, text="samples:")
self.samples_label.grid(row=0, column=1, sticky='E')
self.samples_box = tk.Entry(self, width=4)
self.samples_box.insert(0, '500')
self.samples_box.grid(row=0, column=2, sticky='EW')
# Create button to select positivity constraint:
self.pos_state = tk.IntVar(self)
self.pos_button = tk.Checkbutton(self,
text="reject negative samples",
variable=self.pos_state)
self.pos_button.grid(row=1, column=0, sticky='W')
# Create button to select monotonicity constraint:
self.mono_state = tk.IntVar(self)
self.mono_button = tk.Checkbutton(self,
text="reject non-monotonic samples",
variable=self.mono_state)
self.mono_button.grid(row=2, column=0, sticky='W')
self.grid_columnconfigure(2, weight=1)
def update_full_MC(self):
"""Update state of full Monte Carlo.
"""
if self.full_MC_state.get():
self.pos_button.config(state=tk.NORMAL)
if self.master.master.master.eval_frame.a_L_state.get():
self.mono_button.config(state=tk.NORMAL)
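# The samples box only applies when the hyperparameters come from a MAP
# estimate; with MCMC the number of draws is governed by the
# walkers/samples/burn/keep boxes instead.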
if self.master.method_frame.method_state.get() == self.master.method_frame.USE_MAP:
self.samples_label.config(state=tk.NORMAL)
self.samples_box.config(state=tk.NORMAL)
else:
self.samples_label.config(state=tk.DISABLED)
self.samples_box.config(state=tk.DISABLED)
else:
self.samples_label.config(state=tk.DISABLED)
self.samples_box.config(state=tk.DISABLED)
self.pos_button.config(state=tk.DISABLED)
self.mono_button.config(state=tk.DISABLED)
class FittingFrame(tk.Frame):
"""Frame to hold the components controlling the fit.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(self, text="Fitting Method", font=tkFont.Font(weight=tkFont.BOLD))
self.frame_label.grid(row=0, sticky='W')
# Create frame to hold fitting method selection:
self.method_frame = FittingMethodFrame(self)
self.method_frame.grid(row=1, sticky='W')
# Create frame to hold MCMC parameters:
self.MCMC_frame = MCMCFrame(self)
self.MCMC_frame.grid(row=2, sticky='EW')
# Create frame to hold MCMC constraint options:
self.MCMC_constraint_frame = MCMCConstraintFrame(self)
self.MCMC_constraint_frame.grid(row=3, sticky='EW')
self.grid_columnconfigure(0, weight=1)
self.method_frame.MAP_button.invoke()
self.MCMC_constraint_frame.update_full_MC()
def update_method(self):
"""Update the fitting method used, enabling/disabling the MCMC options.
"""
self.MCMC_constraint_frame.update_full_MC()
if self.method_frame.method_state.get() == self.method_frame.USE_MAP:
self.method_frame.starts_label.config(state=tk.NORMAL)
self.method_frame.starts_box.config(state=tk.NORMAL)
for w in self.MCMC_frame.winfo_children():
w.config(state=tk.DISABLED)
else:
self.method_frame.starts_label.config(state=tk.DISABLED)
self.method_frame.starts_box.config(state=tk.DISABLED)
for w in self.MCMC_frame.winfo_children():
w.config(state=tk.NORMAL)
class EvaluationFrame(tk.Frame):
"""Frame to control where/what is evaluated.
"""
UNIFORM_GRID = 1
POINTS = 2
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create main label for frame:
self.frame_label = tk.Label(
self,
text="Evaluation",
font=tkFont.Font(weight=tkFont.BOLD)
)
self.frame_label.grid(row=0, sticky='W')
# Create radio buttons for uniform grid versus specific points:
self.method_state = tk.IntVar(self)
self.uniform_button = tk.Radiobutton(
self,
text="uniform grid:",
variable=self.method_state,
value=self.UNIFORM_GRID,
command=self.update_method
)
self.points_button = tk.Radiobutton(
self,
text="specific points:",
variable=self.method_state,
value=self.POINTS,
command=self.update_method
)
self.uniform_button.grid(row=1, column=0, sticky='W')
self.points_button.grid(row=2, column=0, sticky='W')
# Create labels and boxes for setting parameters:
self.npts_box = tk.Entry(self, width=4)
self.npts_box.insert(0, '400')
self.npts_box.grid(row=1, column=1, sticky='EW')
self.npts_label = tk.Label(self, text="points from")
self.npts_label.grid(row=1, column=2)
self.x_min_box = tk.Entry(self, width=4)
self.x_min_box.insert(0, '0.0')
self.x_min_box.grid(row=1, column=3, sticky='EW')
self.to_label = tk.Label(self, text="to")
self.to_label.grid(row=1, column=4)
self.x_max_box = tk.Entry(self, width=4)
self.x_max_box.insert(0, '1.2')
self.x_max_box.grid(row=1, column=5, sticky='EW')
self.x_points_box = tk.Entry(self)
self.x_points_box.grid(row=2, column=1, columnspan=5, sticky='EW')
# Create frame to hold options of things to evaluate:
self.eval_option_frame = tk.Frame(self)
# Create label for compute options:
self.compute_label = tk.Label(self.eval_option_frame, text="compute:")
self.compute_label.grid(row=0, column=0, sticky='E')
# Create checkbox to select whether or not a/L is computed:
self.a_L_state = tk.IntVar(self)
self.a_L_button = tk.Checkbutton(
self.eval_option_frame,
text="a/L",
variable=self.a_L_state,
command=self.update_a_L
)
self.a_L_button.grid(row=0, column=1, sticky='W')
# Create checkbox to select whether or not volume average is computed:
self.vol_avg_state = tk.IntVar(self)
self.vol_avg_button = tk.Checkbutton(
self.eval_option_frame,
text="volume average",
variable=self.vol_avg_state
)
self.vol_avg_button.grid(row=0, column=2, sticky='W')
# Create checkbox to select whether or not peaking is computed:
self.peaking_state = tk.IntVar(self)
self.peaking_button = tk.Checkbutton(
self.eval_option_frame,
text="peaking",
variable=self.peaking_state
)
self.peaking_button.grid(row=0, column=3, sticky='W')
# Create checkbox to select whether or not TCI integrals are computed:
self.TCI_state = tk.IntVar(self)
self.TCI_button = tk.Checkbutton(
self.eval_option_frame,
text="TCI",
variable=self.TCI_state
)
self.TCI_button.grid(row=0, column=4, sticky='W')
self.eval_option_frame.grid(row=3, column=0, sticky='W', columnspan=6)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(5, weight=1)
self.uniform_button.invoke()
self.a_L_button.select()
def update_method(self):
"""Update the method between being on a uniform grid versus specific points.
"""
if self.method_state.get() == self.UNIFORM_GRID:
self.npts_box.config(state=tk.NORMAL)
self.npts_label.config(state=tk.NORMAL)
self.x_min_box.config(state=tk.NORMAL)
self.to_label.config(state=tk.NORMAL)
self.x_max_box.config(state=tk.NORMAL)
self.x_points_box.config(state=tk.DISABLED)
else:
self.npts_box.config(state=tk.DISABLED)
self.npts_label.config(state=tk.DISABLED)
self.x_min_box.config(state=tk.DISABLED)
self.to_label.config(state=tk.DISABLED)
self.x_max_box.config(state=tk.DISABLED)
self.x_points_box.config(state=tk.NORMAL)
def update_a_L(self):
"""Update the options available and plot shown based on whether or not a/L is computed.
"""
if not self.a_L_state.get():
self.master.master.master.plot_frame.a_grad.set_visible(False)
self.master.master.master.plot_frame.a_a_L.set_visible(False)
self.master.master.master.plot_frame.a_val.change_geometry(1, 1, 1)
else:
self.master.master.master.plot_frame.a_val.change_geometry(3, 1, 1)
self.master.master.master.plot_frame.a_grad.set_visible(True)
self.master.master.master.plot_frame.a_a_L.set_visible(True)
self.master.master.master.plot_frame.canvas.draw()
if self.a_L_state.get() and self.master.master.fitting_frame.MCMC_constraint_frame.full_MC_state.get():
self.master.master.fitting_frame.MCMC_constraint_frame.mono_button.config(state=tk.NORMAL)
else:
self.master.master.fitting_frame.MCMC_constraint_frame.mono_button.config(state=tk.DISABLED)
class StatusBox(tk.Frame):
"""Frame to hold a box that conveys useful status information.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.history_box = tk.Text(
self,
width=30,
height=6,
wrap='word'
)
self.history_box.grid(row=1, sticky='EWNS')
self.add_line(
'This is {progname} version {ver}, launched at {time}. {cores} '
'cores detected.'.format(
progname=PROG_NAME,
ver=__version__,
time=time.strftime(DATE_FORMAT),
cores=multiprocessing.cpu_count()
)
)
self.history_scroll = tk.Scrollbar(self)
self.history_scroll.grid(row=1, column=1, sticky='NS')
self.history_scroll.config(command=self.history_box.yview)
self.history_box.config(yscrollcommand=self.history_scroll.set)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(1, weight=1)
self.history_box.bind("<1>", lambda event: self.history_box.focus_set())
def add_line(self, s):
"""Add a line to the history box and print it to stdout.
"""
self.history_box.config(state=tk.NORMAL)
print(s)
sys.stdout.flush()
self.history_box.insert(tk.END, s + '\n')
self.history_box.config(state=tk.DISABLED)
self.history_box.yview(tk.END)
self.history_box.update()
class ControlBox(tk.Frame):
"""Frame to hold the various buttons to control the program.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Create buttons:
self.top_frame = tk.Frame(self)
self.load_button = tk.Button(self.top_frame, text="load data", command=self.master.master.load_data)
self.load_button.grid(row=0, column=0)
self.avg_button = tk.Button(self.top_frame, text="plot data", command=self.master.master.average_data)
self.avg_button.grid(row=0, column=1)
self.fit_button = tk.Button(self.top_frame, text="fit data", command=self.master.master.fit_data)
self.fit_button.grid(row=0, column=2)
self.save_button = tk.Button(self.top_frame, text="save fit", command=self.master.master.save_fit)
self.save_button.grid(row=0, column=3)
self.top_frame.grid(row=0, column=0, sticky='EW')
self.bottom_frame = tk.Frame(self)
self.save_state_button = tk.Button(self.bottom_frame, text="save state", command=self.master.master.save_state)
self.save_state_button.grid(row=0, column=0)
self.load_state_button = tk.Button(self.bottom_frame, text="load state", command=self.master.master.load_state)
self.load_state_button.grid(row=0, column=1)
self.exit_button = tk.Button(self.bottom_frame, text="exit", command=self.master.master.exit)
self.exit_button.grid(row=0, column=2)
self.bottom_frame.grid(row=1, column=0, sticky='EW')
self.grid_columnconfigure(0, weight=1)
class PlotFrame(tk.Frame):
"""Frame to hold the plots.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.f = Figure()
self.suptitle = self.f.suptitle('')
self.a_val = self.f.add_subplot(3, 1, 1)
self.a_grad = self.f.add_subplot(3, 1, 2, sharex=self.a_val)
self.a_a_L = self.f.add_subplot(3, 1, 3, sharex=self.a_val)
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
# Need to put the toolbar in its own frame, since it automatically calls
# pack on itself, but I am using grid.
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect('button_press_event', lambda event: self.canvas._tkcanvas.focus_set())
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
def on_key_event(self, evt):
"""Tie keys to the toolbar.
"""
key_press_handler(evt, self.canvas, self.toolbar)
class PlotParamFrame(tk.Frame):
"""Frame to let the user set parameters of the plotting.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
self.limits_label = tk.Label(self, text="plot limits:")
self.limits_label.grid(row=0, column=0, sticky='W', columnspan=4)
self.x_lim_label = tk.Label(self, text="x:")
self.x_lim_label.grid(row=1, column=0, sticky='E')
self.x_lb_box = tk.Entry(self)
self.x_lb_box.grid(row=1, column=1, sticky='EW')
self.x_to_label = tk.Label(self, text="to")
self.x_to_label.grid(row=1, column=2, sticky='EW')
self.x_ub_box = tk.Entry(self)
self.x_ub_box.grid(row=1, column=3, sticky='EW')
self.y_lim_label = tk.Label(self, text="y:")
self.y_lim_label.grid(row=2, column=0, sticky='E')
self.y_lb_box = tk.Entry(self)
self.y_lb_box.grid(row=2, column=1, sticky='EW')
self.y_to_label = tk.Label(self, text="to")
self.y_to_label.grid(row=2, column=2, sticky='EW')
self.y_ub_box = tk.Entry(self)
self.y_ub_box.grid(row=2, column=3, sticky='EW')
self.dy_lim_label = tk.Label(self, text="dy/dx:")
self.dy_lim_label.grid(row=3, column=0, sticky='E')
self.dy_lb_box = tk.Entry(self)
self.dy_lb_box.grid(row=3, column=1, sticky='EW')
self.dy_to_label = tk.Label(self, text="to")
self.dy_to_label.grid(row=3, column=2, sticky='EW')
self.dy_ub_box = tk.Entry(self)
self.dy_ub_box.grid(row=3, column=3, sticky='EW')
self.aLy_lim_label = tk.Label(self, text="a/Ly:")
self.aLy_lim_label.grid(row=4, column=0, sticky='E')
self.aLy_lb_box = tk.Entry(self)
self.aLy_lb_box.grid(row=4, column=1, sticky='EW')
self.aLy_to_label = tk.Label(self, text="to")
self.aLy_to_label.grid(row=4, column=2, sticky='EW')
self.aLy_ub_box = tk.Entry(self)
self.aLy_ub_box.grid(row=4, column=3, sticky='EW')
self.update_button = tk.Button(self, text='apply', command=self.update_limits)
self.update_button.grid(row=5, column=0, columnspan=4, sticky='W')
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(3, weight=1)
def update_limits(self):
"""Apply the plot limits selected.
"""
try:
x_lb = float(self.x_lb_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_val.set_xlim(left=x_lb)
try:
x_ub = float(self.x_ub_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_val.set_xlim(right=x_ub)
try:
y_lb = float(self.y_lb_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_val.set_ylim(bottom=y_lb)
try:
y_ub = float(self.y_ub_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_val.set_ylim(top=y_ub)
try:
dy_lb = float(self.dy_lb_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_grad.set_ylim(bottom=dy_lb)
try:
dy_ub = float(self.dy_ub_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_grad.set_ylim(top=dy_ub)
try:
aLy_lb = float(self.aLy_lb_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_a_L.set_ylim(bottom=aLy_lb)
try:
aLy_ub = float(self.aLy_ub_box.get())
except ValueError:
pass
else:
self.master.master.plot_frame.a_a_L.set_ylim(top=aLy_ub)
self.master.master.plot_frame.canvas.draw()
class ControlFrame(tk.Frame):
"""Frame to hold all of the controls.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Frame.__init__(self, *args, **kwargs)
# Use Notebook to get tabs.
self.note = ttk.Notebook(self)
# self.note.enable_traversal()
self.data_time_frame = tk.Frame(self)
self.fit_eval_frame = tk.Frame(self)
self.data_source_frame = DataSourceFrame(self.data_time_frame, **FRAME_PARAMS)
self.data_source_frame.grid(row=0, sticky='EW')
self.averaging_frame = AveragingFrame(self.data_time_frame, **FRAME_PARAMS)
self.averaging_frame.grid(row=1, sticky='NSEW')
self.data_time_frame.grid_columnconfigure(0, weight=1)
self.data_time_frame.grid_rowconfigure(1, weight=1)
self.kernel_frame = KernelFrame(self, **FRAME_PARAMS)
self.fitting_frame = FittingFrame(self.fit_eval_frame, **FRAME_PARAMS)
self.fitting_frame.grid(row=0, sticky='EW')
self.eval_frame = EvaluationFrame(self.fit_eval_frame, **FRAME_PARAMS)
self.eval_frame.grid(row=1, sticky='EW')
self.outlier_frame = OutlierFrame(self.fit_eval_frame, **FRAME_PARAMS)
self.outlier_frame.grid(row=2, sticky='NSEW')
self.fit_eval_frame.grid_columnconfigure(0, weight=1)
self.fit_eval_frame.grid_rowconfigure(2, weight=1)
self.plot_param_frame = PlotParamFrame(self, **FRAME_PARAMS)
self.note.add(
self.data_time_frame,
text="Data",
# underline=0
)
self.note.add(
self.kernel_frame,
text="Kernel",
# underline=0
)
self.note.add(
self.fit_eval_frame,
text="Fit",
# underline=0
)
self.note.add(
self.plot_param_frame,
text="Plot",
# underline=0
)
self.note.grid(row=0, sticky='EWNS')
self.status_frame = StatusBox(self, **FRAME_PARAMS)
self.status_frame.grid(row=1, sticky='EWNS')
self.control_frame = ControlBox(self)
self.control_frame.grid(row=2, sticky='EW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(1, weight=1)
class FitWindow(tk.Tk):
"""Base window class to hold plot and controls.
"""
def __init__(self, *args, **kwargs):
# Need to use old, hackish way since tkinter uses old-style classes:
tk.Tk.__init__(self, *args, **kwargs)
# Workaround for Tkinter hanging on quit:
self.protocol("WM_DELETE_WINDOW", self.exit)
self.wm_title("%s %s" % (PROG_NAME, __version__,))
self.control_frame = ControlFrame(self)
self.control_frame.grid(row=0, column=1, sticky='NESW')
self.plot_frame = PlotFrame(self)
self.plot_frame.grid(row=0, column=0, sticky='NESW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.master_p = None
self.p = None
self.combined_p = None
# l, e store the plotted lines, envelopes so they can be removed as
# needed.
self.l = []
self.e = []
self.X = None
self.res = None
self.flagged_plt = None
self.bind("<%s-d>" % (COMMAND_KEY,), self.set_tab)
self.bind("<%s-k>" % (COMMAND_KEY,), self.set_tab)
self.bind("<%s-f>" % (COMMAND_KEY,), self.set_tab)
self.bind("<%s-p>" % (COMMAND_KEY,), self.set_tab)
self.bind("<F1>", self.set_tab)
self.bind("<F2>", self.set_tab)
self.bind("<F3>", self.set_tab)
self.bind("<F4>", self.set_tab)
def set_tab(self, event):
"""Set the tab as indicated by the keysym in the event.
"""
if event.keysym.lower() in ('d', 'f1'):
self.control_frame.note.select(0)
elif event.keysym.lower() in ('k', 'f2'):
self.control_frame.note.select(1)
elif event.keysym.lower() in ('f', 'f3'):
self.control_frame.note.select(2)
elif event.keysym.lower() in ('p', 'f4'):
self.control_frame.note.select(3)
def load_data(self):
"""Load the data from the relevant source.
"""
self.efit_tree = None
if (self.control_frame.data_source_frame.tree_file_frame.source_state.get() ==
self.control_frame.data_source_frame.tree_file_frame.TREE_MODE):
# Fetch data from the tree:
signal = self.control_frame.data_source_frame.signal_coordinate_frame.signal_var.get()
# Put a None placeholder in each field so that the order/position of the
# systems is preserved.
self.master_p = collections.OrderedDict(
zip(SYSTEM_OPTIONS[signal], [None,] * len(SYSTEM_OPTIONS[signal]))
)
try:
shot = int(self.control_frame.data_source_frame.shot_frame.shot_box.get())
self.control_frame.status_frame.add_line(
"Loading data from shot number %d..." % (shot,)
)
# Load the EFIT tree:
EFIT_tree_name = self.control_frame.data_source_frame.EFIT_frame.EFIT_field.get()
if EFIT_tree_name == '':
EFIT_tree_name = 'ANALYSIS'
try:
self.efit_tree = eqtools.CModEFITTree(
shot,
tree=EFIT_tree_name
)
except Exception as e:
self.control_frame.status_frame.add_line(
"Could not load EFIT data from tree %s! Loading of data "
"from tree failed." % (EFIT_tree_name,)
)
if args.no_interaction:
raise e
return
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid shot number! Loading of data from tree failed."
)
return
# Make list of systems to include:
include = [
b.system for b in self.control_frame.data_source_frame.system_frame.buttons
if b.state_var.get()
]
for system in include:
self.control_frame.status_frame.add_line(
"Loading data from %s..." % (system,)
)
try:
if signal == 'ne':
kwargs = {}
if system == 'TCI':
try:
kwargs['TCI_quad_points'] = int(
self.control_frame.data_source_frame.TCI_frame.TCI_points_box.get()
)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value for number of TCI quadrature "
"points! Loading of data from tree failed."
)
return
try:
kwargs['TCI_thin'] = int(
self.control_frame.data_source_frame.TCI_frame.TCI_thin_box.get()
)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value for TCI thinning! Loading of "
"data from tree failed."
)
return
try:
kwargs['TCI_ds'] = float(
self.control_frame.data_source_frame.TCI_frame.TCI_ds_box.get()
)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value for TCI step size! Loading of "
"data from tree failed."
)
return
self.master_p[system] = profiletools.ne(
shot,
include=[system],
efit_tree=self.efit_tree,
**kwargs
)
elif signal == 'Te':
# Don't remove the ECE edge here, since it still has ALL
# the points left in.
self.master_p[system] = profiletools.Te(
shot,
include=[system],
remove_ECE_edge=False,
efit_tree=self.efit_tree
)
elif signal == 'emiss':
self.master_p[system] = profiletools.emiss(
shot,
include=[system],
efit_tree=self.efit_tree
)
else:
self.control_frame.status_frame.add_line(
"Unsupported signal %s!" % (signal,)
)
return
except MDSplus.TreeException as e:
self.control_frame.status_frame.add_line(
"Could not fetch data from the tree for system %s. "
"Exception was: %s" % (system, e,)
)
else:
# Load data from file:
self.master_p = collections.OrderedDict()
path = self.control_frame.data_source_frame.tree_file_frame.path_entry.get()
root, ext = os.path.splitext(path)
path = os.path.abspath(os.path.expanduser(path))
base = os.path.basename(path)
if not os.path.isfile(path):
self.control_frame.status_frame.add_line(
"File %s does not exist or is not a file! Loading of data "
"from file failed." % (path,)
)
return
time_name = self.control_frame.data_source_frame.variable_name_frame.time_box.get()
space_name = self.control_frame.data_source_frame.variable_name_frame.space_box.get()
data_name = self.control_frame.data_source_frame.variable_name_frame.data_box.get()
if space_name:
if time_name:
X_names = [time_name, space_name]
else:
X_names = [space_name]
else:
X_names = None
if not data_name:
data_name = None
if ext.lower() == '.csv':
metadata_lines = self.control_frame.data_source_frame.variable_name_frame.meta_box.get()
try:
metadata_lines = int(metadata_lines)
except ValueError:
if metadata_lines == '':
metadata_lines = None
else:
self.control_frame.status_frame.add_line(
"Invalid number of metadata lines! Loading of data from "
"file failed."
)
return
self.control_frame.status_frame.add_line("Loading data from CSV file %s..." % (path,))
self.master_p[base] = profiletools.read_plasma_csv(
path,
X_names=X_names,
y_name=data_name,
metadata_lines=metadata_lines
)
else:
if X_names is None or data_name is None:
self.control_frame.status_frame.add_line(
"Must specify variable names when reading from a NetCDF "
"file! Loading of data from file failed."
)
return
self.control_frame.status_frame.add_line(
"Loading data from NetCDF file %s..." % (path,)
)
self.master_p[base] = profiletools.read_plasma_NetCDF(
path,
X_names,
data_name
)
if hasattr(self.master_p[base], 'shot'):
self.control_frame.data_source_frame.shot_frame.shot_box.delete(0, tk.END)
self.control_frame.data_source_frame.shot_frame.shot_box.insert(
0, str(self.master_p[base].shot)
)
self.efit_tree = self.master_p[base].efit_tree
if hasattr(self.master_p[base], 't_min'):
self.control_frame.averaging_frame.time_window_frame.window_button.invoke()
self.control_frame.averaging_frame.time_window_frame.t_min_box.delete(0, tk.END)
self.control_frame.averaging_frame.time_window_frame.t_min_box.insert(
0, str(self.master_p[base].t_min)
)
if hasattr(self.master_p[base], 't_max'):
self.control_frame.averaging_frame.time_window_frame.window_button.invoke()
self.control_frame.averaging_frame.time_window_frame.t_max_box.delete(0, tk.END)
self.control_frame.averaging_frame.time_window_frame.t_max_box.insert(
0, str(self.master_p[base].t_max)
)
if hasattr(self.master_p[base], 'times'):
self.control_frame.averaging_frame.time_window_frame.point_button.invoke()
self.control_frame.averaging_frame.time_window_frame.times_box.times_box.delete(0, tk.END)
self.control_frame.averaging_frame.time_window_frame.times_box.times_box.insert(
0, str(self.master_p[base].times)[1:-1]
)
# Set the coordinate selector
self.control_frame.data_source_frame.signal_coordinate_frame.coordinate_var.set(
self.master_p[base].abscissa
)
self.control_frame.status_frame.add_line("Loading of data complete.")
def average_data(self):
"""Average and plot the data.
"""
# Set of markers to cycle through for the datapoints:
# Make it new every time so the results are predictable upon reloading.
markercycle = itertools.cycle('o^sDH*')
self.control_frame.status_frame.add_line("Averaging and plotting data...")
if not self.master_p:
self.load_data()
# Clear the plots completely:
self.plot_frame.a_val.clear()
self.plot_frame.a_grad.clear()
self.plot_frame.a_a_L.clear()
method = self.control_frame.averaging_frame.method_frame.method_var.get()
weighted = self.control_frame.averaging_frame.method_frame.weighted_state.get()
y_method = self.control_frame.averaging_frame.method_frame.error_method_var.get()
abscissa = self.control_frame.data_source_frame.signal_coordinate_frame.coordinate_var.get()
window_mode = (self.control_frame.averaging_frame.time_window_frame.method_state.get() ==
self.control_frame.averaging_frame.time_window_frame.WINDOW_MODE)
core_only = self.control_frame.kernel_frame.kernel_type_frame.core_only_state.get()
times = []
if window_mode:
try:
t_min = float(self.control_frame.averaging_frame.time_window_frame.t_min_box.get())
except ValueError:
t_min = None
self.control_frame.status_frame.add_line(
"Invalid value for t_min. No lower bound applied."
)
try:
t_max = float(self.control_frame.averaging_frame.time_window_frame.t_max_box.get())
except ValueError:
t_max = None
self.control_frame.status_frame.add_line(
"Invalid value for t_max. No upper bound applied."
)
else:
s_times = re.findall(
LIST_REGEX,
self.control_frame.averaging_frame.time_window_frame.times_box.times_box.get()
)
for t in s_times:
try:
times.append(float(t))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value %s in time points, will be ignored." % (t,)
)
if not times:
self.control_frame.status_frame.add_line(
"No valid points in time points. No bounding applied."
)
try:
tol = float(self.control_frame.averaging_frame.time_window_frame.times_box.times_tol_box.get())
except ValueError:
tol = None
self.control_frame.status_frame.add_line(
"No tolerance for time points specified, points used may "
"be arbitrarily far from points requested."
)
# Keep a deepcopy so we don't mutate the master data that have been
# pulled from the tree.
self.p = copy.deepcopy(self.master_p)
for k, p in self.p.iteritems():
# Data that haven't been loaded are left as None placeholders, so skip them.
if p:
self.control_frame.status_frame.add_line(
"Processing data from %s..." % (k,)
)
# Restore the master tree so they can all share data:
p.efit_tree = self.efit_tree
# Restrict data to desired time points:
if p.X_dim > 1:
if window_mode:
if t_min is not None:
if p.X is not None:
p.remove_points(p.X[:, 0] < t_min)
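# Transformed (e.g., line-integrated) quantities carry several quadrature
# points per observation; keep an observation only if all of its points lie
# inside the time window.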
for pt in p.transformed:
good_idxs = (pt.X[:, :, 0] >= t_min).all(axis=1)
pt.X = pt.X[good_idxs]
pt.err_X = pt.err_X[good_idxs]
pt.y = pt.y[good_idxs]
pt.err_y = pt.err_y[good_idxs]
pt.T = pt.T[good_idxs]
if t_max is not None:
if p.X is not None:
p.remove_points(p.X[:, 0] > t_max)
for pt in p.transformed:
good_idxs = (pt.X[:, :, 0] <= t_max).all(axis=1)
pt.X = pt.X[good_idxs]
pt.err_X = pt.err_X[good_idxs]
pt.y = pt.y[good_idxs]
pt.err_y = pt.err_y[good_idxs]
pt.T = pt.T[good_idxs]
else:
if times:
p.keep_times(times, tol=tol)
# Convert abscissa if needed:
try:
p.convert_abscissa(abscissa)
except Exception as e:
self.control_frame.status_frame.add_line(
"Conversion to coordinate %s from %s failed for system %s."
% (abscissa, p.abscissa, k)
)
print(e)
if len(times) == 1 or method == "all points":
p.drop_axis(0)
else:
p.time_average(
robust=(method == "robust"),
y_method=y_method,
weighted=weighted
)
# Remove the edge points here (for ECE/GPC systems, or when only core data
# are requested), after the coordinate conversion is complete:
if 'GPC' in k or 'ECE' in k or core_only:
p.remove_edge_points()
# Fudge the uncertainty if requested:
if self.control_frame.averaging_frame.fudge_frame.fudge_state.get():
try:
fudge_val = float(self.control_frame.averaging_frame.fudge_frame.fudge_value_box.get())
except ValueError:
fudge_val = -1.0
if fudge_val < 0:
self.control_frame.status_frame.add_line(
"Invalid value for uncertainty adjustment, uncertainties "
"will not be adjusted!"
)
fudge_val = 0.0
if self.control_frame.averaging_frame.fudge_frame.fudge_type_var.get() == 'absolute':
new_err_y = fudge_val * scipy.ones_like(p.err_y)
else:
new_err_y = fudge_val * p.y
fudge_method = self.control_frame.averaging_frame.fudge_frame.fudge_method_var.get()
if fudge_method == 'override':
p.err_y = new_err_y
elif fudge_method == 'minimum':
bad_idx = (p.err_y <= new_err_y)
p.err_y[bad_idx] = new_err_y[bad_idx]
else:
p.err_y = scipy.sqrt(p.err_y**2 + new_err_y**2)
# Plot the data channel-by-channel so it gets color-coded right:
p.plot_data(ax=self.plot_frame.a_val, fmt=markercycle.next())
# Now that the profiles are loaded, stitch them together:
self.control_frame.status_frame.add_line("Combining profiles...")
p_list = []
# Get list of profiles to actually include -- this lets the user elect
# to drop a profile from the fit even if they loaded it.
p_include = [
b.state_var.get()
for b in self.control_frame.data_source_frame.system_frame.buttons
]
for p, i in zip(self.p.values(), p_include):
if i:
p_list.append(p)
if len(p_list) == 0:
self.control_frame.status_frame.add_line("No profiles to combine!")
else:
self.combined_p = copy.deepcopy(p_list.pop(0))
for p_other in p_list:
self.combined_p.add_profile(p_other)
# Remove extreme change points, keeping track of the bad indices.
if self.control_frame.outlier_frame.extreme_state.get():
self.control_frame.status_frame.add_line(
"Removing points that exhibit extreme changes..."
)
try:
self.extreme_flagged = self.combined_p.remove_extreme_changes(
thresh=float(self.control_frame.outlier_frame.extreme_thresh_box.get()),
mask_only=True,
# TEMPORARY TEST!!!
# logic='or'
)
y_bad_c = self.combined_p.y[self.extreme_flagged]
X_bad_c = self.combined_p.X[self.extreme_flagged, :].ravel()
self.control_frame.status_frame.add_line(
"Removed %d points with extreme changes." % (len(y_bad_c),)
)
if len(y_bad_c) > 0:
self.plot_frame.a_val.plot(
X_bad_c, y_bad_c, 'mx', label='extreme change', ms=14
)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid threshold for extreme change rejection!"
)
self.extreme_flagged = scipy.zeros_like(self.combined_p.y, dtype=bool)
else:
self.extreme_flagged = scipy.zeros_like(self.combined_p.y, dtype=bool)
self.plot_frame.a_val.set_ylabel(
"%s [%s]" % (self.combined_p.y_label, self.combined_p.y_units,)
if self.combined_p.y_units
else self.combined_p.y_label
)
# self.control_frame.plot_param_frame.update_limits()
# Only update the value axis:
try:
x_min = float(self.control_frame.plot_param_frame.x_lb_box.get())
except ValueError:
x_min = self.combined_p.X.min()
try:
x_max = float(self.control_frame.plot_param_frame.x_ub_box.get())
except ValueError:
x_max = self.combined_p.X.max()
self.plot_frame.a_val.set_xlim(left=x_min, right=x_max)
try:
y_min = float(self.control_frame.plot_param_frame.y_lb_box.get())
except ValueError:
y_min = 0
try:
y_max = float(self.control_frame.plot_param_frame.y_ub_box.get())
except ValueError:
y_max = None
self.plot_frame.a_val.set_ylim(bottom=y_min, top=y_max)
self.plot_frame.a_val.legend(loc='best', fontsize=12, ncol=2)
# Produce a descriptive title for the plot:
title = ''
try:
title += "shot %d" % (self.combined_p.shot)
except AttributeError:
pass
try:
title += " t_min %f" % (self.combined_p.t_min)
except AttributeError:
pass
try:
title += " t_max %f" % (self.combined_p.t_max)
except AttributeError:
pass
if hasattr(self.combined_p, 'times'):
times = list(self.combined_p.times)
title += " times %f" % (times.pop())
for t in times:
title += ",%f" % (t,)
self.plot_frame.suptitle.set_text(title)
self.control_frame.outlier_frame.update_show_idx()
# update_show_idx always calls draw, so we don't need to here.
# self.plot_frame.canvas.draw()
self.plot_frame.canvas._tkcanvas.focus_set()
self.outlier_flagged = scipy.zeros_like(self.combined_p.y, dtype=bool)
self.control_frame.status_frame.add_line(
"Averaging and plotting of data complete."
)
def process_bounds(self):
"""Process the hyperparameter bounds.
If a field is blank, take the bound from the GP's hyperprior. If a field
is populated, put that into the GP's hyperprior.
"""
hyperpriors = [
hf.get_hyperprior() for hf in self.control_frame.kernel_frame.bounds_frame.hyperprior_frames
]
# Use a conditional in case there is a kernel with no hyperparameters:
if hyperpriors:
hyperprior = hyperpriors.pop(0)
for hp in hyperpriors:
try:
hyperprior *= hp
except TypeError:
return False
self.combined_p.gp.k.hyperprior = hyperprior
return True
def fit_data(self):
"""Perform the actual fit and evaluation.
"""
if not self.combined_p:
self.average_data()
# Form X grid to evaluate on:
if (self.control_frame.eval_frame.method_state.get() ==
self.control_frame.eval_frame.UNIFORM_GRID):
try:
X_min = float(self.control_frame.eval_frame.x_min_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid lower bound for uniform grid!"
)
return
try:
X_max = float(self.control_frame.eval_frame.x_max_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid upper bound for uniform grid!"
)
return
try:
npts = int(self.control_frame.eval_frame.npts_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid number of points for uniform grid!"
)
return
X = scipy.linspace(X_min, X_max, npts)
else:
X = []
s_points = re.findall(
LIST_REGEX,
self.control_frame.eval_frame.x_points_box.get()
)
for p in s_points:
try:
X.append(float(p))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value %s in evaluation points, will be ignored." % (p,)
)
if not X:
self.control_frame.status_frame.add_line(
"No valid points in evaluation points!"
)
return
self.control_frame.status_frame.add_line("Creating Gaussian process...")
res = self.create_gp()
if not res:
self.control_frame.status_frame.add_line("Failed creating Gaussian process!")
return
self.control_frame.status_frame.add_line("Gaussian process created.")
# Process outliers:
remove_outliers = self.control_frame.outlier_frame.outlier_state.get()
if remove_outliers:
self.control_frame.status_frame.add_line("Finding outliers...")
self.control_frame.status_frame.add_line("Finding initial MAP estimate...")
self.find_MAP()
thresh = float(self.control_frame.outlier_frame.outlier_thresh_box.get())
self.outlier_flagged, bad_transformed = self.combined_p.remove_outliers(
thresh=thresh,
check_transformed=True,
mask_only=True
)
X_bad_o = self.combined_p.X[self.outlier_flagged, :].ravel()
err_X_bad_o = self.combined_p.err_X[self.outlier_flagged, :].ravel()
y_bad_o = self.combined_p.y[self.outlier_flagged]
err_y_bad_o = self.combined_p.err_y[self.outlier_flagged]
self.control_frame.status_frame.add_line(
"Found %d candidate outliers." % (len(y_bad_o),)
)
truly_bad_transformed = [pt for pt in bad_transformed if len(pt.y) > 0]
if len(truly_bad_transformed) > 0:
self.control_frame.status_frame.add_line(
"Removed the following %d transformed quantities:" % (len(truly_bad_transformed),)
)
for pt in truly_bad_transformed:
self.control_frame.status_frame.add_line(pt.y_label)
# TODO: Add a check to put transformed quantities back in!
# Perform a second MAP estimation to put back IN the outliers that
# now don't look so outlying:
if len(y_bad_o) > 1:
self.control_frame.status_frame.add_line(
"Finding second MAP estimate..."
)
self.create_gp()
self.find_MAP()
idxs = scipy.where(self.outlier_flagged)[0]
# Handle single points:
mean = self.combined_p.gp.predict(
X_bad_o,
n=0,
noise=False,
return_std=False
)
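# Re-test each candidate against the refit: deltas is the deviation from the
# predicted mean in units of the measurement uncertainty, and candidates now
# within the threshold are un-flagged and kept.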
deltas = scipy.absolute(mean - y_bad_o) / err_y_bad_o
deltas[err_y_bad_o == 0] = 0
bad_idxs = (deltas >= thresh)
good_idxs = ~bad_idxs
self.outlier_flagged[idxs[good_idxs]] = False
self.create_gp()
self.control_frame.status_frame.add_line(
"Removed %d outliers."
% (len(y_bad_o[bad_idxs]),)
)
elif len(y_bad_o) == 1:
bad_idxs = scipy.ones_like(y_bad_o, dtype=bool)
self.control_frame.status_frame.add_line("Removed 1 outlier.")
else:
bad_idxs = scipy.zeros_like(y_bad_o, dtype=bool)
# Plot the points that were removed:
if len(y_bad_o[bad_idxs]) > 0:
self.plot_frame.a_val.plot(
X_bad_o[bad_idxs],
y_bad_o[bad_idxs],
'rx',
label='outlier', ms=14
)
# Do the voodoo:
use_MCMC = (
self.control_frame.fitting_frame.method_frame.method_state.get() ==
self.control_frame.fitting_frame.method_frame.USE_MCMC
)
# Only redo the MAP estimate if there were points removed:
if not use_MCMC and (not remove_outliers or not bad_idxs.all()):
self.control_frame.status_frame.add_line("Finding MAP estimate...")
self.find_MAP()
# Evaluate:
self.control_frame.status_frame.add_line("Evaluating fit...")
full_MC = self.control_frame.fitting_frame.MCMC_constraint_frame.full_MC_state.get()
compute_a_L = self.control_frame.eval_frame.a_L_state.get()
if full_MC:
positivity_constraint = self.control_frame.fitting_frame.MCMC_constraint_frame.pos_state.get()
monotonicity_constraint = (
compute_a_L and
self.control_frame.fitting_frame.MCMC_constraint_frame.mono_state.get()
)
rejection_func = profiletools.RejectionFunc(
X <= self.combined_p.X.max(),
positivity=positivity_constraint,
monotonicity=monotonicity_constraint
)
try:
if not use_MCMC:
num_samples = int(
self.control_frame.fitting_frame.MCMC_constraint_frame.samples_box.get()
)
else:
num_samples = 1
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid number of Monte Carlo samples! Disabling use of "
"full Monte Carlo."
)
full_MC = False
num_samples = 1
else:
num_samples = 1
rejection_func = None
self.sampler = None
if use_MCMC:
self.control_frame.status_frame.add_line(
"Running MCMC sampler..."
)
self.run_MCMC_sampler()
MCMC_results_window = MCMCWindow(self)
MCMC_results_window.grab_set()
self.wait_window(MCMC_results_window)
if self.sampler:
try:
self.sampler.pool.close()
except AttributeError:
pass
try:
burn = int(self.control_frame.fitting_frame.MCMC_frame.burn_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value for burn! Evaluation failed."
)
return
if burn >= self.sampler.chain.shape[1]:
burn = 0
self.control_frame.status_frame.add_line(
"Not enough points, setting burn to 0!"
)
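# Choose the thinning factor so that roughly `keep` samples remain across all
# walkers after the burn-in is discarded.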
try:
thin = max(
self.sampler.chain.shape[0] *
(self.sampler.chain.shape[1] - burn) //
int(self.control_frame.fitting_frame.MCMC_frame.keep_box.get()),
1
)
print("thin=%d" % (thin,))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid value for keep! Evaluation failed."
)
return
self.control_frame.status_frame.add_line(
"MCMC sampling complete.\nComputing profile from samples..."
)
else:
self.control_frame.status_frame.add_line(
"MCMC evaluation aborted."
)
return
else:
burn = None
thin = None
try:
compute_vol_avg = self.control_frame.eval_frame.vol_avg_state.get()
compute_peaking = self.control_frame.eval_frame.peaking_state.get()
if compute_vol_avg or compute_peaking:
dum, weights = self.combined_p._make_volume_averaging_matrix(rho_grid=X)
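# Stack the volume-averaging weights on top of an identity matrix so that the
# volume average comes out as the first element of the prediction, followed by
# the point-wise values (and gradients when a/L is computed -- hence the zero
# padding of the weights and the 2*len(X) identity).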
if compute_a_L:
weights = scipy.hstack((weights, scipy.zeros_like(weights)))
output_transform = scipy.vstack((weights, scipy.eye(2 * len(X))))
else:
output_transform = scipy.vstack((weights, scipy.eye(len(X))))
# This will put the volume average as the first element.
if compute_peaking:
if 'psinorm' in self.combined_p.abscissa:
if self.combined_p.abscissa.startswith('sqrt'):
core_loc = scipy.sqrt(0.2)
else:
core_loc = 0.2
else:
times = self.combined_p._get_efit_times_to_average()
core_loc = self.combined_p.efit_tree.psinorm2rho(
self.combined_p.abscissa,
0.2,
times,
each_t=True
)
core_loc = scipy.stats.nanmean(core_loc.ravel())
X = scipy.insert(X, 0, core_loc)
output_transform = scipy.insert(output_transform, 0, 0, axis=1)
core_select = scipy.zeros(output_transform.shape[1])
core_select[0] = 1
output_transform = scipy.insert(output_transform, 1, core_select, axis=0)
# This will put w(psinorm=0.2) as the second element and the
# volume average as the first element.
else:
output_transform = None
if compute_peaking:
special_vals = 2
elif compute_vol_avg:
special_vals = 1
else:
special_vals = 0
if compute_a_L:
res = self.combined_p.compute_a_over_L(
X,
use_MCMC=use_MCMC,
sampler=self.sampler,
return_prediction=True,
full_MC=full_MC,
rejection_func=rejection_func,
num_samples=num_samples,
burn=burn,
thin=thin,
output_transform=output_transform,
special_vals=special_vals,
special_X_vals=int(compute_peaking)
)
# Print summary of fit:
self.control_frame.status_frame.add_line(
"Median relative uncertainty in value: %.2f%%\n"
"Median relative uncertainty in gradient: %.2f%%\n"
"Median relative uncertainty in a/L: %.2f%%" %
(
100 * scipy.median(scipy.absolute(res['std_val'] / res['mean_val'])),
100 * scipy.median(scipy.absolute(res['std_grad'] / res['mean_grad'])),
100 * scipy.median(scipy.absolute(res['std_a_L'] / res['mean_a_L']))
)
)
else:
res = self.combined_p.smooth(
X,
n=0,
use_MCMC=use_MCMC,
sampler=self.sampler,
full_output=True,
full_MC=full_MC,
rejection_func=rejection_func,
num_samples=num_samples,
burn=burn,
thin=thin,
output_transform=output_transform
)
# Repackage the results to match the form returned by MCMC:
res['special_mean'] = res['mean'][:special_vals]
res['special_cov'] = res['cov'][:special_vals, :special_vals]
res['mean_val'] = res.pop('mean')[special_vals:]
res['std_val'] = res.pop('std')[special_vals:]
# Print summary of fit:
self.control_frame.status_frame.add_line(
"Median relative uncertainty in value: %.2f%%" %
(100 * scipy.median(scipy.absolute(res['std_val'] / res['mean_val'])),)
)
if (self.control_frame.data_source_frame.signal_coordinate_frame.signal_var.get() == 'ne' and
self.control_frame.eval_frame.TCI_state.get()):
# TODO: Make this load TCI chords if not present!
# TODO: This breaks with full Monte Carlo!
if 'TCI' not in self.p:
self.control_frame.status_frame.add_line(
"Must have loaded TCI first! TCI integrals will not be computed."
)
else:
p_TCI = self.p['TCI']
self.control_frame.status_frame.add_line(
"Computing TCI line integrals..."
)
for pt in p_TCI.transformed:
self.control_frame.status_frame.add_line(pt.y_label)
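# Evaluate the fit at each chord's quadrature points and apply the stored
# transform matrices (block-diagonal) to form the synthetic line integral for
# comparison with the measured value.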
res_nl = self.combined_p.smooth(
scipy.vstack(pt.X),
use_MCMC=use_MCMC,
sampler=self.sampler,
full_output=True,
full_MC=full_MC,
rejection_func=rejection_func,
num_samples=num_samples,
burn=burn,
thin=thin,
output_transform=scipy.linalg.block_diag(*pt.T)
)
if pt.y_units:
y_units = pt.y_units.translate(None, '\\${}')
self.control_frame.status_frame.add_line(
u" measured: (%6.4g\u00B1%6.4g) %s\n"
u" fit: (%6.4g\u00B1%6.4g) %s"
% (pt.y[0], pt.err_y[0], y_units,
res_nl['mean'][0], res_nl['std'][0], y_units)
)
else:
self.control_frame.status_frame.add_line(
u" measured: %6.4g\u00B1%6.4g\n"
u" fit: %6.4g\u00B1%6.4g"
% (pt.y[0], pt.err_y[0],
res_nl['mean'][0], res_nl['std'][0])
)
except numpy.linalg.LinAlgError as e:
self.control_frame.status_frame.add_line(
"Evaluation failed! Try re-running and/or adjusting bounds/number "
"of samples. Exception was: %s" % (e,)
)
if args.no_interaction:
raise e
return
# Compute volume average, peaking if requested:
self.mean_peaking = None
self.std_peaking = None
self.mean_vol_avg = None
self.std_vol_avg = None
if compute_vol_avg or compute_peaking:
self.mean_vol_avg = res['special_mean'][0]
self.std_vol_avg = scipy.sqrt(res['special_cov'][0, 0])
if compute_vol_avg:
if self.combined_p.y_units:
self.control_frame.status_frame.add_line(
u"Volume average is (%g\u00b1%g) %s"
% (self.mean_vol_avg, self.std_vol_avg,
self.combined_p.y_units.translate(None, '\\${}'),)
)
else:
self.control_frame.status_frame.add_line(
u"Volume average is %g\u00b1%g"
% (self.mean_vol_avg, self.std_vol_avg,)
)
if compute_peaking:
mean_w2 = res['special_mean'][1]
std_w2 = scipy.sqrt(res['special_cov'][1, 1])
cov_w2_vol_avg = res['special_cov'][0, 1]
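# Peaking is w(psinorm=0.2) / <w>_vol; propagate the uncertainty to first
# order for this ratio:
# var_peak ~= var_w2/<w>^2 + w2^2*var_<w>/<w>^4 - 2*w2*cov(w2, <w>)/<w>^3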
self.mean_peaking = mean_w2 / self.mean_vol_avg
self.std_peaking = scipy.sqrt(
std_w2**2 / self.mean_vol_avg**2 +
self.std_vol_avg**2 * mean_w2**2 / self.mean_vol_avg**4 -
2.0 * cov_w2_vol_avg * mean_w2 / self.mean_vol_avg**3
)
self.control_frame.status_frame.add_line(
u"Peaking is %g\u00b1%g"
% (self.mean_peaking, self.std_peaking,)
)
# Delete the extra points so they don't confuse the plot/file output:
X = X[1:]
if full_MC:
self.control_frame.status_frame.add_line(
"Got %d samples that met the constraints."
% (res['samp'].shape[1],)
)
self.res = res
self.X = X
self.plot_fit()
self.control_frame.status_frame.add_line("Fitting complete.")
def plot_fit(self):
# Delete old lines, envelopes:
for line in self.l:
try:
line.remove()
except ValueError:
pass
for env in self.e:
try:
env.remove()
except ValueError:
pass
# Plot the fits:
self.l, self.e = gptools.univariate_envelope_plot(
self.X,
self.res['mean_val'],
self.res['std_val'],
ax=self.plot_frame.a_val,
color='b'
)
if self.control_frame.eval_frame.a_L_state.get():
color = plt.getp(self.l[0], 'color')
core_mask = self.X <= 1
l, e = gptools.univariate_envelope_plot(
self.X,
self.res['mean_grad'],
self.res['std_grad'],
ax=self.plot_frame.a_grad,
color=color
)
self.plot_frame.a_grad.set_ylim(
bottom=(self.res['mean_grad'][core_mask] - 3 * self.res['std_grad'][core_mask]).min(),
top=(self.res['mean_grad'][core_mask] + 3 * self.res['std_grad'][core_mask]).max()
)
self.l.extend(l)
self.e.extend(e)
l, e = gptools.univariate_envelope_plot(
self.X,
self.res['mean_a_L'],
self.res['std_a_L'],
ax=self.plot_frame.a_a_L,
color=color
)
# Avoid bug in MPL v. 1.4.2:
if matplotlib.__version__ != '1.4.2':
self.plot_frame.a_a_L.set_ylim(
bottom=(self.res['mean_a_L'][core_mask] - 3 * self.res['std_a_L'][core_mask]).min(),
top=(self.res['mean_a_L'][core_mask] + 3 * self.res['std_a_L'][core_mask]).max()
)
self.l.extend(l)
self.e.extend(e)
self.plot_frame.a_grad.set_xlabel(self.plot_frame.a_val.get_xlabel())
self.plot_frame.a_a_L.set_xlabel(self.plot_frame.a_val.get_xlabel())
y_units = self.combined_p.y_units
if not y_units:
y_units = '1'
X_units = self.combined_p.X_units[0]
if X_units:
X_units = '/' + X_units
combined_units = y_units + X_units
# Use translate instead of strip in case there are buried $'s. This
# might be ugly with mixed-math y/X-labels, but will be better than
# causing the math to go fubar.
if combined_units != '1':
label = "$d%s/d%s$ [%s]" % (
self.combined_p.y_label.translate(None, '$'),
self.combined_p.X_labels[0].translate(None, '$'),
combined_units
)
else:
label = "$d%s/d%s$" % (
self.combined_p.y_label.translate(None, '$'),
self.combined_p.X_labels[0]
)
self.plot_frame.a_grad.set_ylabel(label)
self.plot_frame.a_a_L.set_ylabel(
"$a/L_{%s}$" % (self.combined_p.y_label.translate(None, '$'),)
)
self.control_frame.plot_param_frame.update_limits()
self.plot_frame.a_val.legend(loc='best', fontsize=12, ncol=2)
self.plot_frame.canvas.draw()
self.plot_frame.canvas._tkcanvas.focus_set()
def create_gp(self):
"""Create the Gaussian process from the combined profile.
"""
# Remove points that were flagged by the user:
s_flagged_idxs = re.findall(
RANGE_LIST_REGEX,
self.control_frame.outlier_frame.specific_box.get()
)
flagged_idxs = set()
for s in s_flagged_idxs:
if ':' in s or '-' in s:
try:
start, stop = re.split('[:-]', s)
start = int(start)
stop = int(stop)
if stop <= start:
raise ValueError("stop <= start")
if start < 0 or stop >= len(self.combined_p.y):
raise ValueError("out of bounds!")
else:
flagged_idxs.update(range(start, stop + 1))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid range %s, will be ignored." % (s,)
)
else:
try:
i = int(s)
if i >= len(self.combined_p.y) or i < 0:
self.control_frame.status_frame.add_line(
"Value %d out of range, will be ignored." % (i,)
)
else:
flagged_idxs.add(i)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid index to remove '%s', will be ignored." % (s,)
)
flagged_idxs = list(flagged_idxs)
if self.flagged_plt is not None:
for p in self.flagged_plt:
try:
p.remove()
except ValueError:
pass
if len(flagged_idxs) > 0:
self.flagged_plt = self.plot_frame.a_val.plot(
self.combined_p.X[flagged_idxs, :].ravel(),
self.combined_p.y[flagged_idxs],
'x',
color='orange',
label='flagged',
ms=14
)
mask = scipy.ones_like(self.combined_p.y, dtype=bool)
mask[flagged_idxs] = False
# Mask out outliers and extreme changes:
mask = mask & (~self.extreme_flagged) & (~self.outlier_flagged)
self.combined_p.create_gp(
k=self.control_frame.kernel_frame.kernel_type_frame.k_var.get(),
constrain_slope_on_axis=False,
constrain_at_limiter=False,
mask=mask
)
# Process core constraint:
if self.control_frame.kernel_frame.constraints_frame.core_state.get():
s_core_loc = self.control_frame.kernel_frame.constraints_frame.core_loc.get()
if s_core_loc == '':
self.combined_p.constrain_slope_on_axis()
else:
s_core_loc = re.findall(LIST_REGEX, s_core_loc)
core_locs = []
for loc in s_core_loc:
try:
core_locs.append(float(loc))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid core constraint location %s in core "
"locations, will be ignored." % (loc,)
)
if len(core_locs) == 0:
self.control_frame.status_frame.add_line(
"No valid core constraint locations, constraint will not "
"be applied!"
)
else:
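# Constrain the slope to be zero at the requested core locations by adding
# derivative (n=1) observations.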
self.combined_p.gp.add_data(
core_locs,
scipy.zeros_like(core_locs),
n=1
)
# Process edge constraint:
if self.control_frame.kernel_frame.constraints_frame.edge_state.get():
s_edge_locs = self.control_frame.kernel_frame.constraints_frame.edge_loc.get()
if s_edge_locs == '':
self.combined_p.constrain_at_limiter()
else:
s_edge_locs = re.findall(LIST_REGEX, s_edge_locs)
edge_locs = []
for loc in s_edge_locs:
try:
edge_locs.append(float(loc))
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid edge constraint location %s in edge "
"locations, will be ignored." % (loc,)
)
if len(edge_locs) == 0:
self.control_frame.status_frame.add_line(
"No valid edge constraint locations, constraint will not "
"be applied!"
)
else:
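                    # At each edge location, pin the value (n=0) near zero with
                    # a tight uncertainty and the slope (n=1) near zero more
                    # loosely.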
self.combined_p.gp.add_data(
edge_locs,
scipy.zeros_like(edge_locs),
err_y=0.01,
n=0
)
self.combined_p.gp.add_data(
edge_locs,
scipy.zeros_like(edge_locs),
err_y=0.1,
n=1
)
        # Condense duplicates again now that constraint points may have been added:
if len(self.combined_p.transformed) > 0:
self.combined_p.gp.condense_duplicates()
# Process bounds:
return self.process_bounds()
def find_MAP(self):
"""Find the MAP estimate for the hyperparameters.
"""
try:
res_min, complete = self.combined_p.find_gp_MAP_estimate(
random_starts=int(self.control_frame.fitting_frame.method_frame.starts_box.get()),
verbose=True,
method='SLSQP'
)
except ValueError as e:
self.control_frame.status_frame.add_line(
"MAP estimate failed. Hyperparameters should not be trusted! Try "
"re-running the fit with more random starts. Exception was: '%s'."
% (e,)
)
if args.no_interaction:
raise e
else:
self.control_frame.status_frame.add_line(
"MAP estimate complete. Result is:"
)
for v, l in zip(self.combined_p.gp.free_params, self.combined_p.gp.free_param_names):
self.control_frame.status_frame.add_line("%s\t%.3e" % (l.translate(None, '\\'), v))
if complete < 4:
self.control_frame.status_frame.add_line(
"Less than 4 completed starts were obtained. Try increasing "
"the number of random starts, or adjusting the hyperparameter "
"bounds."
)
if not res_min.success:
self.control_frame.status_frame.add_line(
"Optimizer reports failure, selected hyperparameters "
"are likely NOT optimal. Status: %d, Message: '%s'. "
"Try adjusting bounds, initial guesses or the number "
"of random starts used."
% (res_min.status, res_min.message)
)
bounds = scipy.asarray(self.combined_p.gp.free_param_bounds)
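            # Warn when any hyperparameter landed within 0.1% of its lower or
            # upper bound, which usually means the optimum lies at or beyond
            # the bounds.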
if ((res_min.x <= 1.001 * bounds[:, 0]).any() or
(res_min.x >= 0.999 * bounds[:, 1]).any()):
self.control_frame.status_frame.add_line(
"Optimizer appears to have hit/exceeded the bounds. Try "
"adjusting bounds, initial guesses or the number of random "
"starts used."
)
def run_MCMC_sampler(self):
"""Run the MCMC sampler, save the resulting sampler object internally.
"""
try:
walkers = int(self.control_frame.fitting_frame.MCMC_frame.walker_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid number of MCMC walkers! Evaluation failed."
)
self.sampler = None
return
try:
samples = int(self.control_frame.fitting_frame.MCMC_frame.sample_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid number of MCMC samples! Evaluation failed."
)
self.sampler = None
return
try:
a = float(self.control_frame.fitting_frame.MCMC_frame.a_box.get())
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid sampler proposal width! Evaluation failed."
)
self.sampler = None
return
self.sampler = self.combined_p.gp.sample_hyperparameter_posterior(
nsamp=samples,
nwalkers=walkers,
sampler=self.sampler,
sampler_a=a
)
def save_fit(self, save_plot=False):
"""Save the fit to an output file.
"""
if self.res is None:
self.fit_data()
if not args.output_filename:
path = tkFileDialog.asksaveasfilename(
filetypes=[
('all files', '*'),
('NetCDF', ('*.nc', '*.cdf', '*.dat')),
('Pickle', '*.pkl'),
('CSV', '*.csv')
]
)
else:
path = args.output_filename
if path:
root, ext = os.path.splitext(path)
if save_plot:
# Produce a descriptive title for the plot:
title = PROG_NAME + ' ' + __version__
try:
title += " shot %d" % (self.combined_p.shot)
except AttributeError:
pass
try:
title += " t_min %f" % (self.combined_p.t_min)
except AttributeError:
pass
try:
title += " t_max %f" % (self.combined_p.t_max)
except AttributeError:
pass
if hasattr(self.combined_p, 'times'):
times = list(self.combined_p.times)
title += " times %f" % (times.pop())
for t in times:
title += ",%f" % (t,)
try:
title += " coordinate %s" % (self.combined_p.abscissa)
except AttributeError:
pass
self.plot_frame.suptitle.set_text(title)
self.plot_frame.f.savefig(
os.path.expanduser(root) + '.pdf',
format='pdf'
)
history = (
"Created by user {user} on {host} with {module} version {ver} on {time}.\n".format(
host=socket.gethostname(),
user=getpass.getuser(),
module=inspect.stack()[0][1],
ver=__version__,
time=time.asctime()
)
)
if ext.lower() == '.csv':
# Write output to CSV file:
self.control_frame.status_frame.add_line(
"Writing results to CSV file %s..." % os.path.basename(path)
)
X_name = (
self.combined_p.X_labels[0] + ' [' + self.combined_p.X_units[0] + ']'
if self.combined_p.X_units[0]
else self.combined_p.X_labels[0]
)
y_name = (
self.combined_p.y_label + ' [' + self.combined_p.y_units + ']'
if self.combined_p.y_units
else self.combined_p.y_label
)
with open(os.path.expanduser(path), 'wb') as outfile:
# Write metadata:
metadata = history
try:
metadata += "shot %d\n" % (self.combined_p.shot)
except AttributeError:
pass
try:
metadata += "t_min %f\n" % (self.combined_p.t_min)
except AttributeError:
pass
try:
metadata += "t_max %f\n" % (self.combined_p.t_max)
except AttributeError:
pass
if hasattr(self.combined_p, 'times'):
times = list(self.combined_p.times)
metadata += "times %f" % (times.pop())
for t in times:
metadata += ",%f" % (t,)
metadata += "\n"
try:
metadata += "coordinate %s\n" % (self.combined_p.abscissa)
except AttributeError:
pass
if self.mean_vol_avg:
metadata += "vol_avg %f\nerr_vol_avg %f\n" % (self.mean_vol_avg, self.std_vol_avg,)
if self.mean_peaking:
metadata += "peaking %f\nerr_peaking %f\n" % (self.mean_peaking, self.std_peaking,)
outfile.write(
"metadata %d\n" % (len(metadata.splitlines()) + 1,) + metadata
)
writer = csv.writer(outfile)
if 'mean_a_L' in self.res:
writer.writerow(
[X_name,
y_name, 'err_' + y_name,
'D' + self.combined_p.y_label, 'err_D' + self.combined_p.y_label,
'a_L' + self.combined_p.y_label, 'err_a_L' + self.combined_p.y_label]
)
writer.writerows(
zip(
self.X,
self.res['mean_val'], self.res['std_val'],
self.res['mean_grad'], self.res['std_grad'],
self.res['mean_a_L'], self.res['std_a_L']
)
)
else:
writer.writerow([X_name, y_name, 'err_' + y_name])
writer.writerows(zip(self.X, self.res['mean_val'], self.res['std_val']))
elif ext.lower() == '.pkl':
# Write output to dictionary in pickle file:
self.control_frame.status_frame.add_line(
"Writing results to pickle file %s..." % os.path.basename(path)
)
res_dict = {
'X': self.X,
'y': self.res['mean_val'],
'err_y': self.res['std_val'],
'X_label': self.combined_p.X_labels[0],
'X_units': self.combined_p.X_units[0],
'y_label': self.combined_p.y_label,
'y_units': self.combined_p.y_units
}
if 'mean_a_L' in self.res:
res_dict["dy/dX"] = self.res['mean_grad']
res_dict["err_dy/dX"] = self.res['std_grad']
res_dict["a_Ly"] = self.res['mean_a_L']
res_dict["err_a_Ly"] = self.res['std_a_L']
# Add metadata:
res_dict['history'] = history
try:
res_dict['shot'] = self.combined_p.shot
except AttributeError:
pass
try:
res_dict['t_min'] = self.combined_p.t_min
except AttributeError:
pass
try:
res_dict['t_max'] = self.combined_p.t_max
except AttributeError:
pass
try:
res_dict['times'] = list(self.combined_p.times)
except AttributeError:
pass
try:
res_dict['coordinate'] = self.combined_p.abscissa
except AttributeError:
pass
if self.mean_vol_avg:
res_dict['vol_avg'] = self.mean_vol_avg
res_dict['err_vol_avg'] = self.std_vol_avg
if self.mean_peaking:
res_dict['peaking'] = self.mean_peaking
res_dict['err_peaking'] = self.std_peaking
if self.save_state:
res_dict['state'] = self.package_state()
with open(os.path.expanduser(path), 'wb') as f:
pickle.dump(res_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
# Write output to NetCDF file:
self.control_frame.status_frame.add_line(
"Writing results to NetCDF file %s..." % os.path.basename(path)
)
X_name = self.combined_p.X_labels[0].translate(None, '\\$')
X_units = self.combined_p.X_units[0]
y_name = self.combined_p.y_label.translate(None, '\\$')
y_units = self.combined_p.y_units
with scipy.io.netcdf.netcdf_file(os.path.expanduser(path), mode='w') as f:
f.history = history
try:
f.shot = self.combined_p.shot
except AttributeError:
pass
try:
f.t_min = self.combined_p.t_min
except AttributeError:
pass
try:
f.t_max = self.combined_p.t_max
except AttributeError:
pass
try:
f.times = list(self.combined_p.times)
except AttributeError:
pass
try:
f.coordinate = self.combined_p.abscissa
except AttributeError:
pass
if self.mean_vol_avg:
f.vol_avg = self.mean_vol_avg
f.err_vol_avg = self.std_vol_avg
if self.mean_peaking:
f.peaking = self.mean_peaking
f.err_peaking = self.std_peaking
if self.save_state:
f.state = pickle.dumps(self.package_state(), protocol=pickle.HIGHEST_PROTOCOL)
f.x_name = X_name
f.y_name = y_name
f.createDimension(X_name, len(self.X))
v_X = f.createVariable(X_name, float, (X_name,))
v_X[:] = self.X
v_X.units = X_units
v_y = f.createVariable(y_name, float, (X_name,))
v_y[:] = self.res['mean_val']
v_y.units = y_units
v_err_y = f.createVariable('err_'+y_name, float, (X_name,))
v_err_y[:] = self.res['std_val']
v_err_y.units = y_units
if 'mean_a_L' in self.res:
v_grad = f.createVariable("d%s/d%s" % (y_name, X_name), float, (X_name,))
v_grad[:] = self.res['mean_grad']
v_grad.units = y_units + '/' + X_units
v_err_grad = f.createVariable("err_d%s/d%s" % (y_name, X_name), float, (X_name,))
v_err_grad[:] = self.res['std_grad']
v_err_grad.units = y_units + '/' + X_units
v_a_L = f.createVariable("a_L%s" % (y_name,), float, (X_name,))
v_a_L[:] = self.res['mean_a_L']
v_a_L.units = ''
v_err_a_L = f.createVariable("err_a_L%s" % (y_name,), float, (X_name,))
v_err_a_L[:] = self.res['std_a_L']
v_err_a_L.units = ''
self.control_frame.status_frame.add_line(
"Done writing results."
)
def package_state(self):
"""Create a dictionary representing the internal state of the program.
"""
state = {}
# From the tree/file selector frame:
state['data source'] = self.control_frame.data_source_frame.tree_file_frame.source_state.get()
state['file path'] = self.control_frame.data_source_frame.tree_file_frame.path_entry.get()
# From the variable/column name frame:
state['time name'] = self.control_frame.data_source_frame.variable_name_frame.time_box.get()
state['space name'] = self.control_frame.data_source_frame.variable_name_frame.space_box.get()
state['data name'] = self.control_frame.data_source_frame.variable_name_frame.data_box.get()
state['meta name'] = self.control_frame.data_source_frame.variable_name_frame.meta_box.get()
# From the shot number frame:
state['shot'] = self.control_frame.data_source_frame.shot_frame.shot_box.get()
# From the signal/coordinate selector frame:
state['signal'] = self.control_frame.data_source_frame.signal_coordinate_frame.signal_var.get()
state['coordinate'] = self.control_frame.data_source_frame.signal_coordinate_frame.coordinate_var.get()
# From the TCI parameter frame:
state['TCI quad points'] = self.control_frame.data_source_frame.TCI_frame.TCI_points_box.get()
state['TCI thin'] = self.control_frame.data_source_frame.TCI_frame.TCI_thin_box.get()
state['TCI ds'] = self.control_frame.data_source_frame.TCI_frame.TCI_ds_box.get()
# From the system selector frame:
state['system states'] = [
b.state_var.get() for b in self.control_frame.data_source_frame.system_frame.buttons
]
# From the EFIT parameter frame:
state['EFIT tree name'] = self.control_frame.data_source_frame.EFIT_frame.EFIT_field.get()
# From the time window selection frame:
state['time method'] = self.control_frame.averaging_frame.time_window_frame.method_state.get()
state['t min'] = self.control_frame.averaging_frame.time_window_frame.t_min_box.get()
state['t max'] = self.control_frame.averaging_frame.time_window_frame.t_max_box.get()
state['times'] = self.control_frame.averaging_frame.time_window_frame.times_box.times_box.get()
state['times tol'] = self.control_frame.averaging_frame.time_window_frame.times_box.times_tol_box.get()
# From the averaging method frame:
state['averaging method'] = self.control_frame.averaging_frame.method_frame.method_var.get()
state['uncertainty method'] = self.control_frame.averaging_frame.method_frame.error_method_var.get()
state['weighting state'] = self.control_frame.averaging_frame.method_frame.weighted_state.get()
# From the uncertainty adjustment frame:
state['uncertainty adjust state'] = self.control_frame.averaging_frame.fudge_frame.fudge_state.get()
state['uncertainty adjust method'] = self.control_frame.averaging_frame.fudge_frame.fudge_method_var.get()
state['uncertainty adjust type'] = self.control_frame.averaging_frame.fudge_frame.fudge_type_var.get()
state['uncertainty adjust value'] = self.control_frame.averaging_frame.fudge_frame.fudge_value_box.get()
# From the kernel type frame:
state['kernel type'] = self.control_frame.kernel_frame.kernel_type_frame.k_var.get()
state['core only state'] = self.control_frame.kernel_frame.kernel_type_frame.core_only_state.get()
# From the hyperprior frames:
state['hyperprior types'] = [
hf.hp_type_var.get() for hf in self.control_frame.kernel_frame.bounds_frame.hyperprior_frames
]
state['hyperhyperparameter states'] = [
[
b.get() for b in hf.hyperhyperparameter_frame.boxes
] for hf in self.control_frame.kernel_frame.bounds_frame.hyperprior_frames
]
# From the constraint frame:
state['core constraint state'] = self.control_frame.kernel_frame.constraints_frame.core_state.get()
state['edge constraint state'] = self.control_frame.kernel_frame.constraints_frame.edge_state.get()
state['core locations'] = self.control_frame.kernel_frame.constraints_frame.core_loc.get()
state['edge locations'] = self.control_frame.kernel_frame.constraints_frame.edge_loc.get()
# From the fitting method frame:
state['fitting method state'] = self.control_frame.fitting_frame.method_frame.method_state.get()
state['random starts'] = self.control_frame.fitting_frame.method_frame.starts_box.get()
# From the MCMC parameter frame:
state['MCMC walkers'] = self.control_frame.fitting_frame.MCMC_frame.walker_box.get()
state['MCMC samples'] = self.control_frame.fitting_frame.MCMC_frame.sample_box.get()
state['MCMC burn'] = self.control_frame.fitting_frame.MCMC_frame.burn_box.get()
state['MCMC keep'] = self.control_frame.fitting_frame.MCMC_frame.keep_box.get()
state['MCMC a'] = self.control_frame.fitting_frame.MCMC_frame.a_box.get()
# From the full MC constraint frame:
state['full MC state'] = self.control_frame.fitting_frame.MCMC_constraint_frame.full_MC_state.get()
state['full MC samples'] = self.control_frame.fitting_frame.MCMC_constraint_frame.samples_box.get()
state['positivity constraint state'] = self.control_frame.fitting_frame.MCMC_constraint_frame.pos_state.get()
state['monotonicity constraint state'] = self.control_frame.fitting_frame.MCMC_constraint_frame.mono_state.get()
# From the evaluation frame:
state['evaluation method state'] = self.control_frame.eval_frame.method_state.get()
state['num evaluation points'] = self.control_frame.eval_frame.npts_box.get()
state['evaluation x min'] = self.control_frame.eval_frame.x_min_box.get()
state['evaluation x max'] = self.control_frame.eval_frame.x_max_box.get()
state['evaluation specific x points'] = self.control_frame.eval_frame.x_points_box.get()
state['compute a/L state'] = self.control_frame.eval_frame.a_L_state.get()
state['compute volume average state'] = self.control_frame.eval_frame.vol_avg_state.get()
state['compute peaking state'] = self.control_frame.eval_frame.peaking_state.get()
state['compute TCI state'] = self.control_frame.eval_frame.TCI_state.get()
# From the outlier rejection frame:
state['extreme change rejection state'] = self.control_frame.outlier_frame.extreme_state.get()
state['outlier rejection state'] = self.control_frame.outlier_frame.outlier_state.get()
state['extreme change threshold'] = self.control_frame.outlier_frame.extreme_thresh_box.get()
state['outlier rejection threshold'] = self.control_frame.outlier_frame.outlier_thresh_box.get()
state['specific flagged points state'] = self.control_frame.outlier_frame.specific_box.get()
state['show idx state'] = self.control_frame.outlier_frame.show_idx_state.get()
# From the plot parameters frame:
state['plot x lb'] = self.control_frame.plot_param_frame.x_lb_box.get()
state['plot x ub'] = self.control_frame.plot_param_frame.x_ub_box.get()
state['plot y lb'] = self.control_frame.plot_param_frame.y_lb_box.get()
state['plot y ub'] = self.control_frame.plot_param_frame.y_ub_box.get()
state['plot dy lb'] = self.control_frame.plot_param_frame.dy_lb_box.get()
state['plot dy ub'] = self.control_frame.plot_param_frame.dy_ub_box.get()
state['plot aLy lb'] = self.control_frame.plot_param_frame.aLy_lb_box.get()
state['plot aLy ub'] = self.control_frame.plot_param_frame.aLy_ub_box.get()
# Data stored directly in self:
state['master p'] = self.master_p
state['p'] = self.p
state['combined_p'] = self.combined_p
state['X'] = self.X
try:
if not self.save_cov:
self.res.pop('cov', None)
except:
pass
state['res'] = self.res
try:
state['efit_tree'] = self.efit_tree
except AttributeError:
state['efit_tree'] = None
try:
if self.save_sampler:
# Need to close out the pool:
self.sampler.pool.close()
self.sampler.pool = None
state['sampler'] = self.sampler
else:
state['sampler'] = None
except AttributeError:
state['sampler'] = None
try:
state['mean_peaking'] = self.mean_peaking
except AttributeError:
state['mean_peaking'] = None
try:
state['std_peaking'] = self.std_peaking
except AttributeError:
state['std_peaking'] = None
try:
state['mean_vol_avg'] = self.mean_vol_avg
except AttributeError:
state['mean_vol_avg'] = None
try:
state['std_vol_avg'] = self.std_vol_avg
except AttributeError:
state['std_vol_avg'] = None
try:
state['extreme_flagged'] = self.extreme_flagged
except AttributeError:
state['extreme_flagged'] = None
try:
state['outlier_flagged'] = self.outlier_flagged
except AttributeError:
state['outlier_flagged'] = None
return state
def parcel_state(self, path):
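        """Write each entry of the packaged state dictionary to its own pickle file in `path`.
        """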
state = self.package_state()
for k, v in state.iteritems():
k = k.replace('/', '_')
with open(os.path.abspath(os.path.join(path, k + '.pkl')), 'wb') as f:
pickle.dump(v, f)
def save_state(self):
path = tkFileDialog.asksaveasfilename(
defaultextension='.gpfit',
filetypes=[
('gpfit', '*.gpfit'),
('all files', '*')
]
)
if path:
with open(os.path.expanduser(path), 'wb') as outfile:
pickle.dump(self.package_state(), outfile, protocol=pickle.HIGHEST_PROTOCOL)
self.control_frame.status_frame.add_line(
"Done writing state."
)
def load_state(self, path=None):
"""Load the state information from the selected file.
"""
if path is None:
path = tkFileDialog.askopenfilename(
filetypes=[
('all files', '*'),
('gpfit state files', '*.gpfit'),
('NetCDF files', ('*.nc', '*.cdf', '*.dat')),
('Pickle files', '*.pkl')
]
)
if path:
root, ext = os.path.splitext(path)
if ext == '.csv':
self.control_frame.status_frame.add_line(
"Cannot load state information from CSV file!"
)
return
elif ext == '.gpfit':
with open(os.path.expanduser(path), 'rb') as infile:
state = pickle.load(infile)
elif ext == '.pkl':
with open(os.path.expanduser(path), 'rb') as infile:
try:
state = pickle.load(infile)['state']
except KeyError:
self.control_frame.status_frame.add_line(
"No state information in pickle file %s!" % (path,)
)
return
else:
try:
with scipy.io.netcdf.netcdf_file(os.path.expanduser(path), mode='r') as f:
try:
state = pickle.loads(f.state)
except AttributeError:
self.control_frame.status_frame.add_line(
"No state information in NetCDF file %s!" % (path,)
)
return
except TypeError:
self.control_frame.status_frame.add_line(
"Unknown file type for file %s! (Tried to treat as NetCDF.)" % (path,)
)
return
self.apply_state(state)
self.control_frame.status_frame.add_line(
"Done loading state."
)
def apply_state(self, state):
"""Apply the given state dictionary.
"""
self.control_frame.data_source_frame.tree_file_frame.source_state.set(state['data source'])
self.control_frame.data_source_frame.update_source()
impose_entry(
self.control_frame.data_source_frame.tree_file_frame.path_entry,
state['file path']
)
impose_entry(
self.control_frame.data_source_frame.variable_name_frame.time_box,
state['time name']
)
impose_entry(
self.control_frame.data_source_frame.variable_name_frame.space_box,
state['space name']
)
impose_entry(
self.control_frame.data_source_frame.variable_name_frame.data_box,
state['data name']
)
impose_entry(
self.control_frame.data_source_frame.variable_name_frame.meta_box,
state['meta name']
)
impose_entry(
self.control_frame.data_source_frame.shot_frame.shot_box,
state['shot']
)
self.control_frame.data_source_frame.signal_coordinate_frame.signal_var.set(state['signal'])
self.control_frame.data_source_frame.update_signal(state['signal'])
self.control_frame.data_source_frame.signal_coordinate_frame.coordinate_var.set(state['coordinate'])
impose_entry(
self.control_frame.data_source_frame.TCI_frame.TCI_points_box,
state['TCI quad points']
)
impose_entry(
self.control_frame.data_source_frame.TCI_frame.TCI_thin_box,
state['TCI thin']
)
impose_entry(
self.control_frame.data_source_frame.TCI_frame.TCI_ds_box,
state['TCI ds']
)
for b, s in zip(
self.control_frame.data_source_frame.system_frame.buttons,
state['system states']
):
b.state_var.set(s)
if b.system == 'TCI':
b.invoke_TCI()
impose_entry(
self.control_frame.data_source_frame.EFIT_frame.EFIT_field,
state['EFIT tree name']
)
self.control_frame.averaging_frame.time_window_frame.method_state.set(state['time method'])
self.control_frame.averaging_frame.time_window_frame.update_method()
impose_entry(
self.control_frame.averaging_frame.time_window_frame.t_min_box,
state['t min']
)
impose_entry(
self.control_frame.averaging_frame.time_window_frame.t_max_box,
state['t max']
)
impose_entry(
self.control_frame.averaging_frame.time_window_frame.times_box.times_box,
state['times']
)
# Handle legacy files without this key:
try:
impose_entry(
self.control_frame.averaging_frame.time_window_frame.times_box.times_tol_box,
state['times tol']
)
except KeyError:
pass
self.control_frame.averaging_frame.method_frame.method_var.set(state['averaging method'])
self.control_frame.averaging_frame.method_frame.update_method(state['averaging method'])
self.control_frame.averaging_frame.method_frame.error_method_var.set(state['uncertainty method'])
self.control_frame.averaging_frame.method_frame.weighted_state.set(state['weighting state'])
try:
self.control_frame.averaging_frame.fudge_frame.fudge_state.set(state['uncertainty adjust state'])
self.control_frame.averaging_frame.fudge_frame.set_state()
except KeyError:
pass
try:
self.control_frame.averaging_frame.fudge_frame.fudge_method_var.set(state['uncertainty adjust method'])
except KeyError:
pass
try:
self.control_frame.averaging_frame.fudge_frame.fudge_type_var.set(state['uncertainty adjust type'])
except KeyError:
pass
try:
impose_entry(
self.control_frame.averaging_frame.fudge_frame.fudge_value_box,
state['uncertainty adjust value']
)
except KeyError:
pass
self.control_frame.kernel_frame.kernel_type_frame.k_var.set(state['kernel type'])
self.control_frame.kernel_frame.update_kernel(state['kernel type'])
self.control_frame.kernel_frame.kernel_type_frame.core_only_state.set(state['core only state'])
for hf, t in zip(
self.control_frame.kernel_frame.bounds_frame.hyperprior_frames,
state['hyperprior types']
):
hf.hp_type_var.set(t)
for hf, hhps in zip(
self.control_frame.kernel_frame.bounds_frame.hyperprior_frames,
state['hyperhyperparameter states']
):
for b, v in zip(hf.hyperhyperparameter_frame.boxes, hhps):
impose_entry(b, v)
self.control_frame.kernel_frame.constraints_frame.core_state.set(state['core constraint state'])
self.control_frame.kernel_frame.constraints_frame.update_core()
self.control_frame.kernel_frame.constraints_frame.edge_state.set(state['edge constraint state'])
self.control_frame.kernel_frame.constraints_frame.update_edge()
impose_entry(
self.control_frame.kernel_frame.constraints_frame.core_loc,
state['core locations']
)
impose_entry(
self.control_frame.kernel_frame.constraints_frame.edge_loc,
state['edge locations']
)
self.control_frame.fitting_frame.method_frame.method_state.set(state['fitting method state'])
self.control_frame.fitting_frame.update_method()
impose_entry(
self.control_frame.fitting_frame.method_frame.starts_box,
state['random starts']
)
impose_entry(
self.control_frame.fitting_frame.MCMC_frame.walker_box,
state['MCMC walkers']
)
impose_entry(
self.control_frame.fitting_frame.MCMC_frame.sample_box,
state['MCMC samples']
)
impose_entry(
self.control_frame.fitting_frame.MCMC_frame.burn_box,
state['MCMC burn']
)
impose_entry(
self.control_frame.fitting_frame.MCMC_frame.keep_box,
state['MCMC keep']
)
impose_entry(
self.control_frame.fitting_frame.MCMC_frame.a_box,
state['MCMC a']
)
self.control_frame.fitting_frame.MCMC_constraint_frame.full_MC_state.set(state['full MC state'])
self.control_frame.fitting_frame.MCMC_constraint_frame.update_full_MC()
impose_entry(
self.control_frame.fitting_frame.MCMC_constraint_frame.samples_box,
state['full MC samples']
)
self.control_frame.fitting_frame.MCMC_constraint_frame.pos_state.set(state['positivity constraint state'])
self.control_frame.fitting_frame.MCMC_constraint_frame.mono_state.set(state['monotonicity constraint state'])
self.control_frame.eval_frame.method_state.set(state['evaluation method state'])
self.control_frame.eval_frame.update_method()
impose_entry(
self.control_frame.eval_frame.npts_box,
state['num evaluation points']
)
impose_entry(
self.control_frame.eval_frame.x_min_box,
state['evaluation x min']
)
impose_entry(
self.control_frame.eval_frame.x_max_box,
state['evaluation x max']
)
impose_entry(
self.control_frame.eval_frame.x_points_box,
state['evaluation specific x points']
)
self.control_frame.eval_frame.a_L_state.set(state['compute a/L state'])
self.control_frame.eval_frame.update_a_L()
self.control_frame.eval_frame.vol_avg_state.set(state['compute volume average state'])
self.control_frame.eval_frame.peaking_state.set(state['compute peaking state'])
self.control_frame.eval_frame.TCI_state.set(state['compute TCI state'])
self.control_frame.outlier_frame.extreme_state.set(state['extreme change rejection state'])
self.control_frame.outlier_frame.update_extreme()
self.control_frame.outlier_frame.outlier_state.set(state['outlier rejection state'])
self.control_frame.outlier_frame.update_outlier()
impose_entry(
self.control_frame.outlier_frame.extreme_thresh_box,
state['extreme change threshold']
)
impose_entry(
self.control_frame.outlier_frame.outlier_thresh_box,
state['outlier rejection threshold']
)
impose_entry(
self.control_frame.outlier_frame.specific_box,
state['specific flagged points state']
)
self.control_frame.outlier_frame.show_idx_state.set(state['show idx state'])
impose_entry(
self.control_frame.plot_param_frame.x_lb_box,
state['plot x lb']
)
impose_entry(
self.control_frame.plot_param_frame.x_ub_box,
state['plot x ub']
)
impose_entry(
self.control_frame.plot_param_frame.y_lb_box,
state['plot y lb']
)
impose_entry(
self.control_frame.plot_param_frame.y_ub_box,
state['plot y ub']
)
impose_entry(
self.control_frame.plot_param_frame.dy_lb_box,
state['plot dy lb']
)
impose_entry(
self.control_frame.plot_param_frame.dy_ub_box,
state['plot dy ub']
)
impose_entry(
self.control_frame.plot_param_frame.aLy_lb_box,
state['plot aLy lb']
)
impose_entry(
self.control_frame.plot_param_frame.aLy_ub_box,
state['plot aLy ub']
)
self.master_p = state['master p']
self.p = state['p']
self.combined_p = state['combined_p']
self.X = state['X']
self.res = state['res']
self.efit_tree = state['efit_tree']
self.sampler = state['sampler']
self.mean_peaking = state['mean_peaking']
self.std_peaking = state['std_peaking']
self.mean_vol_avg = state['mean_vol_avg']
self.std_vol_avg = state['std_vol_avg']
self.extreme_flagged = state['extreme_flagged']
self.outlier_flagged = state['outlier_flagged']
# Now we can update all of the plots:
self.plot_frame.a_val.clear()
self.plot_frame.a_grad.clear()
self.plot_frame.a_a_L.clear()
markercycle = itertools.cycle('o^sDH*')
# First, we plot the data points:
if self.p is not None:
for k, p in self.p.iteritems():
if p:
p.plot_data(ax=self.plot_frame.a_val, fmt=markercycle.next())
# And the flagged outliers:
y_bad_c = self.combined_p.y[self.extreme_flagged]
X_bad_c = self.combined_p.X[self.extreme_flagged, :].ravel()
if len(y_bad_c) > 0:
self.plot_frame.a_val.plot(
X_bad_c, y_bad_c, 'mx', label='extreme change', ms=14
)
self.plot_frame.a_val.set_ylabel(
"%s [%s]" % (self.combined_p.y_label, self.combined_p.y_units,)
if self.combined_p.y_units
else self.combined_p.y_label
)
# Only update the value axis:
try:
x_min = float(self.control_frame.plot_param_frame.x_lb_box.get())
except ValueError:
x_min = self.combined_p.X.min()
try:
x_max = float(self.control_frame.plot_param_frame.x_ub_box.get())
except ValueError:
x_max = self.combined_p.X.max()
self.plot_frame.a_val.set_xlim(left=x_min, right=x_max)
try:
y_min = float(self.control_frame.plot_param_frame.y_lb_box.get())
except ValueError:
y_min = 0
try:
y_max = float(self.control_frame.plot_param_frame.y_ub_box.get())
except ValueError:
y_max = None
self.plot_frame.a_val.set_ylim(bottom=y_min, top=y_max)
self.plot_frame.a_val.legend(loc='best', fontsize=12, ncol=2)
# Produce a descriptive title for the plot:
title = ''
try:
title += "shot %d" % (self.combined_p.shot)
except AttributeError:
pass
try:
title += " t_min %f" % (self.combined_p.t_min)
except AttributeError:
pass
try:
title += " t_max %f" % (self.combined_p.t_max)
except AttributeError:
pass
if hasattr(self.combined_p, 'times'):
times = list(self.combined_p.times)
title += " times %f" % (times.pop())
for t in times:
title += ",%f" % (t,)
self.plot_frame.suptitle.set_text(title)
self.control_frame.outlier_frame.update_show_idx()
# update_show_idx always calls draw, so we don't need to here.
# self.plot_frame.canvas.draw()
self.plot_frame.canvas._tkcanvas.focus_set()
# Then, we plot the outliers:
if self.outlier_flagged is not None:
X_bad_o = self.combined_p.X[self.outlier_flagged, :].ravel()
err_X_bad_o = self.combined_p.err_X[self.outlier_flagged, :].ravel()
y_bad_o = self.combined_p.y[self.outlier_flagged]
err_y_bad_o = self.combined_p.err_y[self.outlier_flagged]
if len(y_bad_o) > 0:
self.plot_frame.a_val.plot(
X_bad_o,
y_bad_o,
'rx',
label='outlier', ms=14
)
# Show points that were flagged by the user:
s_flagged_idxs = re.findall(
LIST_REGEX,
self.control_frame.outlier_frame.specific_box.get()
)
flagged_idxs = set()
for s in s_flagged_idxs:
try:
                i = int(s)
if i >= len(self.combined_p.y):
self.control_frame.status_frame.add_line(
"Value %d out of range, will be ignored." % (i,)
)
else:
flagged_idxs.add(i)
except ValueError:
self.control_frame.status_frame.add_line(
"Invalid index to remove '%s', will be ignored." % (s,)
)
flagged_idxs = list(flagged_idxs)
if self.flagged_plt is not None:
for p in self.flagged_plt:
try:
p.remove()
except ValueError:
pass
if len(flagged_idxs) > 0:
self.flagged_plt = self.plot_frame.a_val.plot(
self.combined_p.X[flagged_idxs, :].ravel(),
self.combined_p.y[flagged_idxs],
'x',
color='orange',
label='flagged',
ms=14
)
if self.res is not None:
self.plot_fit()
def exit(self):
"""Quit the program, cleaning up as needed.
"""
self.destroy()
self.quit()
class MCMCResultsFrame(tk.Frame):
"""Frame to plot the results of the MCMC sampler.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
sampler = self.master.master.sampler
k = sampler.flatchain.shape[1]
self.f = Figure()
gs1 = mplgs.GridSpec(k, k)
gs2 = mplgs.GridSpec(1, k)
gs1.update(bottom=0.275, top=0.98)
gs2.update(bottom=0.1, top=0.2)
self.axes = []
# j is the row, i is the column.
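        # The k x k block laid out by gs1 is the corner plot of marginal
        # distributions; the extra bottom row from gs2 holds one chain-trace
        # axis per hyperparameter (both are filled in by refresh()).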
for j in xrange(0, k + 1):
row = []
for i in xrange(0, k):
if i > j:
row.append(None)
else:
sharey = row[-1] if i > 0 and i < j and j < k else None
                    sharex = (
                        self.axes[-1][i] if j > i and j < k
                        else (row[-1] if i > 0 and j == k else None)
                    )
gs = gs1[j, i] if j < k else gs2[:, i]
row.append(self.f.add_subplot(gs, sharey=sharey, sharex=sharex))
self.axes.append(row)
self.axes = scipy.asarray(self.axes)
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().grid(row=0, column=0, sticky='NESW')
self.toolbar_frame = tk.Frame(self)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.toolbar_frame)
self.toolbar.update()
# self.canvas._tkcanvas.grid(row=1, column=0, sticky='EW')
self.toolbar_frame.grid(row=1, column=0, sticky='EW')
self.canvas.mpl_connect('button_press_event', lambda event: self.canvas._tkcanvas.focus_set())
self.canvas.mpl_connect('key_press_event', self.on_key_event)
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.refresh()
def on_key_event(self, evt):
"""Respond to key presses.
"""
key_press_handler(evt, self.canvas, self.toolbar)
def print_stats(self, box):
"""Print the statistics of the sampler to the given `box`.
`box` can be anything with a :py:meth:`add_line` method.
"""
sampler = self.master.master.sampler
try:
box.add_line("MCMC sampler autocorrelation times:\n%s" % (sampler.acor,))
except RuntimeError:
box.add_line("Could not compute MCMC sampler autocorrelation times.")
box.add_line("MCMC sampler mean acceptance fraction: %.2f%%" % (100 * scipy.mean(sampler.acceptance_fraction),))
box.add_line("Parameter summary:")
box.add_line("param\tmean\t95% posterior interval")
try:
burn = int(self.master.master.control_frame.fitting_frame.MCMC_frame.burn_box.get())
except ValueError:
self.master.MCMC_control_frame.help_box.add_line("Invalid value for burn! Defaulting to 0.")
burn = 0
if burn >= sampler.chain.shape[1]:
burn = 0
mean, ci_l, ci_u = gptools.summarize_sampler(sampler, burn=burn)
names = HYPERPARAMETERS[self.master.master.control_frame.kernel_frame.kernel_type_frame.k_var.get()]
for n, m, l, u in zip(names, mean, ci_l, ci_u):
box.add_line("%s\t%4.4g\t[%4.4g, %4.4g]" % (n, m, l, u))
def refresh(self, print_stats=True):
"""Refresh the plot.
"""
sampler = self.master.master.sampler
if sampler:
if print_stats:
self.print_stats(self.master.MCMC_control_frame.help_box)
labels = ['$%s$' % (l,) for l in self.master.master.combined_p.gp.free_param_names]
k = sampler.flatchain.shape[1]
try:
burn = int(self.master.master.control_frame.fitting_frame.MCMC_frame.burn_box.get())
except ValueError:
self.master.MCMC_control_frame.help_box.add_line("Invalid value for burn! Defaulting to 0.")
burn = 0
if burn >= sampler.chain.shape[1]:
burn = 0
flat_trace = sampler.chain[:, burn:, :]
flat_trace = flat_trace.reshape((-1, k))
# j is the row, i is the column.
# Loosely based on triangle.py
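            # Diagonal: 1D marginal histograms. Below the diagonal: 2D marginal
            # histograms. Bottom row: per-walker chain traces with the burn-in
            # cutoff marked by a vertical red line.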
for i in xrange(0, k):
self.axes[i, i].clear()
self.axes[i, i].hist(flat_trace[:, i], bins=50, color='black')
if i == k - 1:
self.axes[i, i].set_xlabel(labels[i])
if i < k - 1:
plt.setp(self.axes[i, i].get_xticklabels(), visible=False)
plt.setp(self.axes[i, i].get_yticklabels(), visible=False)
# for j in xrange(0, i):
# self.axes[j, i].set_visible(False)
# self.axes[j, i].set_frame_on(False)
for j in xrange(i + 1, k):
self.axes[j, i].clear()
ct, x, y, im = self.axes[j, i].hist2d(
flat_trace[:, i],
flat_trace[:, j],
bins=50,
cmap='gray_r'
)
# xmid = 0.5 * (x[1:] + x[:-1])
# ymid = 0.5 * (y[1:] + y[:-1])
# self.axes[j, i].contour(xmid, ymid, ct.T, colors='k')
if j < k - 1:
plt.setp(self.axes[j, i].get_xticklabels(), visible=False)
if i != 0:
plt.setp(self.axes[j, i].get_yticklabels(), visible=False)
if i == 0:
self.axes[j, i].set_ylabel(labels[j])
if j == k - 1:
self.axes[j, i].set_xlabel(labels[i])
self.axes[-1, i].clear()
self.axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=0.1)
self.axes[-1, i].axvline(burn, color='r', linewidth=3)
self.axes[-1, i].set_ylabel(labels[i])
self.axes[-1, i].set_xlabel('step')
self.canvas.draw()
else:
self.master.MCMC_control_frame.help_box.add_line(
"Sampler is invalid, please set valid parameter values and resample!"
)
class MCMCControlFrame(tk.Frame):
"""Frame to hold results of the MCMC sampler.
"""
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.help_box = StatusBox(self, **FRAME_PARAMS)
self.help_box.grid(row=0, column=0, sticky='EWNS')
self.help_box.add_line(
"""\nCheck to make sure the output of the MCMC sampler
looks correct.
1.) The chains (bottom plots) should be well mixed:
they should all overlap and not spend too much
time in any given spot. Set burn to the
iteration number at which they appear to
become mixed and the initial transients have
died down.
2.) The univariate marginals (diagonal of the
matrix) should be peaked and go to zero near
the edges. It is also preferable that they be
unimodal. If they look wrong, try adjusting
the bounds to exclude any unphysical modes.
3.) The bivariate marginals should also be unimodal.
"""
)
self.control_frame = tk.Frame(self)
self.resample_button = tk.Button(
self.control_frame,
text="resample",
command=self.resample
)
self.resample_button.grid(row=0, column=0, sticky='W')
self.add_sample_button = tk.Button(
self.control_frame,
text="add samples",
command=self.add_samples
)
self.add_sample_button.grid(row=0, column=1)
self.burn_button = tk.Button(
self.control_frame,
text="apply burn",
command=self.apply_burn
)
self.burn_button.grid(row=0, column=2)
self.reject_button = tk.Button(
self.control_frame,
text="abort",
command=self.abort
)
self.reject_button.grid(row=0, column=3, sticky='W')
self.accept_button = tk.Button(
self.control_frame,
text="continue",
command=self.continue_
)
self.accept_button.grid(row=0, column=4, sticky='W')
self.control_frame.grid(row=3, column=0, sticky='EW')
self.entry_frame = MCMCFrame(self, **FRAME_PARAMS)
self.entry_frame.grid(row=1, column=0, columnspan=5, sticky='EW')
# Update from master frame:
self.entry_frame.walker_box.delete(0, tk.END)
self.entry_frame.walker_box.insert(
0,
self.master.master.control_frame.fitting_frame.MCMC_frame.walker_box.get()
)
self.entry_frame.sample_box.delete(0, tk.END)
self.entry_frame.sample_box.insert(
0,
self.master.master.control_frame.fitting_frame.MCMC_frame.sample_box.get()
)
self.entry_frame.burn_box.delete(0, tk.END)
self.entry_frame.burn_box.insert(
0,
self.master.master.control_frame.fitting_frame.MCMC_frame.burn_box.get()
)
self.entry_frame.keep_box.delete(0, tk.END)
self.entry_frame.keep_box.insert(
0,
self.master.master.control_frame.fitting_frame.MCMC_frame.keep_box.get()
)
self.entry_frame.a_box.delete(0, tk.END)
self.entry_frame.a_box.insert(
0,
self.master.master.control_frame.fitting_frame.MCMC_frame.a_box.get()
)
# Input hyperparameter bounds:
self.bounds_meta_frame = tk.Frame(self, **FRAME_PARAMS)
self.bounds_label = tk.Label(
self.bounds_meta_frame,
text="hyperparameter bounds:"
)
self.bounds_label.grid(row=0, column=0, sticky='W')
self.bounds_frame = KernelBoundsFrame(
HYPERPARAMETERS[self.master.master.control_frame.kernel_frame.kernel_type_frame.k_var.get()],
self.bounds_meta_frame
)
self.bounds_frame.grid(row=1, column=0, sticky='EW')
self.bounds_meta_frame.grid(row=2, column=0, sticky='EW')
self.bounds_meta_frame.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
self.get_hyperprior_from_master()
def get_hyperprior_from_master(self):
"""Fetch the hyperprior details from the parent Frame.
"""
for hf_self, hf_master in zip(
self.bounds_frame.hyperprior_frames,
self.master.master.control_frame.kernel_frame.bounds_frame.hyperprior_frames
):
hf_self.hp_type_var.set(hf_master.hp_type)
hf_self.update_hp_type(hf_self.hp_type_var.get())
for hhpb_self, hhpb_master in zip(
hf_self.hyperhyperparameter_frame.boxes,
hf_master.hyperhyperparameter_frame.boxes
):
hhpb_self.delete(0, tk.END)
hhpb_self.insert(0, hhpb_master.get())
def send_hyperprior_to_master(self):
"""Send the hyperprior details back to the master Frame and call :py:meth:`FitWindow.process_bounds`.
"""
for hf_self, hf_master in zip(
self.bounds_frame.hyperprior_frames,
self.master.master.control_frame.kernel_frame.bounds_frame.hyperprior_frames
):
hf_master.hp_type_var.set(hf_self.hp_type)
hf_master.update_hp_type(hf_master.hp_type_var.get())
for hhpb_self, hhpb_master in zip(
hf_self.hyperhyperparameter_frame.boxes,
hf_master.hyperhyperparameter_frame.boxes
):
hhpb_master.delete(0, tk.END)
hhpb_master.insert(0, hhpb_self.get())
return self.master.master.process_bounds()
def update_MCMC_params(self, walkers=True, sample=True, burn=True, thin=True, a=True):
"""Update the MCMC parameters and propagate back to the parent Frame.
"""
if walkers:
self.master.master.control_frame.fitting_frame.MCMC_frame.walker_box.delete(0, tk.END)
self.master.master.control_frame.fitting_frame.MCMC_frame.walker_box.insert(
0,
self.entry_frame.walker_box.get()
)
if sample:
self.master.master.control_frame.fitting_frame.MCMC_frame.sample_box.delete(0, tk.END)
self.master.master.control_frame.fitting_frame.MCMC_frame.sample_box.insert(
0,
self.entry_frame.sample_box.get()
)
if burn:
self.master.master.control_frame.fitting_frame.MCMC_frame.burn_box.delete(0, tk.END)
self.master.master.control_frame.fitting_frame.MCMC_frame.burn_box.insert(
0,
self.entry_frame.burn_box.get()
)
if thin:
self.master.master.control_frame.fitting_frame.MCMC_frame.keep_box.delete(0, tk.END)
self.master.master.control_frame.fitting_frame.MCMC_frame.keep_box.insert(
0,
self.entry_frame.keep_box.get()
)
if a:
self.master.master.control_frame.fitting_frame.MCMC_frame.a_box.delete(0, tk.END)
self.master.master.control_frame.fitting_frame.MCMC_frame.a_box.insert(
0,
self.entry_frame.a_box.get()
)
def continue_(self):
"""Accept the samples and evaluate.
"""
self.help_box.add_line("Continuing...")
self.send_hyperprior_to_master()
self.update_MCMC_params()
self.master.destroy(good=True)
def abort(self):
"""Reject the samples and return to the parent window.
"""
self.help_box.add_line("Aborting evaluation...")
self.send_hyperprior_to_master()
# self.master.master.sampler.pool.close()
# self.master.master.sampler = None
self.master.destroy(good=False)
def resample(self):
"""Re-run the sampler with new hyperparameters.
"""
self.help_box.add_line("Re-running MCMC sampler...")
self.send_hyperprior_to_master()
self.update_MCMC_params()
try:
self.master.master.sampler.pool.close()
except AttributeError:
pass
self.master.master.sampler = None
self.master.master.run_MCMC_sampler()
self.master.MCMC_frame.refresh()
self.help_box.add_line("Done resampling.")
def add_samples(self):
"""Add samples without changing the hyperparameter bounds.
"""
self.help_box.add_line("Adding new samples...")
self.update_MCMC_params(walkers=False)
self.master.master.run_MCMC_sampler()
self.master.MCMC_frame.refresh()
self.help_box.add_line("Done sampling.")
def apply_burn(self):
"""Replot with new burn.
"""
self.update_MCMC_params(walkers=False, sample=False, thin=False)
self.master.MCMC_frame.refresh(print_stats=False)
class MCMCWindow(tk.Toplevel):
"""Window to display and interact with results of MCMC sampler.
"""
def __init__(self, *args, **kwargs):
tk.Toplevel.__init__(self, *args, **kwargs)
self.wm_title("%s %s: MCMC results" % (PROG_NAME, __version__,))
self.MCMC_control_frame = MCMCControlFrame(self)
self.MCMC_frame = MCMCResultsFrame(self)
self.MCMC_frame.grid(row=0, column=0, sticky='NSEW')
self.MCMC_control_frame.grid(row=0, column=1, sticky='NSEW')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
def destroy(self, good=False):
if not good:
self.master.sampler.pool.close()
self.master.sampler = None
tk.Toplevel.destroy(self)
def impose_entry(w, v):
"""Impose value `v` on :py:class:`tk.Entry` `w`, leaving `w` in its previous state.
"""
s = w.cget('state')
w.config(state=tk.NORMAL)
w.delete(0, tk.END)
w.insert(0, v)
w.config(state=s)
def run_gui(argv=None):
global args
if argv is not None:
args = parser.parse_args(argv)
root = FitWindow()
if args.load:
root.load_state(path=args.load)
else:
# Set the defaults HERE so we don't clobber what's in the file:
# Populate the GUI with parameters from args:
if not args.kernel:
if args.core_only:
args.kernel = 'SE'
else:
args.kernel = 'gibbstanh'
elif args.kernel == 'SEsym1d':
args.no_core_constraint = True
# Turn off edge constraint for --core-only:
if args.core_only:
args.no_edge_constraint = True
# Bump random starts up to 4 for low processor count machines:
if not args.random_starts:
num_proc = multiprocessing.cpu_count()
if num_proc < 4:
args.random_starts = 4
else:
args.random_starts = min(num_proc, 20)
if args.signal:
root.control_frame.data_source_frame.tree_file_frame.source_state.set(
root.control_frame.data_source_frame.tree_file_frame.TREE_MODE
)
root.control_frame.data_source_frame.update_source()
root.control_frame.data_source_frame.signal_coordinate_frame.signal_var.set(args.signal)
root.control_frame.data_source_frame.update_signal(args.signal)
if args.shot is not None:
impose_entry(
root.control_frame.data_source_frame.shot_frame.shot_box,
str(args.shot)
)
if args.t_min is not None:
root.control_frame.averaging_frame.time_window_frame.method_state.set(
root.control_frame.averaging_frame.time_window_frame.WINDOW_MODE
)
root.control_frame.averaging_frame.time_window_frame.update_method()
impose_entry(
root.control_frame.averaging_frame.time_window_frame.t_min_box,
str(args.t_min)
)
if args.t_max is not None:
root.control_frame.averaging_frame.time_window_frame.method_state.set(
root.control_frame.averaging_frame.time_window_frame.WINDOW_MODE
)
root.control_frame.averaging_frame.time_window_frame.update_method()
impose_entry(
root.control_frame.averaging_frame.time_window_frame.t_max_box,
str(args.t_max)
)
if args.t_points:
root.control_frame.averaging_frame.time_window_frame.method_state.set(
root.control_frame.averaging_frame.time_window_frame.POINT_MODE
)
root.control_frame.averaging_frame.time_window_frame.update_method()
impose_entry(
root.control_frame.averaging_frame.time_window_frame.times_box.times_box,
str(args.t_points)[1:-1]
)
if args.t_tol:
impose_entry(
root.control_frame.averaging_frame.time_window_frame.times_box.times_tol_box,
str(args.t_tol)
)
if args.npts is not None:
root.control_frame.eval_frame.method_state.set(
root.control_frame.eval_frame.UNIFORM_GRID
)
root.control_frame.eval_frame.update_method()
impose_entry(
root.control_frame.eval_frame.npts_box,
str(args.npts)
)
if args.x_min is not None:
root.control_frame.eval_frame.method_state.set(
root.control_frame.eval_frame.UNIFORM_GRID
)
root.control_frame.eval_frame.update_method()
impose_entry(
root.control_frame.eval_frame.x_min_box,
str(args.x_min)
)
if args.x_max is not None:
root.control_frame.eval_frame.method_state.set(
root.control_frame.eval_frame.UNIFORM_GRID
)
root.control_frame.eval_frame.update_method()
impose_entry(
root.control_frame.eval_frame.x_max_box,
str(args.x_max)
)
if args.x_pts:
root.control_frame.eval_frame.method_state.set(
root.control_frame.eval_frame.POINTS
)
root.control_frame.eval_frame.update_method()
impose_entry(
root.control_frame.eval_frame.x_points_box,
str(args.x_pts)[1:-1]
)
if args.system:
root.control_frame.data_source_frame.tree_file_frame.source_state.set(
root.control_frame.data_source_frame.tree_file_frame.TREE_MODE
)
root.control_frame.data_source_frame.update_source()
systems = set(args.system)
if 'TS' in systems:
systems.remove('TS')
systems.add('ETS')
systems.add('CTS')
for b in root.control_frame.data_source_frame.system_frame.buttons:
if b.system in systems:
b.button.select()
else:
b.button.deselect()
if b.system == 'TCI':
b.invoke_TCI()
if args.TCI_quad_points:
impose_entry(
root.control_frame.data_source_frame.TCI_frame.TCI_points_box,
str(args.TCI_quad_points)
)
if args.TCI_thin:
impose_entry(
root.control_frame.data_source_frame.TCI_frame.TCI_thin_box,
str(args.TCI_thin)
)
if args.TCI_ds:
impose_entry(
root.control_frame.data_source_frame.TCI_frame.TCI_ds_box,
str(args.TCI_ds)
)
if args.kernel:
root.control_frame.kernel_frame.kernel_type_frame.k_var.set(args.kernel)
root.control_frame.kernel_frame.update_kernel(args.kernel)
if args.coordinate:
root.control_frame.data_source_frame.signal_coordinate_frame.coordinate_var.set(args.coordinate)
if args.core_constraint_location is not None:
root.control_frame.kernel_frame.constraints_frame.core_button.select()
root.control_frame.kernel_frame.constraints_frame.update_core()
impose_entry(
root.control_frame.kernel_frame.constraints_frame.core_loc,
str(args.core_constraint_location)[1:-1]
)
if args.edge_constraint_locations:
root.control_frame.kernel_frame.constraints_frame.edge_button.select()
root.control_frame.kernel_frame.constraints_frame.update_edge()
impose_entry(
root.control_frame.kernel_frame.constraints_frame.edge_loc,
str(args.edge_constraint_locations)[1:-1]
)
if args.no_core_constraint:
root.control_frame.kernel_frame.constraints_frame.core_button.deselect()
root.control_frame.kernel_frame.constraints_frame.update_core()
if args.no_edge_constraint:
root.control_frame.kernel_frame.constraints_frame.edge_button.deselect()
root.control_frame.kernel_frame.constraints_frame.update_edge()
if args.core_only:
root.control_frame.kernel_frame.kernel_type_frame.core_only_button.select()
if args.unweighted:
root.control_frame.averaging_frame.method_frame.weighted_button.deselect()
if args.robust:
root.control_frame.averaging_frame.method_frame.method_var.set('robust')
if args.all_points:
root.control_frame.averaging_frame.method_frame.method_var.set('all points')
root.control_frame.averaging_frame.method_frame.update_method('all points')
if args.uncertainty_method:
root.control_frame.averaging_frame.method_frame.error_method_var.set(args.uncertainty_method.replace('_', ' '))
root.control_frame.averaging_frame.method_frame.update_method(
root.control_frame.averaging_frame.method_frame.method_var.get()
)
if args.uncertainty_adjust_value:
root.control_frame.averaging_frame.fudge_frame.fudge_button.select()
root.control_frame.averaging_frame.fudge_frame.set_state()
impose_entry(
root.control_frame.averaging_frame.fudge_frame.fudge_value_box,
str(args.uncertainty_adjust_value)
)
if args.uncertainty_adjust_method:
root.control_frame.averaging_frame.fudge_frame.fudge_method_var.set(
args.uncertainty_adjust_method
)
if args.uncertainty_adjust_type:
root.control_frame.averaging_frame.fudge_frame.fudge_type_var.set(
args.uncertainty_adjust_type
)
if args.change_threshold is not None:
root.control_frame.outlier_frame.extreme_button.select()
root.control_frame.outlier_frame.update_extreme()
impose_entry(
root.control_frame.outlier_frame.extreme_thresh_box,
str(args.change_threshold)
)
if args.outlier_threshold is not None:
root.control_frame.outlier_frame.outlier_button.select()
root.control_frame.outlier_frame.update_outlier()
impose_entry(
root.control_frame.outlier_frame.outlier_thresh_box,
str(args.outlier_threshold)
)
if args.random_starts is not None:
impose_entry(
root.control_frame.fitting_frame.method_frame.starts_box,
str(args.random_starts)
)
if args.hyperprior:
hp = list(args.hyperprior)
kernel = root.control_frame.kernel_frame.kernel_type_frame.k_var.get()
valid_names = HYPERPARAMETERS[kernel].keys()
while hp:
name = HYPERPARAMETER_NAMES[hp.pop(0)]
name_idx = valid_names.index(name)
dist_name = hp.pop(0)
param_count = len(HYPERPRIORS[dist_name])
hpf = root.control_frame.kernel_frame.bounds_frame.hyperprior_frames[name_idx]
hpf.hp_type_var.set(dist_name)
hpf.update_hp_type(dist_name)
for k in xrange(0, param_count):
impose_entry(
hpf.hyperhyperparameter_frame.boxes[k],
hp.pop(0)
)
elif args.bounds:
for k, hf in zip(
xrange(0, len(root.control_frame.kernel_frame.bounds_frame.hyperprior_frames)),
root.control_frame.kernel_frame.bounds_frame.hyperprior_frames
):
hf.hp_type_var.set('uniform')
hf.update_hp_type('uniform')
impose_entry(
hf.hyperhyperparameter_frame.boxes[0],
str(args.bounds[2 * k])
)
impose_entry(
hf.hyperhyperparameter_frame.boxes[1],
str(args.bounds[2 * k + 1])
)
if args.input_filename or args.abscissa_name or args.ordinate_name or args.metadata_lines:
root.control_frame.data_source_frame.tree_file_frame.source_state.set(
root.control_frame.data_source_frame.tree_file_frame.FILE_MODE
)
root.control_frame.data_source_frame.update_source()
if args.input_filename:
impose_entry(
root.control_frame.data_source_frame.tree_file_frame.path_entry,
args.input_filename
)
if args.abscissa_name:
if len(args.abscissa_name) == 2:
impose_entry(
root.control_frame.data_source_frame.variable_name_frame.time_box,
str(args.abscissa_name[0])
)
impose_entry(
root.control_frame.data_source_frame.variable_name_frame.space_box,
str(args.abscissa_name[-1])
)
if args.ordinate_name:
impose_entry(
root.control_frame.data_source_frame.variable_name_frame.data_box,
str(args.ordinate_name)
)
if args.metadata_lines is not None:
impose_entry(
root.control_frame.data_source_frame.variable_name_frame.meta_box,
str(args.metadata_lines)
)
if args.use_MCMC or args.walkers or args.MCMC_samp or args.burn or args.keep or args.sampler_a:
root.control_frame.fitting_frame.method_frame.method_state.set(
root.control_frame.fitting_frame.method_frame.USE_MCMC
)
root.control_frame.fitting_frame.update_method()
if args.walkers is not None:
impose_entry(
root.control_frame.fitting_frame.MCMC_frame.walker_box,
str(args.walkers)
)
if args.MCMC_samp is not None:
impose_entry(
root.control_frame.fitting_frame.MCMC_frame.sample_box,
str(args.MCMC_samp)
)
if args.burn is not None:
impose_entry(
root.control_frame.fitting_frame.MCMC_frame.burn_box,
str(args.burn)
)
if args.keep is not None:
impose_entry(
root.control_frame.fitting_frame.MCMC_frame.keep_box,
str(args.keep)
)
if args.sampler_a is not None:
impose_entry(
root.control_frame.fitting_frame.MCMC_frame.a_box,
str(args.sampler_a)
)
if args.full_monte_carlo or args.monte_carlo_samples or args.reject_negative or args.reject_non_monotonic:
root.control_frame.fitting_frame.MCMC_constraint_frame.full_MC_button.select()
root.control_frame.fitting_frame.MCMC_constraint_frame.update_full_MC()
if args.monte_carlo_samples:
impose_entry(
root.control_frame.fitting_frame.MCMC_constraint_frame.samples_box,
str(args.monte_carlo_samples)
)
if args.reject_negative:
root.control_frame.fitting_frame.MCMC_constraint_frame.pos_button.select()
if args.reject_non_monotonic:
root.control_frame.fitting_frame.MCMC_constraint_frame.mono_button.select()
if args.no_a_over_L:
root.control_frame.eval_frame.a_L_button.deselect()
root.control_frame.eval_frame.update_a_L()
if args.compute_vol_avg:
root.control_frame.eval_frame.vol_avg_button.select()
if args.compute_peaking:
root.control_frame.eval_frame.peaking_button.select()
if args.compute_TCI:
root.control_frame.eval_frame.TCI_button.select()
if args.x_lim:
impose_entry(
root.control_frame.plot_param_frame.x_lb_box,
str(args.x_lim[0])
)
impose_entry(
root.control_frame.plot_param_frame.x_ub_box,
str(args.x_lim[1])
)
if args.y_lim:
impose_entry(
root.control_frame.plot_param_frame.y_lb_box,
str(args.y_lim[0])
)
impose_entry(
root.control_frame.plot_param_frame.y_ub_box,
str(args.y_lim[1])
)
if args.dy_lim:
impose_entry(
root.control_frame.plot_param_frame.dy_lb_box,
str(args.dy_lim[0])
)
impose_entry(
root.control_frame.plot_param_frame.dy_ub_box,
str(args.dy_lim[1])
)
if args.aLy_lim:
impose_entry(
root.control_frame.plot_param_frame.aLy_lb_box,
str(args.aLy_lim[0])
)
impose_entry(
root.control_frame.plot_param_frame.aLy_ub_box,
str(args.aLy_lim[1])
)
if args.EFIT_tree:
impose_entry(
root.control_frame.data_source_frame.EFIT_frame.EFIT_field,
args.EFIT_tree
)
if args.plot_idxs:
root.control_frame.outlier_frame.show_idx_button.select()
if args.remove_points:
impose_entry(
root.control_frame.outlier_frame.specific_box,
str(args.remove_points)[1:-1]
)
root.save_state = not args.no_save_state
root.save_cov = args.cov_in_save_state
root.save_sampler = args.sampler_in_save_state
if args.full_auto or args.no_interaction:
root.load_data()
root.average_data()
root.fit_data()
if args.no_interaction:
root.save_fit(save_plot=True)
root.exit()
if not args.no_interaction and not args.no_mainloop:
root.mainloop()
return (root.X, root.res, root.combined_p)
else:
return root
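# A minimal scripted-usage sketch. The flag spellings and values below are
# assumptions inferred from the argparse attribute names referenced above
# (shot, signal, t_min, t_max, no_interaction, output_filename); see the
# parser definition earlier in this module for the authoritative names:
#
#     root = run_gui([
#         '--signal', 'ne',
#         '--shot', '<shot number>',
#         '--t-min', '0.8', '--t-max', '1.2',
#         '--no-interaction',
#         '--output-filename', 'ne_fit.csv'
#     ])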
if __name__ == "__main__":
root = run_gui()
|
markchil/profiletools
|
profiletools/gui.py
|
Python
|
gpl-3.0
| 232,035
|
[
"Gaussian",
"NetCDF"
] |
8b9ef9a3fb122587021c079e099e239736ab977640a1e35534b71e4ddd031352
|
#########################################################################################
# OAR.py
# 10.11.2014
# Author: Matvey Sapunov, A.T.
#########################################################################################
""" OAR.py is a DIRAC independent class representing OAR batch system.
OAR objects are used as backend batch system representation for
LocalComputingElement and SSHComputingElement classes
"""
__RCSID__ = "$Id$"
import commands
import os
import json
class OAR(object):
def submitJob(self, **kwargs):
""" Submit nJobs to the OAR batch system
"""
resultDict = {}
MANDATORY_PARAMETERS = ['Executable', 'OutputDir', 'ErrorDir',
'Queue', 'SubmitOptions']
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict['Status'] = -1
resultDict['Message'] = 'No %s' % argument
return resultDict
nJobs = kwargs.get('NJobs')
if not nJobs:
nJobs = 1
outputDir = kwargs['OutputDir']
errorDir = kwargs['ErrorDir']
queue = kwargs['Queue']
submitOptions = kwargs['SubmitOptions']
executable = kwargs['Executable']
outFile = os.path.join(outputDir, "%jobid%")
errFile = os.path.join(errorDir, "%jobid%")
outFile = os.path.expandvars(outFile)
errFile = os.path.expandvars(errFile)
executable = os.path.expandvars(executable)
jobIDs = []
preamble = kwargs.get("Preamble")
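# Submit the jobs one oarsub invocation at a time; each job ID is parsed from the
# last line of the oarsub output, which has the form 'OAR_JOB_ID=<id>'.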
for _i in xrange(nJobs):
cmd = '%s; ' % preamble if preamble else ''
cmd += "oarsub -O %s.out -E %s.err -q %s -n DIRACPilot %s %s" % (outFile,
errFile,
queue,
submitOptions,
executable)
status, output = commands.getstatusoutput(cmd)
if status != 0 or not output:
break
lines = output.split('\n')
jid = ''
if "OAR_JOB_ID" in lines[-1]:
_prefix, jid = lines[-1].split("=")
if not jid:
break
jid = jid.strip()
jobIDs.append(jid)
if jobIDs:
resultDict['Status'] = 0
resultDict['Jobs'] = jobIDs
else:
resultDict['Status'] = status
resultDict['Message'] = output
resultDict['Jobs'] = jobIDs
return resultDict
def killJob(self, **kwargs):
""" Delete a job from OAR batch scheduler. Input: list of jobs output: int
"""
resultDict = {}
MANDATORY_PARAMETERS = ['JobIDList']
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict['Status'] = -1
resultDict['Message'] = 'No %s' % argument
return resultDict
jobIDList = kwargs['JobIDList']
if not jobIDList:
resultDict['Status'] = -1
resultDict['Message'] = 'Empty job list'
return resultDict
successful = []
failed = []
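# Invoke oardel once per job ID; the overall status is 1 if any deletion failed,
# and the message carries the output of the last oardel call.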
for job in jobIDList:
status, output = commands.getstatusoutput('oardel %s' % job)
if status != 0:
failed.append(job)
else:
successful.append(job)
resultDict['Status'] = 0
if failed:
resultDict['Status'] = 1
resultDict['Message'] = output
resultDict['Successful'] = successful
resultDict['Failed'] = failed
return resultDict
def getJobStatus(self, **kwargs):
""" Get status of the jobs in the given list
"""
resultDict = {}
MANDATORY_PARAMETERS = ['JobIDList']
for argument in MANDATORY_PARAMETERS:
if argument not in kwargs:
resultDict['Status'] = -1
resultDict['Message'] = 'No %s' % argument
return resultDict
jobIDList = kwargs['JobIDList']
if not jobIDList:
resultDict['Status'] = -1
resultDict['Message'] = 'Empty job list'
return resultDict
user = kwargs.get('User')
if not user:
user = os.environ.get('USER')
if not user:
resultDict['Status'] = -1
resultDict['Message'] = 'No user name'
return resultDict
status, output = commands.getstatusoutput("oarstat --sql \"project = '%s'\" -J" % user)
if status != 0:
resultDict['Status'] = status
resultDict['Message'] = output
return resultDict
try:
output = json.loads(output)
except Exception as x:
resultDict['Status'] = 2048
resultDict['Message'] = str(x)
return resultDict
if not len(output) > 0:
resultDict['Status'] = 1024
resultDict['Message'] = output
return resultDict
statusDict = {}
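# Translate OAR job states into the generic states used by DIRAC:
# Running/Finishing -> Running, Error/toError -> Aborted, Terminated -> Done,
# queued-like states (Waiting, Hold, toAckReservation, Suspended, toLaunch,
# Launching) -> Waiting, anything else -> Unknown.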
for job in jobIDList:
if job not in output:
statusDict[job] = "Unknown"
continue
if "state" not in output[job]:
statusDict[job] = "Unknown"
continue
state = output[job]["state"]
if state in ["Running", "Finishing"]:
statusDict[job] = "Running"
continue
if state in ["Error", "toError"]:
statusDict[job] = "Aborted"
continue
if state in ["Waiting", "Hold", "toAckReservation", "Suspended", "toLaunch", "Launching"]:
statusDict[job] = "Waiting"
continue
if state == "Terminated":
statusDict[job] = "Done"
continue
statusDict[job] = "Unknown"
continue
# Final output
status = 0
resultDict['Status'] = 0
resultDict['Jobs'] = statusDict
return resultDict
def getCEStatus(self, **kwargs):
""" Get the overall status of the CE
"""
resultDict = {}
user = kwargs.get('User')
if not user:
user = os.environ.get('USER')
if not user:
resultDict['Status'] = -1
resultDict['Message'] = 'No user name'
return resultDict
waitingJobs = 0
runningJobs = 0
status, output = commands.getstatusoutput('oarstat -u %s -J' % user)
if status != 0:
if "arrayref expected" in output:
resultDict['Status'] = 0
resultDict["Waiting"] = 0
resultDict["Running"] = 0
return resultDict
resultDict['Status'] = status
resultDict['Message'] = output
return resultDict
try:
output = json.loads(output)
except Exception as x:
resultDict['Status'] = 2048
resultDict['Message'] = str(x)
return resultDict
if not output:
resultDict['Status'] = 0
resultDict["Waiting"] = waitingJobs
resultDict["Running"] = runningJobs
return resultDict
for value in output.values():
if "state" not in value:
continue
state = value["state"]
if state in ["Running", "Finishing"]:
runningJobs += 1
continue
if state in ["Waiting", "Hold", "toAckReservation", "Suspended", "toLaunch", "Launching"]:
waitingJobs += 1
continue
# Final output
resultDict['Status'] = 0
resultDict["Waiting"] = waitingJobs
resultDict["Running"] = runningJobs
return resultDict
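# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how a computing element might drive this class. The
# executable, directories and queue name below are hypothetical, and a working
# oarsub/oarstat/oardel installation is assumed on the host.
if __name__ == "__main__":
  oar = OAR()
  submission = oar.submitJob(Executable='/bin/hostname',
                             OutputDir='/tmp/oar-out',
                             ErrorDir='/tmp/oar-err',
                             Queue='default',
                             SubmitOptions='',
                             NJobs=2)
  print(submission)
  if submission['Status'] == 0:
    print(oar.getJobStatus(JobIDList=submission['Jobs']))
    print(oar.killJob(JobIDList=submission['Jobs']))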
|
andresailer/DIRAC
|
Resources/Computing/BatchSystems/OAR.py
|
Python
|
gpl-3.0
| 7,033
|
[
"DIRAC"
] |
e4ceb388ca0d38ad4da0d40cf49e0f8e50686b9e88f70f39edc0e49e2570cadd
|
"""
Extended docstrings for functions.py
"""
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi`::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362464'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
"""
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e`::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427165'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma`::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858165'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
"""
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K`::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871503'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
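It arises as the limiting geometric mean of the partial quotients in the
continued fraction expansion of almost every real number, and can be written
as the infinite product
.. math ::
K = \prod_{n=1}^{\infty} \left(1 + \frac{1}{n(n+2)}\right)^{\log_2 n}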
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function::
>>> exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant::
>>> mp.dps = 15
>>> quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
apery = r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> -psi(2,1)/2
1.2020569031595942853997381615114499907649862923405
>>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
mertens = r"""
Represents the Mertens or Meissel-Mertens constant, which is the
prime number analog of Euler's constant:
.. math ::
B_1 = \lim_{N\to\infty}
\left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
Here `p_k` denotes the `k`-th prime number. Other names for this
constant include the Hadamard-de la Vallee-Poussin constant or
the prime reciprocal constant.
The following gives the Mertens constant to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +mertens
0.2614972128476427837554268386086958590515666482612
References:
http://mathworld.wolfram.com/MertensConstant.html
"""
twinprime = r"""
Represents the twin prime constant, which is the factor `C_2`
featuring in the Hardy-Littlewood conjecture for the growth of the
twin prime counting function,
.. math ::
\pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
It is given by the product over primes
.. math ::
C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
Computing `C_2` to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +twinprime
0.66016181584686957392781211001455577843262336028473
References:
http://mathworld.wolfram.com/TwinPrimesConstant.html
"""
ln = r"""
Computes the natural logarithm of `x`, `\ln x`.
See :func:`~mpmath.log` for additional documentation."""
sqrt = r"""
``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrt(10)
3.16227766016838
>>> sqrt(100)
10.0
>>> sqrt(-4)
(0.0 + 2.0j)
>>> sqrt(1+1j)
(1.09868411346781 + 0.455089860562227j)
>>> sqrt(inf)
+inf
Square root evaluation is fast at huge precision::
>>> mp.dps = 50000
>>> a = sqrt(3)
>>> str(a)[-10:]
'9329332814'
:func:`mpmath.iv.sqrt` supports interval arguments::
>>> iv.dps = 15; iv.pretty = True
>>> iv.sqrt([16,100])
[4.0, 10.0]
>>> iv.sqrt(2)
[1.4142135623730949234, 1.4142135623730951455]
>>> iv.sqrt(2) ** 2
[1.9999999999999995559, 2.0000000000000004441]
"""
cbrt = r"""
``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
function is faster and more accurate than raising to a floating-point
fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 125**(mpf(1)/3)
mpf('4.9999999999999991')
>>> cbrt(125)
mpf('5.0')
Every nonzero complex number has three cube roots. This function
returns the cube root defined by `\exp(\log(x)/3)` where the
principal branch of the natural logarithm is used. Note that this
does not give a real cube root for negative real numbers::
>>> mp.pretty = True
>>> cbrt(-1)
(0.5 + 0.866025403784439j)
"""
exp = r"""
Computes the exponential function,
.. math ::
\exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
For complex numbers, the exponential function also satisfies
.. math ::
\exp(x+yi) = e^x (\cos y + i \sin y).
**Basic examples**
Some values of the exponential function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> exp(0)
1.0
>>> exp(1)
2.718281828459045235360287
>>> exp(-1)
0.3678794411714423215955238
>>> exp(inf)
+inf
>>> exp(-inf)
0.0
Arguments can be arbitrarily large::
>>> exp(10000)
8.806818225662921587261496e+4342
>>> exp(-10000)
1.135483865314736098540939e-4343
Evaluation is supported for interval arguments via
:func:`mpmath.iv.exp`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.exp([-inf,0])
[0.0, 1.0]
>>> iv.exp([0,1])
[1.0, 2.71828182845904523536028749558]
The exponential function can be evaluated efficiently to arbitrary
precision::
>>> mp.dps = 10000
>>> exp(pi) #doctest: +ELLIPSIS
23.140692632779269005729...8984304016040616
**Functional properties**
Numerical verification of Euler's identity for the complex
exponential function::
>>> mp.dps = 15
>>> exp(j*pi)+1
(0.0 + 1.22464679914735e-16j)
>>> chop(exp(j*pi)+1)
0.0
This recovers the coefficients (reciprocal factorials) in the
Maclaurin series expansion of exp::
>>> nprint(taylor(exp, 0, 5))
[1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333]
The exponential function is its own derivative and antiderivative::
>>> exp(pi)
23.1406926327793
>>> diff(exp, pi)
23.1406926327793
>>> quad(exp, [-inf, pi])
23.1406926327793
The exponential function can be evaluated using various methods,
including direct summation of the series, limits, and solving
the defining differential equation::
>>> nsum(lambda k: pi**k/fac(k), [0,inf])
23.1406926327793
>>> limit(lambda k: (1+pi/k)**k, inf)
23.1406926327793
>>> odefun(lambda t, x: x, 0, 1)(pi)
23.1406926327793
"""
cosh = r"""
Computes the hyperbolic cosine of `x`,
`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cosh(0)
1.0
>>> cosh(1)
1.543080634815243778477906
>>> cosh(-inf), cosh(+inf)
(+inf, +inf)
The hyperbolic cosine is an even, convex function with
a global minimum at `x = 0`, having a Maclaurin series
that starts::
>>> nprint(chop(taylor(cosh, 0, 5)))
[1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0]
Generalized to complex numbers, the hyperbolic cosine is
equivalent to a cosine with the argument rotated
in the imaginary direction, or `\cosh x = \cos ix`::
>>> cosh(2+3j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
>>> cos(3-2j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
"""
sinh = r"""
Computes the hyperbolic sine of `x`,
`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sinh(0)
0.0
>>> sinh(1)
1.175201193643801456882382
>>> sinh(-inf), sinh(+inf)
(-inf, +inf)
The hyperbolic sine is an odd function, with a Maclaurin
series that starts::
>>> nprint(chop(taylor(sinh, 0, 5)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333]
Generalized to complex numbers, the hyperbolic sine is
essentially a sine with a rotation `i` applied to
the argument; more precisely, `\sinh x = -i \sin ix`::
>>> sinh(2+3j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
>>> j*sin(3-2j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
"""
tanh = r"""
Computes the hyperbolic tangent of `x`,
`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tanh(0)
0.0
>>> tanh(1)
0.7615941559557648881194583
>>> tanh(-inf), tanh(inf)
(-1.0, 1.0)
The hyperbolic tangent is an odd, sigmoidal function, similar
to the inverse tangent and error function. Its Maclaurin
series is::
>>> nprint(chop(taylor(tanh, 0, 5)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
Generalized to complex numbers, the hyperbolic tangent is
essentially a tangent with a rotation `i` applied to
the argument; more precisely, `\tanh x = -i \tan ix`::
>>> tanh(2+3j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
>>> j*tan(3-2j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
"""
cos = r"""
Computes the cosine of `x`, `\cos(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cos(pi/3)
0.5
>>> cos(100000001)
-0.9802850113244713353133243
>>> cos(2+3j)
(-4.189625690968807230132555 - 9.109227893755336597979197j)
>>> cos(inf)
nan
>>> nprint(chop(taylor(cos, 0, 6)))
[1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889]
Intervals are supported via :func:`mpmath.iv.cos`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cos([0,1])
[0.540302305868139717400936602301, 1.0]
>>> iv.cos([0,2])
[-0.41614683654714238699756823214, 1.0]
"""
sin = r"""
Computes the sine of `x`, `\sin(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sin(pi/3)
0.8660254037844386467637232
>>> sin(100000001)
0.1975887055794968911438743
>>> sin(2+3j)
(9.1544991469114295734673 - 4.168906959966564350754813j)
>>> sin(inf)
nan
>>> nprint(chop(taylor(sin, 0, 6)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0]
Intervals are supported via :func:`mpmath.iv.sin`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sin([0,1])
[0.0, 0.841470984807896506652502331201]
>>> iv.sin([0,2])
[0.0, 1.0]
"""
tan = r"""
Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
The tangent function is singular at `x = (n+1/2)\pi`, but
``tan(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tan(pi/3)
1.732050807568877293527446
>>> tan(100000001)
-0.2015625081449864533091058
>>> tan(2+3j)
(-0.003764025641504248292751221 + 1.003238627353609801446359j)
>>> tan(inf)
nan
>>> nprint(chop(taylor(tan, 0, 6)))
[0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
Intervals are supported via :func:`mpmath.iv.tan`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.tan([0,1])
[0.0, 1.55740772465490223050697482944]
>>> iv.tan([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
sec = r"""
Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
The secant function is singular at `x = (n+1/2)\pi`, but
``sec(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sec(pi/3)
2.0
>>> sec(10000001)
-1.184723164360392819100265
>>> sec(2+3j)
(-0.04167496441114427004834991 + 0.0906111371962375965296612j)
>>> sec(inf)
nan
>>> nprint(chop(taylor(sec, 0, 6)))
[1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222]
Intervals are supported via :func:`mpmath.iv.sec`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sec([0,1])
[1.0, 1.85081571768092561791175326276]
>>> iv.sec([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
csc = r"""
Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
The cosecant function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``csc(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> csc(pi/3)
1.154700538379251529018298
>>> csc(10000001)
-1.864910497503629858938891
>>> csc(2+3j)
(0.09047320975320743980579048 + 0.04120098628857412646300981j)
>>> csc(inf)
nan
Intervals are supported via :func:`mpmath.iv.csc`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.csc([0,1]) # Interval includes a singularity
[1.18839510577812121626159943988, +inf]
>>> iv.csc([0,2])
[1.0, +inf]
"""
cot = r"""
Computes the cotangent of `x`,
`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
The cotangent function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``cot(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cot(pi/3)
0.5773502691896257645091488
>>> cot(10000001)
1.574131876209625656003562
>>> cot(2+3j)
(-0.003739710376336956660117409 - 0.9967577965693583104609688j)
>>> cot(inf)
nan
Intervals are supported via :func:`mpmath.iv.cot`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cot([0,1]) # Interval includes a singularity
[0.642092615934330703006419974862, +inf]
>>> iv.cot([1,2])
[-inf, +inf]
"""
acos = r"""
Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
cosine is real-valued only for `-1 \le x \le 1`. On this interval,
:func:`~mpmath.acos` is defined to be a monotonically decreasing
function assuming values between `+\pi` and `0`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> acos(-1)
3.141592653589793238462643
>>> acos(0)
1.570796326794896619231322
>>> acos(1)
0.0
>>> nprint(chop(taylor(acos, 0, 6)))
[1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0]
:func:`~mpmath.acos` is defined so as to be a proper inverse function of
`\cos(\theta)` for `0 \le \theta < \pi`.
We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
>>> for x in [1, 10, -1, 2+3j, 10+3j]:
... print("%s %s" % (cos(acos(x)), acos(cos(x))))
...
1.0 1.0
(10.0 + 0.0j) 2.566370614359172953850574
-1.0 1.0
(2.0 + 3.0j) (2.0 + 3.0j)
(10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
asin = r"""
Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
sine is real-valued only for `-1 \le x \le 1`.
On this interval, it is defined to be a monotonically increasing
function assuming values between `-\pi/2` and `\pi/2`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> asin(-1)
-1.570796326794896619231322
>>> asin(0)
0.0
>>> asin(1)
1.570796326794896619231322
>>> nprint(chop(taylor(asin, 0, 6)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0]
:func:`~mpmath.asin` is defined so as to be a proper inverse function of
`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (chop(sin(asin(x))), asin(sin(x))))
...
1.0 1.0
10.0 -0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.0 + 3.0j)
(-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
atan = r"""
Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
This is a real-valued function for all real `x`, with range
`(-\pi/2, \pi/2)`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> atan(-inf)
-1.570796326794896619231322
>>> atan(-1)
-0.7853981633974483096156609
>>> atan(0)
0.0
>>> atan(1)
0.7853981633974483096156609
>>> atan(inf)
1.570796326794896619231322
>>> nprint(chop(taylor(atan, 0, 6)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
The inverse tangent is often used to compute angles. However,
the atan2 function is often better for this as it preserves sign
(see :func:`~mpmath.atan2`).
:func:`~mpmath.atan` is defined so as to be a proper inverse function of
`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> mp.dps = 25
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (tan(atan(x)), atan(tan(x))))
...
1.0 1.0
10.0 0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
(-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan`
places the branch cuts along the line segments `(-i \infty, -i)` and
`(+i, +i \infty)`. In general,
.. math ::
\tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
where the principal-branch log is implied.
"""
acot = r"""Computes the inverse cotangent of `x`,
`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
asec = r"""Computes the inverse secant of `x`,
`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
acsc = r"""Computes the inverse cosecant of `x`,
`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
coth = r"""Computes the hyperbolic cotangent of `x`,
`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`.
"""
sech = r"""Computes the hyperbolic secant of `x`,
`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`.
"""
csch = r"""Computes the hyperbolic cosecant of `x`,
`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`.
"""
acosh = r"""Computes the inverse hyperbolic cosine of `x`,
`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`.
"""
asinh = r"""Computes the inverse hyperbolic sine of `x`,
`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`.
"""
atanh = r"""Computes the inverse hyperbolic tangent of `x`,
`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`.
"""
acoth = r"""Computes the inverse hyperbolic cotangent of `x`,
`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`."""
asech = r"""Computes the inverse hyperbolic secant of `x`,
`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`."""
acsch = r"""Computes the inverse hyperbolic cosecant of `x`,
`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`."""
sinpi = r"""
Computes `\sin(\pi x)`, more accurately than the expression
``sin(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinpi(10**10), sin(pi*(10**10))
(0.0, -2.23936276195592e-6)
>>> sinpi(10**10+0.5), sin(pi*(10**10+0.5))
(1.0, 0.999999999998721)
"""
cospi = r"""
Computes `\cos(\pi x)`, more accurately than the expression
``cos(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cospi(10**10), cos(pi*(10**10))
(1.0, 0.999999999997493)
>>> cospi(10**10+0.5), cos(pi*(10**10+0.5))
(0.0, 1.59960492420134e-6)
"""
sinc = r"""
``sinc(x)`` computes the unnormalized sinc function, defined as
.. math ::
\mathrm{sinc}(x) = \begin{cases}
\sin(x)/x, & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
See :func:`~mpmath.sincpi` for the normalized sinc function.
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinc(0)
1.0
>>> sinc(1)
0.841470984807897
>>> sinc(inf)
0.0
The integral of the sinc function is the sine integral Si::
>>> quad(sinc, [0, 1])
0.946083070367183
>>> si(1)
0.946083070367183
"""
sincpi = r"""
``sincpi(x)`` computes the normalized sinc function, defined as
.. math ::
\mathrm{sinc}_{\pi}(x) = \begin{cases}
\sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
Equivalently, we have
`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
The normalization entails that the function integrates
to unity over the entire real line::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quadosc(sincpi, [-inf, inf], period=2.0)
1.0
Like :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately
at its roots::
>>> sincpi(10)
0.0
"""
expj = r"""
Convenience function for computing `e^{ix}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expj(0)
(1.0 + 0.0j)
>>> expj(-1)
(0.5403023058681397174009366 - 0.8414709848078965066525023j)
>>> expj(j)
(0.3678794411714423215955238 + 0.0j)
>>> expj(1+j)
(0.1987661103464129406288032 + 0.3095598756531121984439128j)
"""
expjpi = r"""
Convenience function for computing `e^{i \pi x}`.
Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`,
:func:`~mpmath.sinpi`)::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expjpi(0)
(1.0 + 0.0j)
>>> expjpi(1)
(-1.0 + 0.0j)
>>> expjpi(0.5)
(0.0 + 1.0j)
>>> expjpi(-1)
(-1.0 + 0.0j)
>>> expjpi(j)
(0.04321391826377224977441774 + 0.0j)
>>> expjpi(1+j)
(-0.04321391826377224977441774 + 0.0j)
"""
floor = r"""
Computes the floor of `x`, `\lfloor x \rfloor`, defined as
the largest integer less than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> floor(3.5)
mpf('3.0')
.. note ::
:func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a
floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is
too large to be represented exactly at the present working precision,
the result will be rounded, not necessarily in the direction
implied by the mathematical definition of the function.
To avoid rounding, use *prec=0*::
>>> mp.dps = 15
>>> print(int(floor(10**30+1)))
1000000000000000019884624838656
>>> print(int(floor(10**30+1, prec=0)))
1000000000000000000000000000001
The floor function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> floor(3.25+4.75j)
mpc(real='3.0', imag='4.0')
"""
ceil = r"""
Computes the ceiling of `x`, `\lceil x \rceil`, defined as
the smallest integer greater than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> ceil(3.5)
mpf('4.0')
The ceiling function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> ceil(3.25+4.75j)
mpc(real='4.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
nint = r"""
Evaluates the nearest integer function, `\mathrm{nint}(x)`.
This gives the nearest integer to `x`; on a tie, it
gives the nearest even integer::
>>> from mpmath import *
>>> mp.pretty = False
>>> nint(3.2)
mpf('3.0')
>>> nint(3.8)
mpf('4.0')
>>> nint(3.5)
mpf('4.0')
>>> nint(4.5)
mpf('4.0')
The nearest integer function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> nint(3.25+4.75j)
mpc(real='3.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
frac = r"""
Gives the fractional part of `x`, defined as
`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`).
In effect, this computes `x` modulo 1, or `x+n` where
`n \in \mathbb{Z}` is such that `x+n \in [0,1)`::
>>> from mpmath import *
>>> mp.pretty = False
>>> frac(1.25)
mpf('0.25')
>>> frac(3)
mpf('0.0')
>>> frac(-1.25)
mpf('0.75')
For a complex number, the fractional part function applies to
the real and imaginary parts separately::
>>> frac(2.25+3.75j)
mpc(real='0.25', imag='0.75')
Plotted, the fractional part function gives a sawtooth
wave. The Fourier series coefficients have a simple
form::
>>> mp.dps = 15
>>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4))
([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775])
>>> nprint([-1/(pi*k) for k in range(1,5)])
[-0.31831, -0.159155, -0.106103, -0.0795775]
.. note::
The fractional part is sometimes defined as a symmetric
function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`.
This convention is used, for instance, by Mathematica's
``FractionalPart``.
"""
sign = r"""
Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
(with the special case `\mathrm{sign}(0) = 0`)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> sign(10)
mpf('1.0')
>>> sign(-10)
mpf('-1.0')
>>> sign(0)
mpf('0.0')
Note that the sign function is also defined for complex numbers,
for which it gives the projection onto the unit circle::
>>> mp.dps = 15; mp.pretty = True
>>> sign(1+j)
(0.707106781186547 + 0.707106781186547j)
"""
arg = r"""
Computes the complex argument (phase) of `x`, defined as the
signed angle between the positive real axis and `x` in the
complex plane::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> arg(3)
0.0
>>> arg(3+3j)
0.785398163397448
>>> arg(3j)
1.5707963267949
>>> arg(-3)
3.14159265358979
>>> arg(-3j)
-1.5707963267949
The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and
with the sign convention that a nonnegative imaginary part
results in a nonnegative argument.
The value returned by :func:`~mpmath.arg` is an ``mpf`` instance.
"""
fabs = r"""
Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``)
into mpmath numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fabs(3)
mpf('3.0')
>>> fabs(-3)
mpf('3.0')
>>> fabs(3+4j)
mpf('5.0')
"""
re = r"""
Returns the real part of `x`, `\Re(x)`. Unlike ``x.real``,
:func:`~mpmath.re` converts `x` to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> re(3)
mpf('3.0')
>>> re(-1+4j)
mpf('-1.0')
"""
im = r"""
Returns the imaginary part of `x`, `\Im(x)`. Unlike ``x.imag``,
:func:`~mpmath.im` converts `x` to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> im(3)
mpf('0.0')
>>> im(-1+4j)
mpf('4.0')
"""
conj = r"""
Returns the complex conjugate of `x`, `\overline{x}`. Unlike
``x.conjugate()``, :func:`~mpmath.conj` converts `x` to an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> conj(3)
mpf('3.0')
>>> conj(-1+4j)
mpc(real='-1.0', imag='-4.0')
"""
polar = r"""
Returns the polar representation of the complex number `z`
as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polar(-2)
(2.0, 3.14159265358979)
>>> polar(3-4j)
(5.0, -0.927295218001612)
"""
rect = r"""
Returns the complex number represented by polar
coordinates `(r, \phi)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> chop(rect(2, pi))
-2.0
>>> rect(sqrt(2), -pi/4)
(1.0 - 1.0j)
"""
expm1 = r"""
Computes `e^x - 1`, accurately for small `x`.
Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> exp(1e-10)-1; print(expm1(1e-10))
1.00000008274037e-10
1.00000000005e-10
>>> exp(1e-20)-1; print(expm1(1e-20))
0.0
1.0e-20
>>> 1/(exp(1e-20)-1)
Traceback (most recent call last):
...
ZeroDivisionError
>>> 1/expm1(1e-20)
1.0e+20
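The reason is visible from the Maclaurin series
`e^x - 1 = x + x^2/2! + x^3/3! + \ldots`: for tiny `x` the true result is
essentially `x` itself, whereas ``exp(x)`` rounds to 1 and the subsequent
subtraction cancels away all significant digits.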
Evaluation works for extremely tiny values::
>>> expm1(0)
0.0
>>> expm1('1e-10000000')
1.0e-10000000
"""
powm1 = r"""
Computes `x^y - 1`, accurately when `x^y` is very close to 1.
This avoids potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> power(0.99999995, 1e-10) - 1
0.0
>>> powm1(0.99999995, 1e-10)
-5.00000012791934e-18
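Note that `x^y - 1 = e^{y \ln x} - 1`, so accurate evaluation of a power close
to 1 is the same cancellation problem addressed by :func:`~mpmath.expm1`, here
with the small argument `y \ln x`.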
Powers exactly equal to 1, and only those powers, yield 0 exactly::
>>> powm1(-j, 4)
(0.0 + 0.0j)
>>> powm1(3, 0)
0.0
>>> powm1(fadd(-1, 1e-100, exact=True), 4)
-4.0e-100
Evaluation works for extremely tiny `y`::
>>> powm1(2, '1e-100000')
6.93147180559945e-100001
>>> powm1(j, '1e-1000')
(-1.23370055013617e-2000 + 1.5707963267949e-1000j)
"""
root = r"""
``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
`r` that (up to possible approximation error) satisfies `r^n = z`.
(``nthroot`` is available as an alias for ``root``.)
Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
equidistant points on a circle with radius `|z|^{1/n}`, centered around the
origin. A specific root may be selected using the optional index
`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
closest to the positive real half-axis.
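Concretely, writing `z = |z| e^{i\theta}` with the principal argument
`-\pi < \theta \le \pi`, the root with index `k` is
`|z|^{1/n} \exp\left(i\,(\theta + 2\pi k)/n\right)`.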
The `k = 0` root is the so-called principal `n`-th root, often denoted by
`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
a positive real number, the principal root is just the unique positive
`n`-th root of `z`. Under some circumstances, non-principal real roots exist:
for positive real `z`, `n` even, there is a negative root given by `k = n/2`;
for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`.
To obtain all roots with a simple expression, use
``[root(z,n,k) for k in range(n)]``.
An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of
unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots`
provides a slightly more convenient way to obtain the roots of unity,
including the option to compute only the primitive roots of unity.
Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be
reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or
the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed.
:func:`~mpmath.root` is implemented to use Newton's method for small
`n`. At high precision, this makes `x^{1/n}` not much more
expensive than the regular exponentiation, `x^n`. For very large
`n`, :func:`~mpmath.nthroot` falls back to use the exponential function.
**Examples**
:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a
floating-point fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 16807 ** (mpf(1)/5)
mpf('7.0000000000000009')
>>> root(16807, 5)
mpf('7.0')
>>> nthroot(16807, 5) # Alias
mpf('7.0')
A high-precision root::
>>> mp.dps = 50; mp.pretty = True
>>> nthroot(10, 5)
1.584893192461113485202101373391507013269442133825
>>> nthroot(10, 5) ** 5
10.0
Computing principal and non-principal square and cube roots::
>>> mp.dps = 15
>>> root(10, 2)
3.16227766016838
>>> root(10, 2, 1)
-3.16227766016838
>>> root(-10, 3)
(1.07721734501594 + 1.86579517236206j)
>>> root(-10, 3, 1)
-2.15443469003188
>>> root(-10, 3, 2)
(1.07721734501594 - 1.86579517236206j)
All the 7th roots of a complex number::
>>> for r in [root(3+4j, 7, k) for k in range(7)]:
... print("%s %s" % (r, r**7))
...
(1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j)
(0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j)
(-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j)
(-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j)
(-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j)
(-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j)
(0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j)
Cube roots of unity::
>>> for k in range(3): print(root(1, 3, k))
...
1.0
(-0.5 + 0.866025403784439j)
(-0.5 - 0.866025403784439j)
Some exact high order roots::
>>> root(75**210, 105)
5625.0
>>> root(1, 128, 96)
(0.0 - 1.0j)
>>> root(4**128, 128, 96)
(0.0 - 4.0j)
"""
unitroots = r"""
``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`,
all the distinct `n`-th roots of unity, as a list. If the option
*primitive=True* is passed, only the primitive roots are returned.
Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct
roots for each `n` (`\zeta_k` and `\zeta_j` are the same when
`k = j \pmod n`), which form a regular polygon with vertices on the unit
circle. They are ordered counterclockwise with increasing `k`, starting
with `\zeta_0 = 1`.
**Examples**
The roots of unity up to `n = 4`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(unitroots(1))
[1.0]
>>> nprint(unitroots(2))
[1.0, -1.0]
>>> nprint(unitroots(3))
[1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4))
[1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
Roots of unity form a geometric series that sums to 0::
>>> mp.dps = 50
>>> chop(fsum(unitroots(25)))
0.0
Primitive roots up to `n = 4`::
>>> mp.dps = 15
>>> nprint(unitroots(1, primitive=True))
[1.0]
>>> nprint(unitroots(2, primitive=True))
[-1.0]
>>> nprint(unitroots(3, primitive=True))
[(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4, primitive=True))
[(0.0 + 1.0j), (0.0 - 1.0j)]
There are only four primitive 12th roots::
>>> nprint(unitroots(12, primitive=True))
[(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
The `n`-th roots of unity form a group, the cyclic group of order `n`.
Any primitive root `r` is a generator for this group, meaning that
`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
some permuted order)::
>>> for r in unitroots(6): print(r)
...
1.0
(0.5 + 0.866025403784439j)
(-0.5 + 0.866025403784439j)
-1.0
(-0.5 - 0.866025403784439j)
(0.5 - 0.866025403784439j)
>>> r = unitroots(6, primitive=True)[1]
>>> for k in range(6): print(chop(r**k))
...
1.0
(0.5 - 0.866025403784439j)
(-0.5 - 0.866025403784439j)
-1.0
(-0.5 + 0.866025403784438j)
(0.5 + 0.866025403784438j)
The number of primitive roots equals the Euler totient function `\phi(n)`::
>>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
[1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
"""
log = r"""
Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
is defined in terms of the natural logarithm as
`\log_b(x) = \ln(x)/\ln(b)`.
By convention, we take `\log(0) = -\infty`.
The natural logarithm is real if `x > 0` and complex if `x < 0` or if
`x` is complex. The principal branch of the complex logarithm is
used, meaning that `\Im(\ln(x)) = \arg(x)` with `-\pi < \arg(x) \le \pi`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> log(1)
0.0
>>> log(2)
0.693147180559945
>>> log(1000,10)
3.0
>>> log(4, 16)
0.5
>>> log(j)
(0.0 + 1.5707963267949j)
>>> log(-1)
(0.0 + 3.14159265358979j)
>>> log(0)
-inf
>>> log(inf)
+inf
The natural logarithm is the antiderivative of `1/x`::
>>> quad(lambda x: 1/x, [1, 5])
1.6094379124341
>>> log(5)
1.6094379124341
>>> diff(log, 10)
0.1
The Taylor series expansion of the natural logarithm around
`x = 1` has coefficients `(-1)^{n+1}/n`::
>>> nprint(taylor(log, 1, 7))
[0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
:func:`~mpmath.log` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> log(pi)
1.1447298858494001741434273513530587116472948129153
>>> log(pi, pi**3)
0.33333333333333333333333333333333333333333333333333
>>> mp.dps = 25
>>> log(3+4j)
(1.609437912434100374600759 + 0.9272952180016122324285125j)
"""
log10 = r"""
Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
is equivalent to ``log(x, 10)``.
"""
fmod = r"""
Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
For mpmath numbers, this is equivalent to ``x % y``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> fmod(100, pi)
2.61062773871641
You can use :func:`~mpmath.fmod` to compute fractional parts of numbers::
>>> fmod(10.25, 1)
0.25
"""
radians = r"""
Converts the degree angle `x` to radians::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> radians(60)
1.0471975511966
"""
degrees = r"""
Converts the radian angle `x` to a degree angle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> degrees(pi/3)
60.0
"""
atan2 = r"""
Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
giving the signed angle between the positive `x`-axis and the
point `(x, y)` in the 2D plane. This function is defined for
real `x` and `y` only.
The two-argument arctangent essentially computes
`\mathrm{atan}(y/x)`, but accounts for the signs of both
`x` and `y` to give the angle for the correct quadrant. The
following examples illustrate the difference::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> atan2(1,1), atan(1/1.)
(0.785398163397448, 0.785398163397448)
>>> atan2(1,-1), atan(1/-1.)
(2.35619449019234, -0.785398163397448)
>>> atan2(-1,1), atan(-1/1.)
(-0.785398163397448, -0.785398163397448)
>>> atan2(-1,-1), atan(-1/-1.)
(-2.35619449019234, 0.785398163397448)
The angle convention is the same as that used for the complex
argument; see :func:`~mpmath.arg`.
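Equivalently, `\mathrm{atan2}(y, x) = \arg(x + iy)` for real `x` and `y`.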
"""
fibonacci = r"""
``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci`
extends this definition to arbitrary real and complex arguments
using the formula
.. math ::
F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this
continuous formula to compute `F(n)` for extremely large `n`, where
calculating the exact integer would be wasteful.
For convenience, :func:`~mpmath.fib` is available as an alias for
:func:`~mpmath.fibonacci`.
**Basic examples**
Some small Fibonacci numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for i in range(10):
... print(fibonacci(i))
...
0.0
1.0
1.0
2.0
3.0
5.0
8.0
13.0
21.0
34.0
>>> fibonacci(50)
12586269025.0
The recurrence for `F(n)` extends backwards to negative `n`::
>>> for i in range(10):
... print(fibonacci(-i))
...
0.0
1.0
-1.0
2.0
-3.0
5.0
-8.0
13.0
-21.0
34.0
Large Fibonacci numbers will be computed approximately unless
the precision is set high enough::
>>> fib(200)
2.8057117299251e+41
>>> mp.dps = 45
>>> fib(200)
280571172992510140037611932413038677189525.0
:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers
of stupendous size::
>>> mp.dps = 15
>>> fibonacci(10**25)
3.49052338550226e+2089876402499787337692720
**Real and complex arguments**
The extended Fibonacci function is an analytic function. The
property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
>>> mp.dps = 15
>>> fib(pi)
2.1170270579161
>>> fib(pi-1) + fib(pi-2)
2.1170270579161
>>> fib(3+4j)
(-5248.51130728372 - 14195.962288353j)
>>> fib(2+4j) + fib(1+4j)
(-5248.51130728372 - 14195.962288353j)
The Fibonacci function has infinitely many roots on the
negative half-real axis. The first root is at 0, the second is
close to -0.18, and then there are infinitely many roots that
asymptotically approach `-n+1/2`::
>>> findroot(fib, -0.2)
-0.183802359692956
>>> findroot(fib, -2)
-1.57077646820395
>>> findroot(fib, -17)
-16.4999999596115
>>> findroot(fib, -24)
-23.5000000000479
**Mathematical relationships**
For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
>>> mp.dps = 50
>>> fibonacci(101)/fibonacci(100)
1.6180339887498948482045868343656381177203127439638
>>> +phi
1.6180339887498948482045868343656381177203091798058
The sum of reciprocal Fibonacci numbers converges to an irrational
number for which no closed form expression is known::
>>> mp.dps = 15
>>> nsum(lambda n: 1/fib(n), [1, inf])
3.35988566624318
Amazingly, however, the sum of odd-index reciprocal Fibonacci
numbers can be expressed in terms of a Jacobi theta function::
>>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
1.82451515740692
>>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
1.82451515740692
Some related sums can be done in closed form::
>>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
1.11803398874989
>>> phi - 0.5
1.11803398874989
>>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
>>> nsum(f, [1, inf])
0.618033988749895
>>> phi-1
0.618033988749895
**References**
1. http://mathworld.wolfram.com/FibonacciNumber.html
"""
altzeta = r"""
Gives the Dirichlet eta function, `\eta(s)`, also known as the
alternating zeta function. This function is defined in analogy
with the Riemann zeta function as providing the sum of the
alternating series
.. math ::
\eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{k^s}
= 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
The eta function, unlike the Riemann zeta function, is an entire
function, having a finite value for all complex `s`. The special case
`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
The alternating zeta function may be expressed using the Riemann zeta function
as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
in terms of the Hurwitz zeta function, for example using
:func:`~mpmath.dirichlet` (see documentation for that function).
**Examples**
Some special values are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> altzeta(1)
0.693147180559945
>>> altzeta(0)
0.5
>>> altzeta(-1)
0.25
>>> altzeta(-2)
0.0
An example of a sum that can be computed more accurately and
efficiently via :func:`~mpmath.altzeta` than via numerical summation::
>>> sum(-(-1)**n / n**2.5 for n in range(1, 100))
0.86720495150398402
>>> altzeta(2.5)
0.867199889012184
At positive even integers, the Dirichlet eta function
evaluates to a rational multiple of a power of `\pi`::
>>> altzeta(2)
0.822467033424113
>>> pi**2/12
0.822467033424113
Like the Riemann zeta function, `\eta(s)` approaches 1
as `s` approaches positive infinity, although it does
so from below rather than from above::
>>> altzeta(30)
0.999999999068682
>>> altzeta(inf)
1.0
>>> mp.pretty = False
>>> altzeta(1000, rounding='d')
mpf('0.99999999999999989')
>>> altzeta(1000, rounding='u')
mpf('1.0')
**References**
1. http://mathworld.wolfram.com/DirichletEtaFunction.html
2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
factorial = r"""
Computes the factorial, `x!`. For integers `n \ge 0`, we have
`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
is defined for real or complex `x` by `x! = \Gamma(x+1)`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(6):
... print("%s %s" % (k, fac(k)))
...
0 1.0
1 1.0
2 2.0
3 6.0
4 24.0
5 120.0
>>> fac(inf)
+inf
>>> fac(0.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
For large positive `x`, `x!` can be approximated by
Stirling's formula::
>>> x = 10**10
>>> fac(x)
2.32579620567308e+95657055186
>>> sqrt(2*pi*x)*(x/e)**x
2.32579597597705e+95657055186
:func:`~mpmath.fac` supports evaluation for astronomically large values::
>>> fac(10**30)
6.22311232304258e+29565705518096748172348871081098
Reciprocal factorials appear in the Taylor series of the
exponential function (among many other contexts)::
>>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
(2.71828182845905, 2.71828182845905)
>>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
(23.1406926327793, 23.1406926327793)
"""
gamma = r"""
Computes the gamma function, `\Gamma(x)`. The gamma function is a
shifted version of the ordinary factorial, satisfying
`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
is defined by
.. math ::
\Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
by analytic continuation.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(1, 6):
... print("%s %s" % (k, gamma(k)))
...
1 1.0
2 1.0
3 2.0
4 6.0
5 24.0
>>> gamma(inf)
+inf
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: gamma function pole
The gamma function of a half-integer is a rational multiple of
`\sqrt{\pi}`::
>>> gamma(0.5), sqrt(pi)
(1.77245385090552, 1.77245385090552)
>>> gamma(1.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
We can check the integral definition::
>>> gamma(3.5)
3.32335097044784
>>> quad(lambda t: t**2.5*exp(-t), [0,inf])
3.32335097044784
:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> gamma(sqrt(3))
0.91510229697308632046045539308226554038315280564184
>>> mp.dps = 25
>>> gamma(2j)
(0.009902440080927490985955066 - 0.07595200133501806872408048j)
Arguments can also be large. Note that the gamma function grows
very quickly::
>>> mp.dps = 15
>>> gamma(10**20)
1.9328495143101e+1956570551809674817225
"""
psi = r"""
Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
functions are defined as the logarithmic derivatives of the gamma
function:
.. math ::
\psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
present implementation of :func:`~mpmath.psi`, the order `m` must be a
nonnegative integer, while the argument `z` may be an arbitrary
complex number (with exception for the polygamma function's poles
at `z = 0, -1, -2, \ldots`).
**Examples**
For various rational arguments, the polygamma function reduces to
a combination of standard mathematical constants::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> psi(0, 1), -euler
(-0.5772156649015328606065121, -0.5772156649015328606065121)
>>> psi(1, '1/4'), pi**2+8*catalan
(17.19732915450711073927132, 17.19732915450711073927132)
>>> psi(2, '1/2'), -14*apery
(-16.82879664423431999559633, -16.82879664423431999559633)
The polygamma functions are derivatives of each other::
>>> diff(lambda x: psi(3, x), pi), psi(4, pi)
(-0.1105749312578862734526952, -0.1105749312578862734526952)
>>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
(-0.375, -0.375)
The digamma function diverges logarithmically as `z \to \infty`,
while higher orders tend to zero::
>>> psi(0,inf), psi(1,inf), psi(2,inf)
(+inf, 0.0, 0.0)
Evaluation for a complex argument::
>>> psi(2, -1-2j)
(0.03902435405364952654838445 + 0.1574325240413029954685366j)
Evaluation is supported for large orders `m` and/or large
arguments `z`::
>>> psi(3, 10**100)
2.0e-300
>>> psi(250, 10**30+10**20*j)
(-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
**Application to infinite series**
Any infinite series where the summand is a rational function of
the index `k` can be evaluated in closed form in terms of polygamma
functions of the roots and poles of the summand::
>>> a = sqrt(2)
>>> b = sqrt(3)
>>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
0.4049668927517857061917531
>>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
0.4049668927517857061917531
This follows from the series representation (`m > 0`)
.. math ::
\psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
\frac{1}{(z+k)^{m+1}}.
Since the roots of a polynomial may be complex, it is sometimes
necessary to use the complex polygamma function to evaluate
an entirely real-valued sum::
>>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
1.694361433907061256154665
>>> nprint(polyroots([1,-2,3]))
[(1.0 - 1.41421j), (1.0 + 1.41421j)]
>>> r1 = 1-sqrt(2)*j
>>> r2 = r1.conjugate()
>>> (psi(0,-r2)-psi(0,-r1))/(r1-r2)
(1.694361433907061256154665 + 0.0j)
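The series representation given above can be checked numerically; the
following sketch (plain code rather than a doctest) compares both sides
for one choice of `m` and `z`::
    from mpmath import mp, psi, fac, nsum, inf
    mp.dps = 25
    m, z = 2, 3.5
    lhs = psi(m, z)
    rhs = (-1)**(m+1) * fac(m) * nsum(lambda k: 1/(z+k)**(m+1), [0, inf])
    print(lhs)   # the two printed values should agree to working precision
    print(rhs)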
"""
digamma = r"""
Shortcut for ``psi(0,z)``.
"""
harmonic = r"""
If `n` is an integer, ``harmonic(n)`` gives a floating-point
approximation of the `n`-th harmonic number `H(n)`, defined as
.. math ::
H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
The first few harmonic numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(8):
... print("%s %s" % (n, harmonic(n)))
...
0 0.0
1 1.0
2 1.5
3 1.83333333333333
4 2.08333333333333
5 2.28333333333333
6 2.45
7 2.59285714285714
The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
>>> harmonic(inf)
+inf
:func:`~mpmath.harmonic` is evaluated using the digamma function rather
than by summing the harmonic series term by term. It can therefore
be computed quickly for arbitrarily large `n`, and even for
nonintegral arguments::
>>> harmonic(10**100)
230.835724964306
>>> harmonic(0.5)
0.613705638880109
>>> harmonic(3+4j)
(2.24757548223494 + 0.850502209186044j)
:func:`~mpmath.harmonic` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> harmonic(11)
3.0198773448773448773448773448773448773448773448773
>>> harmonic(pi)
1.8727388590273302654363491032336134987519132374152
The harmonic series diverges, but at a glacial pace. It is possible
to calculate the exact number of terms required before the sum
exceeds a given amount, say 100::
>>> mp.dps = 50
>>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
>>> v
15092688622113788323693563264538101449859496.864101
>>> v = int(ceil(v))
>>> print(v)
15092688622113788323693563264538101449859497
>>> harmonic(v-1)
99.999999999999999999999999999999999999999999942747
>>> harmonic(v)
100.000000000000000000000000000000000000000000009
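As noted above, the evaluation goes through the digamma function; a brief
sketch (plain code, not a doctest) of the underlying identity
`H(n) = \psi^{(0)}(n+1) + \gamma`::
    from mpmath import mp, harmonic, psi, euler
    mp.dps = 25
    n = 37
    print(harmonic(n))            # both lines should print the same value
    print(psi(0, n+1) + euler)    # digamma shifted by Euler's constant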
"""
bernoulli = r"""
Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
The Bernoulli numbers are rational numbers, but this function
returns a floating-point approximation. To obtain an exact
fraction, use :func:`~mpmath.bernfrac` instead.
**Examples**
Numerical values of the first few Bernoulli numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(15):
... print("%s %s" % (n, bernoulli(n)))
...
0 1.0
1 -0.5
2 0.166666666666667
3 0.0
4 -0.0333333333333333
5 0.0
6 0.0238095238095238
7 0.0
8 -0.0333333333333333
9 0.0
10 0.0757575757575758
11 0.0
12 -0.253113553113553
13 0.0
14 1.16666666666667
Bernoulli numbers can be approximated with arbitrary precision::
>>> mp.dps = 50
>>> bernoulli(100)
-2.8382249570693706959264156336481764738284680928013e+78
Arbitrarily large `n` are supported::
>>> mp.dps = 15
>>> bernoulli(10**20 + 2)
3.09136296657021e+1876752564973863312327
The Bernoulli numbers are related to the Riemann zeta function
at integer arguments::
>>> -bernoulli(8) * (2*pi)**8 / (2*fac(8))
1.00407735619794
>>> zeta(8)
1.00407735619794
**Algorithm**
For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence
formula due to Ramanujan. All results in this range are cached,
so sequential computation of small Bernoulli numbers is
guaranteed to be fast.
For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
function.
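For exact values, :func:`~mpmath.bernfrac` returns the numerator and
denominator as integers; an illustrative sketch relating the two functions::
    from mpmath import mp, bernoulli, bernfrac, mpf
    mp.dps = 30
    n = 12
    p, q = bernfrac(n)     # exact fraction: B_12 = -691/2730
    print(bernoulli(n))    # floating-point value; should match the ratio
    print(mpf(p)/q)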
"""
stieltjes = r"""
For a nonnegative integer `n`, ``stieltjes(n)`` computes the
`n`-th Stieltjes constant `\gamma_n`, defined as the
`n`-th coefficient in the Laurent series expansion of the
Riemann zeta function around the pole at `s = 1`. That is,
we have:
.. math ::
\zeta(s) = \frac{1}{s-1} + \sum_{n=0}^{\infty}
\frac{(-1)^n}{n!} \gamma_n (s-1)^n
More generally, ``stieltjes(n, a)`` gives the corresponding
coefficient `\gamma_n(a)` for the Hurwitz zeta function
`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
**Examples**
The zeroth Stieltjes constant is just Euler's constant `\gamma`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> stieltjes(0)
0.577215664901533
Some more values are::
>>> stieltjes(1)
-0.0728158454836767
>>> stieltjes(10)
0.000205332814909065
>>> stieltjes(30)
0.00355772885557316
>>> stieltjes(1000)
-1.57095384420474e+486
>>> stieltjes(2000)
2.680424678918e+1109
>>> stieltjes(1, 2.5)
-0.23747539175716
An alternative way to compute `\gamma_1`::
>>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
-0.0728158454836767
:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> stieltjes(2)
-0.0096903631928723184845303860352125293590658061013408
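One way to sanity-check the Laurent expansion is to truncate it and compare
with :func:`~mpmath.zeta` near `s = 1`; a rough sketch (plain code, not a
doctest)::
    from mpmath import mp, mpf, zeta, stieltjes, fac
    mp.dps = 30
    s = mpf('1.01')
    series = 1/(s-1) + sum((-1)**n/fac(n)*stieltjes(n)*(s-1)**n
                           for n in range(8))
    print(zeta(s))    # the truncated series should agree to many digits
    print(series)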
**Algorithm**
:func:`~mpmath.stieltjes` numerically evaluates the integral in
the following representation due to Ainsworth, Howell and
Coffey [1], [2]:
.. math ::
\gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
\frac{2}{a} \Re \int_0^{\infty}
\frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
For some reference values with `a = 1`, see e.g. [4].
**References**
1. O. R. Ainsworth & L. W. Howell, "An integral representation of
the generalized Euler-Mascheroni constants", NASA Technical
Paper 2456 (1985),
http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
2. M. W. Coffey, "The Stieltjes constants, their relation to the
`\eta_j` coefficients, and representation of the Hurwitz
zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
3. http://mathworld.wolfram.com/StieltjesConstants.html
4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
"""
gammaprod = r"""
Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
product / quotient of gamma functions:
.. math ::
\frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
{\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
the entire product as a limit and evaluates this limit properly if
any of the numerator or denominator arguments are nonpositive
integers such that poles of the gamma function are encountered.
That is, :func:`~mpmath.gammaprod` evaluates
.. math ::
\lim_{\epsilon \to 0}
\frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
\Gamma(a_p+\epsilon)}
{\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
\Gamma(b_q+\epsilon)}
In particular:
* If there are equally many poles in the numerator and the
denominator, the limit is a rational number times the remaining,
regular part of the product.
* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
returns ``+inf``.
* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
returns 0.
**Examples**
The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
>>> from mpmath import *
>>> mp.dps = 15
>>> gammaprod([], [0])
0.0
A limit::
>>> gammaprod([-4], [-3])
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
-0.25
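The limit can also be mimicked by perturbing the arguments by hand, as in
this illustrative sketch::
    from mpmath import mp, gammaprod, gamma, mpf
    mp.dps = 15
    print(gammaprod([-4], [-3]))         # pole/pole limit handled automatically
    eps = mpf('1e-8')
    print(gamma(-4+eps)/gamma(-3+eps))   # manual perturbation approaches the same value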
"""
beta = r"""
Computes the beta function,
`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
The beta function is also commonly defined by the integral
representation
.. math ::
B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
**Examples**
For integer and half-integer arguments where all three gamma
functions are finite, the beta function becomes either a rational
number or a rational multiple of `\pi`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> beta(5, 2)
0.0333333333333333
>>> beta(1.5, 2)
0.266666666666667
>>> 16*beta(2.5, 1.5)
3.14159265358979
Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
of the beta function is taken to result in ``+inf``::
>>> beta(-0.5, 0.5)
0.0
>>> beta(-3, 3)
-0.333333333333333
>>> beta(-2, 3)
+inf
>>> beta(inf, 1)
0.0
>>> beta(inf, 0)
nan
:func:`~mpmath.beta` supports complex numbers and arbitrary precision
evaluation::
>>> beta(1, 2+j)
(0.4 - 0.2j)
>>> mp.dps = 25
>>> beta(j,0.5)
(1.079424249270925780135675 - 1.410032405664160838288752j)
>>> mp.dps = 50
>>> beta(pi, e)
0.037890298781212201348153837138927165984170287886464
Various integrals can be computed by means of the
beta function::
>>> mp.dps = 15
>>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
0.0230880230880231
>>> beta(3.5, 3)
0.0230880230880231
>>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
0.319504062596158
>>> beta(2.5, 0.75)/2
0.319504062596158
"""
betainc = r"""
``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
incomplete beta function,
.. math ::
I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
beta function `B(a,b)`; see :func:`~mpmath.beta`.
With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
computes the regularized incomplete beta function
`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
beta distribution with parameters `a`, `b`.
.. note ::
Implementations of the incomplete beta function in some other
software use a different argument order. For example, Mathematica uses the
reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
three-argument incomplete beta integral (implicitly with `x1 = 0`), use
``betainc(a,b,0,x2,regularized=True)``.
**Examples**
Verifying that :func:`~mpmath.betainc` computes the integral in the
definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> x,y,a,b = 3, 4, 0, 6
>>> betainc(x, y, a, b)
-4010.4
>>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
-4010.4
The arguments may be arbitrary complex numbers::
>>> betainc(0.75, 1-4j, 0, 2+3j)
(0.2241657956955709603655887 + 0.3619619242700451992411724j)
With regularization::
>>> betainc(1, 2, 0, 0.25, regularized=True)
0.4375
>>> betainc(pi, e, 0, 1, regularized=True) # Complete
1.0
The beta integral satisfies some simple argument transformation
symmetries::
>>> mp.dps = 15
>>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
(56.0833333333333, 56.0833333333333, 56.0833333333333)
The beta integral can often be evaluated analytically. For integer and
rational arguments, the incomplete beta function typically reduces to a
simple algebraic-logarithmic expression::
>>> mp.dps = 25
>>> identify(chop(betainc(0, 0, 3, 4)))
'-(log((9/8)))'
>>> identify(betainc(2, 3, 4, 5))
'(673/12)'
>>> identify(betainc(1.5, 1, 1, 2))
'((-12+sqrt(1152))/18)'
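Because the regularized function is the cumulative distribution of the beta
distribution, it can be reproduced by integrating the density; a brief
sketch (plain code, not a doctest)::
    from mpmath import mp, betainc, beta, quad
    mp.dps = 15
    a, b, x = 2.5, 3.5, 0.3
    print(betainc(a, b, 0, x, regularized=True))
    # integrating the beta density over [0, x] should give the same value
    print(quad(lambda t: t**(a-1)*(1-t)**(b-1), [0, x]) / beta(a, b))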
"""
binomial = r"""
Computes the binomial coefficient
.. math ::
{n \choose k} = \frac{n!}{k!(n-k)!}.
The binomial coefficient gives the number of ways that `k` items
can be chosen from a set of `n` items. More generally, the binomial
coefficient is a well-defined function of arbitrary real or
complex `n` and `k`, via the gamma function.
**Examples**
Generate Pascal's triangle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint([binomial(n,k) for k in range(n+1)])
...
[1.0]
[1.0, 1.0]
[1.0, 2.0, 1.0]
[1.0, 3.0, 3.0, 1.0]
[1.0, 4.0, 6.0, 4.0, 1.0]
There is 1 way to select 0 items from the empty set, and 0 ways to
select 1 item from the empty set::
>>> binomial(0, 0)
1.0
>>> binomial(0, 1)
0.0
:func:`~mpmath.binomial` supports large arguments::
>>> binomial(10**20, 10**20-5)
8.33333333333333e+97
>>> binomial(10**20, 10**10)
2.60784095465201e+104342944813
Nonintegral binomial coefficients find use in series
expansions::
>>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
>>> nprint([binomial(0.25, k) for k in range(5)])
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
An integral representation::
>>> n, k = 5, 3
>>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
>>> chop(quad(f, [-pi,pi])/(2*pi))
10.0
>>> binomial(n,k)
10.0
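For noninteger arguments, the gamma-function definition can be checked
directly, for instance::
    from mpmath import mp, binomial, gamma
    mp.dps = 15
    n, k = 6.5, 2.25
    print(binomial(n, k))                           # should agree with the ratio below
    print(gamma(n+1)/(gamma(k+1)*gamma(n-k+1)))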
"""
rf = r"""
Computes the rising factorial or Pochhammer symbol,
.. math ::
x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the rising factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: rf(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 2.0, 3.0, 1.0]
[0.0, 6.0, 11.0, 6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> rf(2+3j, 5.5)
(-7202.03920483347 - 3777.58810701527j)
"""
ff = r"""
Computes the falling factorial,
.. math ::
(x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the falling factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: ff(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, -1.0, 1.0]
[0.0, 2.0, -3.0, 1.0]
[0.0, -6.0, 11.0, -6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> ff(2+3j, 5.5)
(-720.41085888203 + 316.101124983878j)
"""
fac2 = r"""
Computes the double factorial `x!!`, defined for integers
`x > 0` by
.. math ::
x!! = \begin{cases}
1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
\end{cases}
and more generally by [1]
.. math ::
x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
\Gamma\left(\frac{x}{2}+1\right).
**Examples**
The integer sequence of double factorials begins::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([fac2(n) for n in range(10)])
[1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
For large `x`, double factorials follow a Stirling-like asymptotic
approximation::
>>> x = mpf(10000)
>>> fac2(x)
5.97272691416282e+17830
>>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
5.97262736954392e+17830
The recurrence formula `x!! = x (x-2)!!` can be reversed to
define the double factorial of negative odd integers (but
not negative even integers)::
>>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
(1.0, -1.0, 0.333333333333333, -0.0666666666666667)
>>> fac2(-2)
Traceback (most recent call last):
...
ValueError: gamma function pole
With the exception of the poles at negative even integers,
:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
The recurrence formula is valid generally::
>>> fac2(pi+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
>>> (pi+2j)*fac2(pi-2+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
Double factorials should not be confused with nested factorials,
which are immensely larger::
>>> fac(fac(20))
5.13805976125208e+43675043585825292774
>>> fac2(20)
3715891200.0
Double factorials appear, among other things, in series expansions
of Gaussian functions and the error function. Infinite series
include::
>>> nsum(lambda k: 1/fac2(k), [0, inf])
3.05940740534258
>>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
3.05940740534258
>>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
4.06015693855741
>>> e * erf(1) * sqrt(pi)
4.06015693855741
A beautiful Ramanujan sum::
>>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
0.90917279454693
>>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
0.90917279454693
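The gamma-function formula above can be checked at a noninteger point; a
short sketch (plain code, not a doctest)::
    from mpmath import mp, mpf, fac2, gamma, pi, cos
    mp.dps = 25
    x = mpf('3.7')
    print(fac2(x))    # should agree with the closed form below
    print(mpf(2)**(x/2) * (pi/2)**((cos(pi*x)-1)/4) * gamma(x/2+1))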
**References**
1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
2. http://mathworld.wolfram.com/DoubleFactorial.html
"""
hyper = r"""
Evaluates the generalized hypergeometric function
.. math ::
\,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
\sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
{(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
The parameter lists ``a_s`` and ``b_s`` may contain integers,
real numbers, complex numbers, as well as exact fractions given in
the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle
integers and fractions more efficiently than arbitrary
floating-point parameters (since rational parameters are by
far the most common).
**Examples**
Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by
comparison with :func:`~mpmath.nsum`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a,b,c,d = 2,3,4,5
>>> x = 0.25
>>> hyper([a,b],[c,d],x)
1.078903941164934876086237
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
1.078903941164934876086237
The parameters can be any combination of integers, fractions,
floats and complex numbers::
>>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
>>> x = 0.2j
>>> hyper([a,b],[c,d,e],x)
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
>>> b, e = -0.5, mpf(2)/3
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
The `\,_0F_0` and `\,_1F_0` series are just elementary functions::
>>> a, z = sqrt(2), +pi
>>> hyper([],[],z)
23.14069263277926900572909
>>> exp(z)
23.14069263277926900572909
>>> hyper([a],[],z)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
>>> (1-z)**(-a)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
If any `a_k` coefficient is a nonpositive integer, the series terminates
into a finite polynomial::
>>> hyper([1,1,1,-3],[2,5],1)
0.7904761904761904761904762
>>> identify(_)
'(83/105)'
If any `b_k` is a nonpositive integer, the function is undefined (unless the
series terminates before the division by zero occurs)::
>>> hyper([1,1,1,-3],[-2,5],1)
Traceback (most recent call last):
...
ZeroDivisionError: pole in hypergeometric series
>>> hyper([1,1,1,-1],[-2,5],1)
1.1
Except for polynomial cases, the radius of convergence `R` of the hypergeometric
series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or
`R = 0` (if `p > q+1`).
The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`,
`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions
can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2`
are available to handle the most common cases (see their documentation),
but functions of higher degree are also supported via :func:`~mpmath.hyper`::
>>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point
1.141783505526870731311423
>>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole
+inf
>>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4
(1.543998916527972259717257 - 0.5876309929580408028816365j)
>>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5
(0.9996565821853579063502466 + 0.0129721075905630604445669j)
Near `z = 1` with noninteger parameters::
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1)
2.219433352235586121250027
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1)
+inf
>>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))()
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1)
2923978034.412973409330956
Please note that, as currently implemented, evaluation of `\,_pF_{p-1}`
with `p \ge 3` may be slow or inaccurate when `|z-1|` is small,
for some parameter values.
When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
series. For `\,_2F_0` the Borel sum has an analytic solution and can be
computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
is evaluated first by attempting to sum it directly as an asymptotic
series (this only works for tiny `|z|`), and then by evaluating the Borel
regularized sum using numerical integration. Except for
special parameter combinations, this can be extremely slow.
>>> hyper([1,1], [], 0.5) # regularization of 2F0
(1.340965419580146562086448 + 0.8503366631752726568782447j)
>>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
(1.108287213689475145830699 + 0.5327107430640678181200491j)
With the following magnitude of argument, the asymptotic series for `\,_3F_1`
gives only a few digits. Using Borel summation, ``hyper`` can produce
a value with full accuracy::
>>> mp.dps = 15
>>> hyper([2,0.5,4], [5.25], '0.08', force_series=True)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
1.0725535790737
>>> hyper([2,0.5,4], [5.25], '0.08')
(1.07269542893559 + 5.54668863216891e-5j)
>>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
0.946344925484879
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.946312503737771
>>> mp.dps = 25
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.9463125037377662296700858
Note that with the positive `z` value, there is a complex part in the
correct result, which falls below the tolerance of the asymptotic series.
"""
hypercomb = r"""
Computes a weighted combination of hypergeometric functions
.. math ::
\sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}}
\frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r}
\Gamma(\beta_{r,k})}
\,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1},
\ldots, b_{r,q}; z_r)\right].
Typically the parameters are linear combinations of a small set of base
parameters; :func:`~mpmath.hypercomb` permits computing a correct value in
the case that some of the `\alpha`, `\beta`, `b` turn out to be
nonpositive integers, or if division by zero occurs for some `w^c`,
assuming that there are opposing singularities that cancel out.
The limit is computed by evaluating the function with the base
parameters perturbed, at a higher working precision.
The first argument should be a function that takes the perturbable
base parameters ``params`` as input and returns `N` tuples
``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``,
gamma factors ``alpha``, ``beta``, and hypergeometric coefficients
``a``, ``b`` each should be lists of numbers, and ``z`` should be a single
number.
**Examples**
The following evaluates
.. math ::
(a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1)
with `a=1, z=3`. There is a zero factor, two gamma function poles, and
the 1F1 function is singular; all singularities cancel out to give a finite
value::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1])
-180.769832308689
>>> -9*exp(3)
-180.769832308689
"""
hyp0f1 = r"""
Gives the hypergeometric function `\,_0F_1`, sometimes known as the
confluent limit function, defined as
.. math ::
\,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}.
This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`,
and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`).
``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for
:func:`~mpmath.hyper` for more information.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp0f1(2, 0.25)
1.130318207984970054415392
>>> hyp0f1((1,2), 1234567)
6.27287187546220705604627e+964
>>> hyp0f1(3+4j, 1000000j)
(3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j)
Evaluation is supported for arbitrarily large values of `z`,
using asymptotic expansions::
>>> hyp0f1(1, 10**50)
2.131705322874965310390701e+8685889638065036553022565
>>> hyp0f1(1, -10**50)
1.115945364792025420300208e-13
Verifying the differential equation::
>>> a = 2.5
>>> f = lambda z: hyp0f1(a,z)
>>> for z in [0, 10, 3+4j]:
... chop(z*diff(f,z,2) + a*diff(f,z) - f(z))
...
0.0
0.0
0.0
"""
hyp1f1 = r"""
Gives the confluent hypergeometric function of the first kind,
.. math ::
\,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!},
also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This
function gives one solution to the confluent (Kummer's) differential equation
.. math ::
z f''(z) + (b-z) f'(z) - af(z) = 0.
A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
Solutions are also given in an alternate form by the Whittaker
functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
``hyp1f1(a,b,z)`` is equivalent
to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
information.
**Examples**
Evaluation for real and complex values of the argument `z`, with
fixed parameters `a = 2, b = -1/3`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp1f1(2, (-1,3), 3.25)
-2815.956856924817275640248
>>> hyp1f1(2, (-1,3), -3.25)
-1.145036502407444445553107
>>> hyp1f1(2, (-1,3), 1000)
-8.021799872770764149793693e+441
>>> hyp1f1(2, (-1,3), -1000)
0.000003131987633006813594535331
>>> hyp1f1(2, (-1,3), 100+100j)
(-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
Parameters may be complex::
>>> hyp1f1(2+3j, -1+j, 10j)
(261.8977905181045142673351 + 160.8930312845682213562172j)
Arbitrarily large values of `z` are supported::
>>> hyp1f1(3, 4, 10**20)
3.890569218254486878220752e+43429448190325182745
>>> hyp1f1(3, 4, -10**20)
6.0e-60
>>> hyp1f1(3, 4, 10**20*j)
(-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyp1f1(a,b,z)
>>> for z in [0, -10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
0.0
An integral representation::
>>> a, b = 1.5, 3
>>> z = 1.5
>>> hyp1f1(a,b,z)
2.269381460919952778587441
>>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
>>> gammaprod([b],[a,b-a])*quad(g, [0,1])
2.269381460919952778587441
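Kummer's transformation `\,_1F_1(a,b,z) = e^z \,_1F_1(b-a,b,-z)` provides
another consistency check, sketched here in plain code::
    from mpmath import mp, mpc, hyp1f1, exp
    mp.dps = 25
    a, b, z = 1.5, 2.25, mpc(3, 4)
    print(hyp1f1(a, b, z))               # should agree with the transformed form
    print(exp(z)*hyp1f1(b-a, b, -z))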
"""
hyp1f2 = r"""
Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2;z)`.
The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
``hyper([a1],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c = 1.5, (-1,3), 2.25
>>> hyp1f2(a, b, c, 10**20)
-1.159388148811981535941434e+8685889639
>>> hyp1f2(a, b, c, -10**20)
-12.60262607892655945795907
>>> hyp1f2(a, b, c, 10**20*j)
(4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
>>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
(135881.9905586966432662004 - 86681.95885418079535738828j)
"""
hyp2f2 = r"""
Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
``hyper([a1,a2],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c, d = 1.5, (-1,3), 2.25, 4
>>> hyp2f2(a, b, c, d, 10**20)
-5.275758229007902299823821e+43429448190325182663
>>> hyp2f2(a, b, c, d, -10**20)
2561445.079983207701073448
>>> hyp2f2(a, b, c, d, 10**20*j)
(2218276.509664121194836667 - 1280722.539991603850462856j)
>>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
(80500.68321405666957342788 - 20346.82752982813540993502j)
"""
hyp2f3 = r"""
Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
``hyper([a1,a2],[b1,b2,b3],z)``.
Evaluation works for arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5)
>>> hyp2f3(a1,a2,b1,b2,b3,10**20)
-4.169178177065714963568963e+8685889590
>>> hyp2f3(a1,a2,b1,b2,b3,-10**20)
7064472.587757755088178629
>>> hyp2f3(a1,a2,b1,b2,b3,10**20*j)
(-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j)
(-2280.938956687033150740228 + 13620.97336609573659199632j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j)
(4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j)
"""
hyp2f1 = r"""
Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as
*the* hypergeometric function), defined for `|z| < 1` as
.. math ::
\,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty}
\frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)`
when necessary.
Special cases of this function include many of the orthogonal polynomials as
well as the incomplete beta function and other functions. Properties of the
Gauss hypergeometric function are documented comprehensively in many references,
for example Abramowitz & Stegun, section 15.
The implementation supports the analytic continuation as well as evaluation
close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)``
is equivalent to ``hyper([a,b],[c],z)``.
**Examples**
Evaluation with `z` inside, outside and on the unit circle, for
fixed parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f1(2, (1,2), 4, 0.75)
1.303703703703703703703704
>>> hyp2f1(2, (1,2), 4, -1.75)
0.7431290566046919177853916
>>> hyp2f1(2, (1,2), 4, 1.75)
(1.418075801749271137026239 - 1.114976146679907015775102j)
>>> hyp2f1(2, (1,2), 4, 1)
1.6
>>> hyp2f1(2, (1,2), 4, -1)
0.8235498012182875315037882
>>> hyp2f1(2, (1,2), 4, j)
(0.9144026291433065674259078 + 0.2050415770437884900574923j)
>>> hyp2f1(2, (1,2), 4, 2+j)
(0.9274013540258103029011549 + 0.7455257875808100868984496j)
>>> hyp2f1(2, (1,2), 4, 0.25j)
(0.9931169055799728251931672 + 0.06154836525312066938147793j)
Evaluation with complex parameter values::
>>> hyp2f1(1+j, 0.75, 10j, 1+5j)
(0.8834833319713479923389638 + 0.7053886880648105068343509j)
Evaluation with `z = 1`::
>>> hyp2f1(-2.5, 3.5, 1.5, 1)
0.0
>>> hyp2f1(-2.5, 3, 4, 1)
0.06926406926406926406926407
>>> hyp2f1(2, 3, 4, 1)
+inf
Evaluation for huge arguments::
>>> hyp2f1((-1,3), 1.75, 4, '1e100')
(7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000')
(7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000j')
(1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j)
An integral representation::
>>> a,b,c,z = -0.5, 1, 2.5, 0.25
>>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a)
>>> gammaprod([c],[b,c-b]) * quad(g, [0,1])
0.9480458814362824478852618
>>> hyp2f1(a,b,c,z)
0.9480458814362824478852618
Verifying the hypergeometric differential equation::
>>> f = lambda z: hyp2f1(a,b,c,z)
>>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z))
0.0
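At `z = 1` with `\Re(c-a-b) > 0`, Gauss's summation theorem gives the value
in closed form; a brief sketch (plain code, not a doctest)::
    from mpmath import mp, hyp2f1, gammaprod
    mp.dps = 25
    a, b, c = -2.5, 3, 4                       # here c - a - b = 3.5 > 0
    print(hyp2f1(a, b, c, 1))
    print(gammaprod([c, c-a-b], [c-a, c-b]))   # Gauss's theorem; should agree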
"""
hyp3f2 = r"""
Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1`
as
.. math ::
\,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty}
\frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation. The analytic structure of this
function is similar to that of `\,_2F_1`, generally with a singularity at
`z = 1` and a branch cut on `(1, \infty)`.
Evaluation is supported inside, on, and outside
the circle of convergence `|z| = 1`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp3f2(1,2,3,4,5,0.25)
1.083533123380934241548707
>>> hyp3f2(1,2+2j,3,4,5,-10+10j)
(0.1574651066006004632914361 - 0.03194209021885226400892963j)
>>> hyp3f2(1,2,3,4,5,-10)
0.3071141169208772603266489
>>> hyp3f2(1,2,3,4,5,10)
(-0.4857045320523947050581423 - 0.5988311440454888436888028j)
>>> hyp3f2(0.25,1,1,2,1.5,1)
1.157370995096772047567631
>>> (8-pi-2*ln2)/3
1.157370995096772047567631
>>> hyp3f2(1+j,0.5j,2,1,-2j,-1)
(1.74518490615029486475959 + 0.1454701525056682297614029j)
>>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j))
(0.9829816481834277511138055 - 0.4059040020276937085081127j)
>>> hyp3f2(-3,2,1,-5,4,1)
1.41
>>> hyp3f2(-3,2,1,-5,4,2)
2.12
Evaluation very close to the unit circle::
>>> hyp3f2(1,2,3,4,5,'1.0001')
(1.564877796743282766872279 - 3.76821518787438186031973e-11j)
>>> hyp3f2(1,2,3,4,5,'1+0.0001j')
(1.564747153061671573212831 + 0.0001305757570366084557648482j)
>>> hyp3f2(1,2,3,4,5,'0.9999')
1.564616644881686134983664
>>> hyp3f2(1,2,3,4,5,'-0.9999')
0.7823896253461678060196207
.. note ::
Evaluation for `|z-1|` small can currently be inaccurate or slow
for some parameter combinations.
For various parameter combinations, `\,_3F_2` admits representation in terms
of hypergeometric functions of lower degree, or in terms of
simpler functions::
>>> for a, b, z in [(1,2,-1), (2,0.5,1)]:
... hyp2f1(a,b,a+b+0.5,z)**2
... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z)
...
0.4246104461966439006086308
0.4246104461966439006086308
7.111111111111111111111111
7.111111111111111111111111
>>> z = 2+3j
>>> hyp3f2(0.5,1,1.5,2,2,z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
>>> 4*(pi-2*ellipe(z))/(pi*z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
"""
hyperu = r"""
Gives the Tricomi confluent hypergeometric function `U`, also known as
the Kummer or confluent hypergeometric function of the second kind. This
function gives a second linearly independent solution to the confluent
hypergeometric differential equation (the first is provided by `\,_1F_1` --
see :func:`~mpmath.hyp1f1`).
**Examples**
Evaluation for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyperu(2,3,4)
0.0625
>>> hyperu(0.25, 5, 1000)
0.1779949416140579573763523
>>> hyperu(0.25, 5, -1000)
(0.1256256609322773150118907 - 0.1256256609322773150118907j)
The `U` function may be singular at `z = 0`::
>>> hyperu(1.5, 2, 0)
+inf
>>> hyperu(1.5, -2, 0)
0.1719434921288400112603671
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyperu(a,b,z)
>>> for z in [-10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
An integral representation::
>>> a,b,z = 2, 3.5, 4.25
>>> hyperu(a,b,z)
0.06674960718150520648014567
>>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a)
0.06674960718150520648014567
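For large `|z|`, `U(a,b,z)` behaves like `z^{-a}` to leading order; a rough
sketch of this asymptotic behavior (plain code, not a doctest)::
    from mpmath import mp, mpf, hyperu
    mp.dps = 15
    a, b = 1.25, 2.5
    z = mpf(10)**8
    print(hyperu(a, b, z))
    print(z**(-a))    # leading asymptotic term; should agree to several digits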
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
hyp2f0 = r"""
Gives the hypergeometric function `\,_2F_0`, defined formally by the
series
.. math ::
\,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}.
This series usually does not converge. For small enough `z`, it can be viewed
as an asymptotic series that may be summed directly with an appropriate
truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum,
or equivalently, it uses a representation in terms of the
hypergeometric U function [1]. The series also converges when either `a` or `b`
is a nonpositive integer, as it then terminates into a polynomial
after `-a` or `-b` terms.
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f0((2,3), 1.25, -100)
0.07095851870980052763312791
>>> hyp2f0((2,3), 1.25, 100)
(-0.03254379032170590665041131 + 0.07269254613282301012735797j)
>>> hyp2f0(-0.75, 1-j, 4j)
(-0.3579987031082732264862155 - 3.052951783922142735255881j)
Even with real arguments, the regularized value of 2F0 is often complex-valued,
but the imaginary part decreases exponentially as `z \to 0`. In the following
example, the first call uses complex evaluation while the second has a small
enough `z` to evaluate using the direct series and thus the returned value
is strictly real (this should be taken to indicate that the imaginary
part is less than ``eps``)::
>>> mp.dps = 15
>>> hyp2f0(1.5, 0.5, 0.05)
(1.04166637647907 + 8.34584913683906e-8j)
>>> hyp2f0(1.5, 0.5, 0.0005)
1.00037535207621
The imaginary part can be retrieved by increasing the working precision::
>>> mp.dps = 80
>>> nprint(hyp2f0(1.5, 0.5, 0.009).imag)
1.23828e-46
In the polynomial case (the series terminating), 2F0 can evaluate exactly::
>>> mp.dps = 15
>>> hyp2f0(-6,-6,2)
291793.0
>>> identify(hyp2f0(-2,1,0.25))
'(5/8)'
The coefficients of the polynomials can be recovered using Taylor expansion::
>>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10))
[1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10))
[1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
"""
gammainc = r"""
``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
gamma function with integration limits `[a, b]`:
.. math ::
\Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
The generalized incomplete gamma function reduces to the
following special cases when one or both endpoints are fixed:
* `\Gamma(z,0,\infty)` is the standard ("complete")
gamma function, `\Gamma(z)` (available directly
as the mpmath function :func:`~mpmath.gamma`)
* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
function, `\Gamma(z,a)`
* `\Gamma(z,0,b)` is the "lower" incomplete gamma
function, `\gamma(z,b)`.
Of course, we have
`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
for all `z` and `x`.
Note however that some authors reverse the order of the
arguments when defining the lower and upper incomplete
gamma function, so one should be careful to get the correct
definition.
If also given the keyword argument ``regularized=True``,
:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma
function
.. math ::
P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
**Examples**
We can compare with numerical quadrature to verify that
:func:`~mpmath.gammainc` computes the integral in the definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gammainc(2+3j, 4, 10)
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
>>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
Argument symmetries follow directly from the integral definition::
>>> gammainc(3, 4, 5) + gammainc(3, 5, 4)
0.0
>>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4)
1.523793388892911312363331
1.523793388892911312363331
>>> findroot(lambda z: gammainc(2,z,3), 1)
3.0
Evaluation for arbitrarily large arguments::
>>> gammainc(10, 100)
4.083660630910611272288592e-26
>>> gammainc(10, 10000000000000000)
5.290402449901174752972486e-4342944819032375
>>> gammainc(3+4j, 1000000+1000000j)
(-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j)
Evaluation of a generalized incomplete gamma function automatically chooses
the representation that gives a more accurate result, depending on which
parameter is larger::
>>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad
0.0
>>> gammainc(10000000, 2, 3) # Good
1.755146243738946045873491e+4771204
>>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad
0.0
>>> gammainc(2, 100000000, 100000001) # Good
4.078258353474186729184421e-43429441
The incomplete gamma functions satisfy simple recurrence
relations::
>>> mp.dps = 25
>>> z, a = mpf(3.5), mpf(2)
>>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a)
10.60130296933533459267329
10.60130296933533459267329
>>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a)
1.030425427232114336470932
1.030425427232114336470932
Evaluation at integers and poles::
>>> gammainc(-3, -4, -5)
(-0.2214577048967798566234192 + 0.0j)
>>> gammainc(-3, 0, 5)
+inf
If `z` is an integer, the recurrence reduces the incomplete gamma
function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
`Q` are polynomials::
>>> gammainc(1, 2); exp(-2)
0.1353352832366126918939995
0.1353352832366126918939995
>>> mp.dps = 50
>>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
'(326*exp(-1) + (-872)*exp(-2))'
The incomplete gamma functions reduce to functions such as
the exponential integral Ei and the error function for special
arguments::
>>> mp.dps = 25
>>> gammainc(0, 4); -ei(-4)
0.00377935240984890647887486
0.00377935240984890647887486
>>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2))
1.691806732945198336509541
1.691806732945198336509541
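The regularized variant is simply the ratio to the complete gamma function,
for instance::
    from mpmath import mp, gammainc, gamma
    mp.dps = 25
    z, a, b = 3.5, 1, 4
    print(gammainc(z, a, b, regularized=True))   # should agree with the ratio below
    print(gammainc(z, a, b)/gamma(z))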
"""
erf = r"""
Computes the error function, `\mathrm{erf}(x)`. The error
function is the normalized antiderivative of the Gaussian function
`\exp(-t^2)`. More precisely,
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
**Basic examples**
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erf(0)
0.0
>>> erf(1)
0.842700792949715
>>> erf(-1)
-0.842700792949715
>>> erf(inf)
1.0
>>> erf(-inf)
-1.0
For large real `x`, `\mathrm{erf}(x)` approaches 1 very
rapidly::
>>> erf(3)
0.999977909503001
>>> erf(5)
0.999999999998463
The error function is an odd function::
>>> nprint(chop(taylor(erf, 0, 5)))
[0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
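The defining integral can be reproduced with :func:`~mpmath.quad`; a brief
sketch (plain code, not a doctest)::
    from mpmath import mp, erf, quad, exp, sqrt, pi
    mp.dps = 15
    x = 1.2
    print(erf(x))    # should agree with the quadrature below
    print(2/sqrt(pi)*quad(lambda t: exp(-t**2), [0, x]))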
:func:`~mpmath.erf` implements arbitrary-precision evaluation and
supports complex numbers::
>>> mp.dps = 50
>>> erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
Evaluation is supported for large arguments::
>>> mp.dps = 25
>>> erf('1e1000')
1.0
>>> erf('-1e1000')
-1.0
>>> erf('1e-1000')
1.128379167095512573896159e-1000
>>> erf('1e7j')
(0.0 + 8.593897639029319267398803e+43429448190317j)
>>> erf('1e7+1e7j')
(0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
**Related functions**
See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
and :func:`~mpmath.erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
are also related to the error function.
"""
erfc = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> 1 - erf(10)
0.0
>>> erfc(10)
2.08848758376254e-45
:func:`~mpmath.erfc` works accurately even for ludicrously large
arguments::
>>> erfc(10**10)
4.3504398860243e-43429448190325182776
Complex arguments are supported::
>>> erfc(500+50j)
(1.19739830969552e-107492 + 1.46072418957528e-107491j)
"""
erfi = r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfi(0)
0.0
>>> erfi(1)
1.65042575879754
>>> erfi(-1)
-1.65042575879754
>>> erfi(inf)
+inf
>>> erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> erfi(3j)
(0.0 + 0.999977909503001j)
>>> erf(3)
0.999977909503001
>>> erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
Large arguments are supported::
>>> erfi(1000)
1.71130938718796e+434291
>>> erfi(10**10)
7.3167287567024e+43429448190325182754
>>> erfi(-10**10)
-7.3167287567024e+43429448190325182754
>>> erfi(1000-500j)
(2.49895233563961e+325717 + 2.6846779342253e+325717j)
>>> erfi(100000j)
(0.0 + 1.0j)
>>> erfi(-100000j)
(0.0 - 1.0j)
"""
erfinv = r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfinv(0)
0.0
>>> erfinv(1)
+inf
>>> erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
:func:`~mpmath.erf` as promised::
>>> erf(erfinv(0.75))
0.75
>>> erf(erfinv(-0.995))
-0.995
:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> x = erf(2)
>>> x
0.99532226501895273416206925636725292861089179704006
>>> erfinv(x)
2.0
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> quad(erfinv, [0, 1])
0.564189583547756
>>> 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
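Since the normal distribution function :func:`~mpmath.ncdf` is built from the
error function, `\sqrt{2}\,\mathrm{erfinv}(2p-1)` gives the standard normal
quantile; a short sketch (plain code, not a doctest)::
    from mpmath import mp, erfinv, ncdf, sqrt
    mp.dps = 15
    p = 0.975
    q = sqrt(2)*erfinv(2*p - 1)   # standard normal quantile, roughly 1.96
    print(q)
    print(ncdf(q))                # should recover p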
"""
npdf = r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(npdf, [-inf, inf])
1.0
>>> quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`~mpmath.ncdf`, which gives the cumulative
distribution.
"""
ncdf = r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`~mpmath.npdf`, which gives the probability density.
Elementary properties include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ncdf(pi, mu=pi)
0.5
>>> ncdf(-inf)
0.0
>>> ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> diff(ncdf, 2)
0.053990966513188
>>> npdf(2)
0.053990966513188
>>> diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> npdf(0, 1, 0.5)
0.107981933026376
"""
expint = r"""
:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
or En-function,
.. math ::
\mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
where `n` and `z` may both be complex numbers. The case with `n = 1` is
also given by :func:`~mpmath.e1`.
**Examples**
Evaluation at real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expint(1, 6.25)
0.0002704758872637179088496194
>>> expint(-3, 2+3j)
(0.00299658467335472929656159 + 0.06100816202125885450319632j)
>>> expint(2+3j, 4-5j)
(0.001803529474663565056945248 - 0.002235061547756185403349091j)
At negative integer values of `n`, `E_n(z)` reduces to a
rational-exponential function::
>>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
... exp(z)/z**(n+2)
>>> n = 3
>>> z = 1/pi
>>> expint(-n,z)
584.2604820613019908668219
>>> f(n,z)
584.2604820613019908668219
>>> n = 5
>>> expint(-n,z)
115366.5762594725451811138
>>> f(n,z)
115366.5762594725451811138
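A direct quadrature of the defining integral gives the same value, for
instance::
    from mpmath import mp, expint, quad, exp, inf
    mp.dps = 15
    n, z = 2, 1.5
    print(expint(n, z))    # should agree with the quadrature below
    print(quad(lambda t: exp(-z*t)/t**n, [1, inf]))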
"""
e1 = r"""
Computes the exponential integral `\mathrm{E}_1(z)`, given by
.. math ::
\mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
This is equivalent to :func:`~mpmath.expint` with `n = 1`.
**Examples**
Two ways to evaluate this function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> e1(6.25)
0.0002704758872637179088496194
>>> expint(1,6.25)
0.0002704758872637179088496194
The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
with negated argument, except for an imaginary branch cut term::
>>> e1(2.5)
0.02491491787026973549562801
>>> -ei(-2.5)
0.02491491787026973549562801
>>> e1(-2.5)
(-7.073765894578600711923552 - 3.141592653589793238462643j)
>>> -ei(2.5)
-7.073765894578600711923552
"""
ei = r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
The Ei-function is related to the more general family of exponential
integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
**Basic examples**
Some basic values and limits are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ei(0)
-inf
>>> ei(1)
1.89511781635594
>>> ei(inf)
+inf
>>> ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> ei(-4)
-0.00377935240984891
>>> quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`~mpmath.ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`~mpmath.li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> ei(3)
9.93383257062542
>>> chi(3) + shi(3)
9.93383257062542
>>> chop(ci(3j) - j*si(3j) - pi*j/2)
9.93383257062542
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> ei(z)
0.769881289937359
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://people.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
li = r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Alternatively, ``li(x, offset=True)`` computes the offset
logarithmic integral (used in number theory)
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
These two functions are related via the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`~mpmath.polylog`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> li(0)
0.0
>>> li(1)
-inf
>>> li(2)
1.04516378011749278484458888919
>>> findroot(li, 2)
1.45136923488338105028396848589
>>> li(inf)
+inf
>>> li(2, offset=True)
0.0
>>> li(1, offset=True)
-inf
>>> li(0, offset=True)
-1.04516378011749278484458888919
>>> li(10, offset=True)
5.12043572466980515267839286347
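The offset identity `\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)` stated
above can be checked directly (plain code, not a doctest)::
    from mpmath import mp, li
    mp.dps = 15
    x = 10
    print(li(x, offset=True))   # should agree with the difference below
    print(li(x) - li(2))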
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> ei(log(3))
2.1635885946671919729
>>> li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> x/log(x)
4.34294481903252e+97
>>> li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
`\mathrm{li}(x)`). For example, it is known that there are
exactly 1,925,320,391,606,803,968,923 prime numbers less than
`10^{23}` [1]. The logarithmic integral provides a very
accurate estimate::
>>> li(10**23, offset=True)
1.92532039161405e+21
A definite integral is::
>>> quad(li, [0, 1])
-0.693147180559945
>>> -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
ci = r"""
Computes the cosine integral,
.. math ::
\mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ci(0)
-inf
>>> ci(1)
0.3374039229009681346626462
>>> ci(pi)
0.07366791204642548599010096
>>> ci(inf)
0.0
>>> ci(-inf)
(0.0 + 3.141592653589793238462643j)
>>> ci(2+3j)
(1.408292501520849518759125 - 2.983617742029605093121118j)
The cosine integral behaves roughly like the sinc function
(see :func:`~mpmath.sinc`) for large real `x`::
>>> ci(10**10)
-4.875060251748226537857298e-11
>>> sinc(10**10)
-4.875060250875106915277943e-11
>>> chop(limit(ci, inf))
0.0
It has infinitely many roots on the positive real axis::
>>> findroot(ci, 1)
0.6165054856207162337971104
>>> findroot(ci, 2)
3.384180422551186426397851
Evaluation is supported for `z` anywhere in the complex plane::
>>> ci(10**6*(1+j))
(4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
-0.190029749656644
>>> ci(5)
-0.190029749656644
Some infinite series can be evaluated using the
cosine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
-0.239811742000565
>>> ci(1) - euler
-0.239811742000565
"""
si = r"""
Computes the sine integral,
.. math ::
\mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
The sine integral is thus the antiderivative of the sinc
function (see :func:`~mpmath.sinc`).
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> si(0)
0.0
>>> si(1)
0.9460830703671830149413533
>>> si(-1)
-0.9460830703671830149413533
>>> si(pi)
1.851937051982466170361053
>>> si(inf)
1.570796326794896619231322
>>> si(-inf)
-1.570796326794896619231322
>>> si(2+3j)
(4.547513889562289219853204 + 1.399196580646054789459839j)
The sine integral approaches `\pi/2` for large real `x`::
>>> si(10**10)
1.570796326707584656968511
>>> pi/2
1.570796326794896619231322
Evaluation is supported for `z` anywhere in the complex plane::
>>> si(10**6*(1+j))
(-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> quad(sinc, [0, 5])
1.54993124494467
>>> si(5)
1.54993124494467
Some infinite series can be evaluated using the
sine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
0.946083070367183
>>> si(1)
0.946083070367183
"""
chi = r"""
Computes the hyperbolic cosine integral, defined
in analogy with the cosine integral (see :func:`~mpmath.ci`) as
.. math ::
\mathrm{Chi}(x) = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> chi(0)
-inf
>>> chi(1)
0.8378669409802082408946786
>>> chi(inf)
+inf
>>> findroot(chi, 0.5)
0.5238225713898644064509583
>>> chi(2+3j)
(-0.1683628683277204662429321 + 2.625115880451325002151688j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> chi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
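Since `\cosh t + \sinh t = e^t`, the hyperbolic integrals combine into the
exponential integral, `\mathrm{Chi}(x) + \mathrm{Shi}(x) = \mathrm{Ei}(x)` for
`x > 0`; a brief sketch::
    from mpmath import mp, chi, shi, ei
    mp.dps = 25
    x = 2.5
    print(chi(x) + shi(x))   # should agree with ei(x)
    print(ei(x))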
"""
shi = r"""
Computes the hyperbolic sine integral, defined
in analogy with the sine integral (see :func:`~mpmath.si`) as
.. math ::
\mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> shi(0)
0.0
>>> shi(1)
1.057250875375728514571842
>>> shi(-1)
-1.057250875375728514571842
>>> shi(inf)
+inf
>>> shi(2+3j)
(-0.1931890762719198291678095 + 2.645432555362369624818525j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> shi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
fresnels = r"""
Computes the Fresnel sine integral
.. math ::
S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnels(0)
0.0
>>> fresnels(inf)
0.5
>>> fresnels(-inf)
-0.5
>>> fresnels(1)
0.4382591473903547660767567
>>> fresnels(1+2j)
(36.72546488399143842838788 + 15.58775110440458732748279j)
Comparing with the definition::
>>> fresnels(3)
0.4963129989673750360976123
>>> quad(lambda t: sin(pi*t**2/2), [0,3])
0.4963129989673750360976123
"""
fresnelc = r"""
Computes the Fresnel cosine integral
.. math ::
C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnelc(0)
0.0
>>> fresnelc(inf)
0.5
>>> fresnelc(-inf)
-0.5
>>> fresnelc(1)
0.7798934003768228294742064
>>> fresnelc(1+2j)
(16.08787137412548041729489 - 36.22568799288165021578758j)
Comparing with the definition::
>>> fresnelc(3)
0.6057207892976856295561611
>>> quad(lambda t: cos(pi*t**2/2), [0,3])
0.6057207892976856295561611
"""
airyai = r"""
Computes the Airy function `\operatorname{Ai}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Ai}(0) =
\frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Ai}'(0) =
-\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
Other common ways of defining the Ai-function include
integrals such as
.. math ::
\operatorname{Ai}(x) = \frac{1}{\pi}
\int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt
\qquad x \in \mathbb{R}
\operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi}
\int_0^{\infty}
\exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt.
The Ai-function is an entire function with a turning point,
behaving roughly like a slowly decaying sine wave for `z < 0` and
like a rapidly decreasing exponential for `z > 0`.
A second solution of the Airy differential equation
is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`).
Optionally, with *derivative=alpha*, :func:`airyai` can compute the
`\alpha`-th order fractional derivative with respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt.
The Ai-function has infinitely many zeros, all located along the
negative half of the real axis. They can be computed with
:func:`~mpmath.airyaizero`.
**Plots**
.. literalinclude :: /plots/ai.py
.. image :: /plots/ai.png
.. literalinclude :: /plots/ai_c.py
.. image :: /plots/ai_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyai(0); 1/(power(3,'2/3')*gamma('2/3'))
0.3550280538878172392600632
0.3550280538878172392600632
>>> airyai(1)
0.1352924163128814155241474
>>> airyai(-1)
0.5355608832923521187995166
>>> airyai(inf); airyai(-inf)
0.0
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airyai(-100)
0.1767533932395528780908311
>>> airyai(100)
2.634482152088184489550553e-291
>>> airyai(50+50j)
(-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j)
>>> airyai(-50+50j)
(1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j)
Huge arguments are also fine::
>>> airyai(10**10)
1.162235978298741779953693e-289529654602171
>>> airyai(-10**10)
0.0001736206448152818510510181
>>> w = airyai(10**10*(1+j))
>>> w.real
5.711508683721355528322567e-186339621747698
>>> w.imag
1.867245506962312577848166e-186339621747697
The first root of the Ai-function is::
>>> findroot(airyai, -2)
-2.338107410459767038489197
>>> airyaizero(1)
-2.338107410459767038489197
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airyai(z,2) - z*airyai(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airyai, 0, 5))
[0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0]
The Airy functions satisfy the Wronskian relation
`\operatorname{Ai}(z) \operatorname{Bi}'(z) -
\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`::
>>> z = -0.5
>>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z)
0.3183098861837906715377675
>>> 1/pi
0.3183098861837906715377675
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airyai(z)
-0.3788142936776580743472439
>>> y = 2*power(-z,'3/2')/3
>>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3
-0.3788142936776580743472439
**Derivatives and integrals**
Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`)::
>>> airyai(-3,1); diff(airyai,-3)
0.3145837692165988136507873
0.3145837692165988136507873
>>> airyai(-3,2); diff(airyai,-3,2)
1.136442881032974223041732
1.136442881032974223041732
>>> airyai(1000,1); diff(airyai,1000)
-2.943133917910336090459748e-9156
-2.943133917910336090459748e-9156
Several derivatives at `z = 0`::
>>> airyai(0,0); airyai(0,1); airyai(0,2)
0.3550280538878172392600632
-0.2588194037928067984051836
0.0
>>> airyai(0,3); airyai(0,4); airyai(0,5)
0.3550280538878172392600632
-0.5176388075856135968103671
0.0
>>> airyai(0,15); airyai(0,16); airyai(0,17)
1292.30211615165475090663
-3188.655054727379756351861
0.0
The integral of the Ai-function::
>>> airyai(3,-1); quad(airyai, [0,3])
0.3299203760070217725002701
0.3299203760070217725002701
>>> airyai(-10,-1); quad(airyai, [0,-10])
-0.765698403134212917425148
-0.765698403134212917425148
Integrals of high or fractional order::
>>> airyai(-2,0.5); differint(airyai,-2,0.5,0)
(0.0 + 0.2453596101351438273844725j)
(0.0 + 0.2453596101351438273844725j)
>>> airyai(-2,-4); differint(airyai,-2,-4,0)
0.2939176441636809580339365
0.2939176441636809580339365
>>> airyai(0,-1); airyai(0,-2); airyai(0,-3)
0.0
0.0
0.0
Integrals of the Ai-function can be evaluated at limit points::
>>> airyai(-1000000,-1); airyai(-inf,-1)
-0.6666843728311539978751512
-0.6666666666666666666666667
>>> airyai(10,-1); airyai(+inf,-1)
0.3333333332991690159427932
0.3333333333333333333333333
>>> airyai(+inf,-2); airyai(+inf,-3)
+inf
+inf
>>> airyai(-1000000,-2); airyai(-inf,-2)
666666.4078472650651209742
+inf
>>> airyai(-1000000,-3); airyai(-inf,-3)
-333333074513.7520264995733
-inf
**References**
1. [DLMF]_ Chapter 9: Airy and Related Functions
2. [WolframFunctions]_ section: Bessel-Type Functions
"""
airybi = r"""
Computes the Airy function `\operatorname{Bi}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Bi}(0) =
\frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Bi}'(0) =
\frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}.
Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function
is oscillatory for `z < 0`, but it grows rather than decreases
for `z > 0`.
Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals
and fractional derivatives can be computed with the *derivative*
parameter.
The Bi-function has infinitely many zeros along the negative
half-axis, as well as complex zeros, which can all be computed
with :func:`~mpmath.airybizero`.
**Plots**
.. literalinclude :: /plots/bi.py
.. image :: /plots/bi.png
.. literalinclude :: /plots/bi_c.py
.. image :: /plots/bi_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybi(0); 1/(power(3,'1/6')*gamma('2/3'))
0.6149266274460007351509224
0.6149266274460007351509224
>>> airybi(1)
1.207423594952871259436379
>>> airybi(-1)
0.10399738949694461188869
>>> airybi(inf); airybi(-inf)
+inf
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airybi(-100)
0.02427388768016013160566747
>>> airybi(100)
6.041223996670201399005265e+288
>>> airybi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> airybi(-50+50j)
(-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j)
Huge arguments::
>>> airybi(10**10)
1.369385787943539818688433e+289529654602165
>>> airybi(-10**10)
0.001775656141692932747610973
>>> w = airybi(10**10*(1+j))
>>> w.real
-6.559955931096196875845858e+186339621747689
>>> w.imag
-6.822462726981357180929024e+186339621747690
The first real root of the Bi-function is::
>>> findroot(airybi, -1); airybizero(1)
-1.17371322270912792491998
-1.17371322270912792491998
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airybi(z,2) - z*airybi(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airybi, 0, 5))
[0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0]
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airybi(z)
-0.1982896263749265432206449
>>> p = 2*power(-z,'3/2')/3
>>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p))
-0.1982896263749265432206449
**Derivatives and integrals**
Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`)::
>>> airybi(-3,1); diff(airybi,-3)
-0.675611222685258537668032
-0.675611222685258537668032
>>> airybi(-3,2); diff(airybi,-3,2)
0.5948688791247796296619346
0.5948688791247796296619346
>>> airybi(1000,1); diff(airybi,1000)
1.710055114624614989262335e+9156
1.710055114624614989262335e+9156
Several derivatives at `z = 0`::
>>> airybi(0,0); airybi(0,1); airybi(0,2)
0.6149266274460007351509224
0.4482883573538263579148237
0.0
>>> airybi(0,3); airybi(0,4); airybi(0,5)
0.6149266274460007351509224
0.8965767147076527158296474
0.0
>>> airybi(0,15); airybi(0,16); airybi(0,17)
2238.332923903442675949357
5522.912562599140729510628
0.0
The integral of the Bi-function::
>>> airybi(3,-1); quad(airybi, [0,3])
10.06200303130620056316655
10.06200303130620056316655
>>> airybi(-10,-1); quad(airybi, [0,-10])
-0.01504042480614002045135483
-0.01504042480614002045135483
Integrals of high or fractional order::
>>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0)
(0.0 + 0.5019859055341699223453257j)
(0.0 + 0.5019859055341699223453257j)
>>> airybi(-2,-4); differint(airybi,-2,-4,0)
0.2809314599922447252139092
0.2809314599922447252139092
>>> airybi(0,-1); airybi(0,-2); airybi(0,-3)
0.0
0.0
0.0
Integrals of the Bi-function can be evaluated at limit points::
>>> airybi(-1000000,-1); airybi(-inf,-1)
0.000002191261128063434047966873
0.0
>>> airybi(10,-1); airybi(+inf,-1)
147809803.1074067161675853
+inf
>>> airybi(+inf,-2); airybi(+inf,-3)
+inf
+inf
>>> airybi(-1000000,-2); airybi(-inf,-2)
0.4482883750599908479851085
0.4482883573538263579148237
>>> gamma('2/3')*power(3,'2/3')/(2*pi)
0.4482883573538263579148237
>>> airybi(-100000,-3); airybi(-inf,-3)
-44828.52827206932872493133
-inf
>>> airybi(-100000,-4); airybi(-inf,-4)
2241411040.437759489540248
+inf
"""
airyaizero = r"""
Gives the `k`-th zero of the Airy Ai-function,
i.e. the `k`-th number `a_k` ordered by magnitude for which
`\operatorname{Ai}(a_k) = 0`.
Optionally, with *derivative=1*, the corresponding
zero `a'_k` of the derivative function, i.e.
`\operatorname{Ai}'(a'_k) = 0`, is computed.
**Examples**
Some values of `a_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyaizero(1)
-2.338107410459767038489197
>>> airyaizero(2)
-4.087949444130970616636989
>>> airyaizero(3)
-5.520559828095551059129856
>>> airyaizero(1000)
-281.0315196125215528353364
Some values of `a'_k`::
>>> airyaizero(1,1)
-1.018792971647471089017325
>>> airyaizero(2,1)
-3.248197582179836537875424
>>> airyaizero(3,1)
-4.820099211178735639400616
>>> airyaizero(1000,1)
-280.9378080358935070607097
Verification::
>>> chop(airyai(airyaizero(1)))
0.0
>>> chop(airyai(airyaizero(1,1),1))
0.0
"""
airybizero = r"""
With *complex=False*, gives the `k`-th real zero of the Airy Bi-function,
i.e. the `k`-th number `b_k` ordered by magnitude for which
`\operatorname{Bi}(b_k) = 0`.
With *complex=True*, gives the `k`-th complex zero in the upper
half plane `\beta_k`. Also the conjugate `\overline{\beta_k}`
is a zero.
Optionally, with *derivative=1*, the corresponding
zero `b'_k` or `\beta'_k` of the derivative function, i.e.
`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
is computed.
**Examples**
Some values of `b_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybizero(1)
-1.17371322270912792491998
>>> airybizero(2)
-3.271093302836352715680228
>>> airybizero(3)
-4.830737841662015932667709
>>> airybizero(1000)
-280.9378112034152401578834
Some values of `b'_k`::
>>> airybizero(1,1)
-2.294439682614123246622459
>>> airybizero(2,1)
-4.073155089071828215552369
>>> airybizero(3,1)
-5.512395729663599496259593
>>> airybizero(1000,1)
-281.0315164471118527161362
Some values of `\beta_k`::
>>> airybizero(1,complex=True)
(0.9775448867316206859469927 + 2.141290706038744575749139j)
>>> airybizero(2,complex=True)
(1.896775013895336346627217 + 3.627291764358919410440499j)
>>> airybizero(3,complex=True)
(2.633157739354946595708019 + 4.855468179979844983174628j)
>>> airybizero(1000,complex=True)
(140.4978560578493018899793 + 243.3907724215792121244867j)
Some values of `\beta'_k`::
>>> airybizero(1,1,complex=True)
(0.2149470745374305676088329 + 1.100600143302797880647194j)
>>> airybizero(2,1,complex=True)
(1.458168309223507392028211 + 2.912249367458445419235083j)
>>> airybizero(3,1,complex=True)
(2.273760763013482299792362 + 4.254528549217097862167015j)
>>> airybizero(1000,1,complex=True)
(140.4509972835270559730423 + 243.3096175398562811896208j)
Verification::
>>> chop(airybi(airybizero(1)))
0.0
>>> chop(airybi(airybizero(1,1),1))
0.0
>>> u = airybizero(1,complex=True)
>>> chop(airybi(u))
0.0
>>> chop(airybi(conj(u)))
0.0
The complex zeros (in the upper and lower half-planes respectively)
asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
>>> arg(airybizero(1,complex=True))
1.142532510286334022305364
>>> arg(airybizero(1000,complex=True))
1.047271114786212061583917
>>> arg(airybizero(1000000,complex=True))
1.047197624741816183341355
>>> pi/3
1.047197551196597746154214
"""
ellipk = r"""
Evaluates the complete elliptic integral of the first kind,
`K(m)`, defined by
.. math ::
K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
\frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
**Plots**
.. literalinclude :: /plots/ellipk.py
.. image :: /plots/ellipk.png
**Examples**
Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipk(0)
1.570796326794896619231322
>>> ellipk(inf)
(0.0 + 0.0j)
>>> ellipk(-inf)
0.0
>>> ellipk(1)
+inf
>>> ellipk(-1)
1.31102877714605990523242
>>> ellipk(2)
(1.31102877714605990523242 - 1.31102877714605990523242j)
Verifying the defining integral and hypergeometric
representation::
>>> ellipk(0.5)
1.85407467730137191843385
>>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
1.85407467730137191843385
>>> pi/2*hyp2f1(0.5,0.5,1,0.5)
1.85407467730137191843385
Evaluation is supported for arbitrary complex `m`::
>>> ellipk(3+4j)
(0.9111955638049650086562171 + 0.6313342832413452438845091j)
A definite integral::
>>> quad(ellipk, [0, 1])
2.0
"""
agm = r"""
``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
`b`, defined as the limit of the following iteration:
.. math ::
a_0 = a
b_0 = b
a_{n+1} = \frac{a_n+b_n}{2}
b_{n+1} = \sqrt{a_n b_n}
This function can be called with a single argument, computing
`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
**Examples**
It is a well-known theorem that the geometric mean of
two distinct positive numbers is less than the arithmetic
mean. It follows that the arithmetic-geometric mean lies
between the two means::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> a = mpf(3)
>>> b = mpf(4)
>>> sqrt(a*b)
3.46410161513775
>>> agm(a,b)
3.48202767635957
>>> (a+b)/2
3.5
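Iterating the defining recurrence directly gives the same value after
just a few steps::
>>> a, b = mpf(3), mpf(4)
>>> for _ in range(6):
... a, b = (a+b)/2, sqrt(a*b)
...
>>> abs(a - agm(3, 4)) < mpf('1e-14')
True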
The arithmetic-geometric mean is scale-invariant::
>>> agm(10*e, 10*pi)
29.261085515723
>>> 10*agm(e, pi)
29.261085515723
For large `x`, `\mathrm{agm}(1,x)` grows like `\pi x/(2 \log(4x))`,
i.e. somewhat more slowly than `x` itself::
>>> agm(10**10)
643448704.760133
>>> agm(10**50)
1.34814309345871e+48
For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
>>> agm('0.01')
0.262166887202249
>>> -pi/2/log('0.0025')
0.262172347753122
The arithmetic-geometric mean can also be computed for complex
numbers::
>>> agm(3, 2+j)
(2.51055133276184 + 0.547394054060638j)
The AGM iteration converges very quickly (each step doubles
the number of correct digits), so :func:`~mpmath.agm` supports efficient
high-precision evaluation::
>>> mp.dps = 10000
>>> a = agm(1,2)
>>> str(a)[-10:]
'1679581912'
**Mathematical relations**
The arithmetic-geometric mean may be used to evaluate the
following two parametric definite integrals:
.. math ::
I_1 = \int_0^{\infty}
\frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
I_2 = \int_0^{\pi/2}
\frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
We have::
>>> mp.dps = 15
>>> a = 3
>>> b = 4
>>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
>>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
>>> quad(f1, [0, inf])
0.451115405388492
>>> quad(f2, [0, pi/2])
0.451115405388492
>>> pi/(2*agm(a,b))
0.451115405388492
A formula for `\Gamma(1/4)`::
>>> gamma(0.25)
3.62560990822191
>>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
3.62560990822191
**Possible issues**
The branch cut chosen for complex `a` and `b` is somewhat
arbitrary.
"""
gegenbauer = r"""
Evaluates the Gegenbauer polynomial, or ultraspherical polynomial,
.. math ::
C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a;
a+\frac{1}{2}; \frac{1}{2}(1-z)\right).
When `n` is a nonnegative integer, this formula gives a polynomial
in `z` of degree `n`, but all parameters are permitted to be
complex numbers. With `a = 1/2`, the Gegenbauer polynomial
reduces to a Legendre polynomial.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gegenbauer(3, 0.5, -10)
-2485.0
>>> gegenbauer(1000, 10, 100)
3.012757178975667428359374e+2322
>>> gegenbauer(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
Evaluation at negative integer orders::
>>> gegenbauer(-4, 2, 1.75)
-1.0
>>> gegenbauer(-4, 3, 1.75)
0.0
>>> gegenbauer(-4, 2j, 1.75)
0.0
>>> gegenbauer(-7, 0.5, 3)
8989.0
The Gegenbauer polynomials solve the differential equation::
>>> n, a = 4.5, 1+2j
>>> f = lambda z: gegenbauer(n, a, z)
>>> for z in [0, 0.75, -0.5j]:
... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
...
0.0
0.0
0.0
The Gegenbauer polynomials have generating function
`(1-2zt+t^2)^{-a}`::
>>> a, z = 2.5, 1
>>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
[1.0, 5.0, 15.0, 35.0]
>>> [gegenbauer(n,a,z) for n in range(4)]
[1.0, 5.0, 15.0, 35.0]
The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
to the weight `(1-z^2)^{a-\frac{1}{2}}`::
>>> a, n, m = 2.5, 4, 5
>>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
>>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
>>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
0.0
"""
laguerre = r"""
Gives the generalized (associated) Laguerre polynomial, defined by
.. math ::
L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
\,_1F_1(-n, a+1, z).
With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
Laguerre polynomial, the sequence of which begins
`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = \frac{1}{2}(z^2-4z+2), \ldots`.
The Laguerre polynomials are orthogonal with respect to the weight
`z^a e^{-z}` on `[0, \infty)`.
**Plots**
.. literalinclude :: /plots/laguerre.py
.. image :: /plots/laguerre.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> laguerre(5, 0, 0.25)
0.03726399739583333333333333
>>> laguerre(1+j, 0.5, 2+3j)
(4.474921610704496808379097 - 11.02058050372068958069241j)
>>> laguerre(2, 0, 10000)
49980001.0
>>> laguerre(2.5, 0, 10000)
-9.327764910194842158583189e+4328
The first few Laguerre polynomials, normalized to have integer
coefficients::
>>> for n in range(7):
... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
...
[1.0]
[1.0, -1.0]
[2.0, -4.0, 1.0]
[6.0, -18.0, 9.0, -1.0]
[24.0, -96.0, 72.0, -16.0, 1.0]
[120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
[720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
Verifying orthogonality::
>>> Lm = lambda t: laguerre(m,a,t)
>>> Ln = lambda t: laguerre(n,a,t)
>>> a, n, m = 2.5, 2, 3
>>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
0.0
"""
hermite = r"""
Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
the recurrence
.. math ::
H_0(z) = 1
H_1(z) = 2z
H_{n+1}(z) = 2z H_n(z) - 2n H_{n-1}(z).
The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
values of `n`, the Hermite function `H_n(z)` is defined as
.. math ::
H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2},
-\frac{1}{z^2}\right)
for `\Re{z} > 0`, or generally
.. math ::
H_n(z) = 2^n \sqrt{\pi} \left(
\frac{1}{\Gamma\left(\frac{1-n}{2}\right)}
\,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) -
\frac{2z}{\Gamma\left(-\frac{n}{2}\right)}
\,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right)
\right).
**Plots**
.. literalinclude :: /plots/hermite.py
.. image :: /plots/hermite.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hermite(0, 10)
1.0
>>> hermite(1, 10); hermite(2, 10)
20.0
398.0
>>> hermite(10000, 2)
4.950440066552087387515653e+19334
>>> hermite(3, -10**8)
-7999999999999998800000000.0
>>> hermite(-3, -10**8)
1.675159751729877682920301e+4342944819032534
>>> hermite(2+3j, -1+2j)
(-0.07652130602993513389421901 - 0.1084662449961914580276007j)
Coefficients of the first few Hermite polynomials are::
>>> for n in range(7):
... chop(taylor(lambda z: hermite(n, z), 0, n))
...
[1.0]
[0.0, 2.0]
[-2.0, 0.0, 4.0]
[0.0, -12.0, 0.0, 8.0]
[12.0, 0.0, -48.0, 0.0, 16.0]
[0.0, 120.0, 0.0, -160.0, 0.0, 32.0]
[-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0]
Values at `z = 0`::
>>> for n in range(-5, 9):
... hermite(n, 0)
...
0.02769459142039868792653387
0.08333333333333333333333333
0.2215567313631895034122709
0.5
0.8862269254527580136490837
1.0
0.0
-2.0
0.0
12.0
0.0
-120.0
0.0
1680.0
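The defining recurrence can be checked numerically::
>>> n, z = 6, mpf(0.4)
>>> abs(hermite(n+1, z) - 2*z*hermite(n, z) + 2*n*hermite(n-1, z)) < mpf('1e-18')
True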
Hermite functions satisfy the differential equation::
>>> n = 4
>>> f = lambda z: hermite(n, z)
>>> z = 1.5
>>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z))
0.0
Verifying orthogonality::
>>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf]))
0.0
"""
jacobi = r"""
``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
case of the hypergeometric function `\,_2F_1` given by:
.. math ::
P_n^{(a,b)}(x) = {n+a \choose n}
\,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`~mpmath.taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`~mpmath.jacobi` approximately satisfies
this equation::
>>> from mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> A0, A1, A2
(26560.2328981879, -21503.7641037294, -5056.46879445852)
"""
legendre = r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Plots**
.. literalinclude :: /plots/legendre.py
.. image :: /plots/legendre.png
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5):
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.57735, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
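The three-term recurrence given above can be verified numerically::
>>> n, x = 4, mpf(0.3)
>>> chop((n+1)*legendre(n+1, x) - (2*n+1)*x*legendre(n, x) + n*legendre(n-1, x))
0.0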
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
legenp = r"""
Calculates the (associated) Legendre function of the first kind of
degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the first kind, `P_n(z)`. The parameters may be
complex numbers.
In terms of the Gauss hypergeometric function, the (associated) Legendre
function is defined as
.. math ::
P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
With *type=3* instead of *type=2*, the alternative
definition
.. math ::
\hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
and ``LegendreP[n,m,3,z]`` in Mathematica.
The general solution of the (associated) Legendre differential equation
.. math ::
(1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
second kind as implemented by :func:`~mpmath.legenq`.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenp(2, 0, 10); legendre(2, 10)
149.5
149.5
>>> legenp(-2, 0.5, 2.5)
(1.972260393822275434196053 - 1.972260393822275434196053j)
>>> legenp(2+3j, 1-j, -0.5+4j)
(-3.335677248386698208736542 - 5.663270217461022307645625j)
>>> chop(legenp(3, 2, -1.5, type=2))
28.125
>>> chop(legenp(3, 2, -1.5, type=3))
-28.125
Verifying the associated Legendre differential equation::
>>> n, m = 2, -0.5
>>> C1, C2 = 1, -3
>>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
>>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
... (n*(n+1)-m**2/(1-z**2))*f(z)
>>> for z in [0, 2, -1.5, 0.5+2j]:
... chop(deq(mpmathify(z)))
...
0.0
0.0
0.0
0.0
"""
legenq = r"""
Calculates the (associated) Legendre function of the second kind of
degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the second kind, `Q_n(z)`. The parameters may
be complex numbers.
The Legendre functions of the second kind give a second set of
solutions to the (associated) Legendre differential equation.
(See :func:`~mpmath.legenp`.)
Unlike the Legendre functions of the first kind, they are not
polynomials of `z` for integer `n`, `m` but rational or logarithmic
functions with poles at `z = \pm 1`.
There are various ways to define Legendre functions of
the second kind, giving rise to different branch structures.
A version can be selected using the *type* keyword argument.
The *type=2* and *type=3* functions are given respectively by
.. math ::
Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
\left( \cos(\pi m) P_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
\hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
\left( \hat{P}_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
of the first kind. The formulas above should be understood as limits
when `m` is an integer.
These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
is essentially the same as the function defined in
Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
of `(z^2-1)^{m/2}`, giving slightly different branches.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenq(2, 0, 0.5)
-0.8186632680417568557122028
>>> legenq(-1.5, -2, 2.5)
(0.6655964618250228714288277 + 0.3937692045497259717762649j)
>>> legenq(2-j, 3+4j, -6+5j)
(-10001.95256487468541686564 - 6011.691337610097577791134j)
Different versions of the function::
>>> legenq(2, 1, 0.5)
0.7298060598018049369381857
>>> legenq(2, 1, 1.5)
(-7.902916572420817192300921 + 0.1998650072605976600724502j)
>>> legenq(2, 1, 0.5, type=3)
(2.040524284763495081918338 - 0.7298060598018049369381857j)
>>> chop(legenq(2, 1, 1.5, type=3))
-0.1998650072605976600724502
"""
chebyt = r"""
``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
kind `T_n(x)`, defined by the identity
.. math ::
T_n(\cos x) = \cos(n x).
The Chebyshev polynomials of the first kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyt.py
.. image :: /plots/chebyt.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using a degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-1.0, 0.0, 2.0]
[0.0, -3.0, 0.0, 4.0]
[1.0, 0.0, -8.0, 0.0, 8.0]
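The defining identity can be checked numerically::
>>> n, x = 6, mpf(0.7)
>>> chop(chebyt(n, cos(x)) - cos(n*x))
0.0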
**Orthogonality**
The Chebyshev polynomials of the first kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = 1/\sqrt{1-x^2}`::
>>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
>>> m, n = 3, 4
>>> nprint(quad(f, [-1, 1]),1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.57079632596448
"""
chebyu = r"""
``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
kind `U_n(x)`, defined by the identity
.. math ::
U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
The Chebyshev polynomials of the second kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /plots/chebyu.py
.. image :: /plots/chebyu.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using a degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
...
[1.0]
[0.0, 2.0]
[-1.0, 0.0, 4.0]
[0.0, -4.0, 0.0, 8.0]
[1.0, 0.0, -12.0, 0.0, 16.0]
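The defining identity can be verified in the same way::
>>> n, x = 6, mpf(0.7)
>>> chop(chebyu(n, cos(x)) - sin((n+1)*x)/sin(x))
0.0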
**Orthogonality**
The Chebyshev polynomials of the second kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = \sqrt{1-x^2}`::
>>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
>>> m, n = 3, 4
>>> quad(f, [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.5707963267949
"""
besselj = r"""
``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
`J_n(x)`. Bessel functions of the first kind are defined as
solutions of the differential equation
.. math ::
x^2 y'' + x y' + (x^2 - n^2) y = 0
which appears, among other things, when solving the radial
part of Laplace's equation in cylindrical coordinates. This
equation has two solutions for given `n`, where the
`J_n`-function is the solution that is nonsingular at `x = 0`.
For positive integer `n`, `J_n(x)` behaves roughly like a sine
(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
that decays slowly as `x \to \pm\infty`.
Generally, `J_n` is a special case of the hypergeometric
function `\,_0F_1`:
.. math ::
J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
\,_0F_1\left(n+1,-\frac{x^2}{4}\right)
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} J_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besselj.py
.. image :: /plots/besselj.png
.. literalinclude :: /plots/besselj_c.py
.. image :: /plots/besselj_c.png
**Examples**
Evaluation is supported for arbitrary arguments, and at
arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> besselj(2, 1000)
-0.024777229528606
>>> besselj(4, 0.75)
0.000801070086542314
>>> besselj(2, 1000j)
(-2.48071721019185e+432 + 6.41567059811949e-437j)
>>> mp.dps = 25
>>> besselj(0.75j, 3+4j)
(-2.778118364828153309919653 - 1.5863603889018621585533j)
>>> mp.dps = 50
>>> besselj(1, pi)
0.28461534317975275734531059968613140570981118184947
Arguments may be large::
>>> mp.dps = 25
>>> besselj(0, 10000)
-0.007096160353388801477265164
>>> besselj(0, 10**10)
0.000002175591750246891726859055
>>> besselj(2, 10**100)
7.337048736538615712436929e-51
>>> besselj(2, 10**5*j)
(-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
The Bessel functions of the first kind satisfy simple
symmetries around `x = 0`::
>>> mp.dps = 15
>>> nprint([besselj(n,0) for n in range(5)])
[1.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint([besselj(n,pi) for n in range(5)])
[-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
>>> nprint([besselj(n,-pi) for n in range(5)])
[-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
Roots of Bessel functions are often used::
>>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
[2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
>>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
[3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
The roots are not periodic, but the distance between successive
roots asymptotically approaches `\pi`. Bessel functions of
the first kind have the following normalization::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
trigonometric function::
>>> x = 10
>>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
(-0.13726373575505, -0.13726373575505)
>>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
(-0.211708866331398, -0.211708866331398)
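The hypergeometric representation given above can be checked directly::
>>> n, x = 2, mpf(3)
>>> abs(besselj(n, x) - x**n/(2**n*gamma(n+1))*hyp0f1(n+1, -x**2/4)) < mpf('1e-12')
True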
Derivatives of any order can be computed (negative orders
correspond to integration)::
>>> mp.dps = 25
>>> besselj(0, 7.5, 1)
-0.1352484275797055051822405
>>> diff(lambda x: besselj(0,x), 7.5)
-0.1352484275797055051822405
>>> besselj(0, 7.5, 10)
-0.1377811164763244890135677
>>> diff(lambda x: besselj(0,x), 7.5, 10)
-0.1377811164763244890135677
>>> besselj(0,7.5,-1) - besselj(0,3.5,-1)
-0.1241343240399987693521378
>>> quad(j0, [3.5, 7.5])
-0.1241343240399987693521378
Differentiation with a noninteger order gives the fractional derivative
in the sense of the Riemann-Liouville differintegral, as computed by
:func:`~mpmath.differint`::
>>> mp.dps = 15
>>> besselj(1, 3.5, 0.75)
-0.385977722939384
>>> differint(lambda x: besselj(1, x), 3.5, 0.75)
-0.385977722939384
"""
besseli = r"""
``besseli(n, x, derivative=0)`` gives the modified Bessel function of the
first kind,
.. math ::
I_n(x) = i^{-n} J_n(ix).
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} I_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/besseli.py
.. image :: /plots/besseli.png
.. literalinclude :: /plots/besseli_c.py
.. image :: /plots/besseli_c.png
**Examples**
Some values of `I_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besseli(0,0)
1.0
>>> besseli(1,0)
0.0
>>> besseli(0,1)
1.266065877752008335598245
>>> besseli(3.5, 2+3j)
(-0.2904369752642538144289025 - 0.4469098397654815837307006j)
Arguments may be large::
>>> besseli(2, 1000)
2.480717210191852440616782e+432
>>> besseli(2, 10**10)
4.299602851624027900335391e+4342944813
>>> besseli(2, 6000+10000j)
(-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j)
For integers `n`, the following integral representation holds::
>>> mp.dps = 15
>>> n = 3
>>> x = 2.3
>>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
0.349223221159309
>>> besseli(n,x)
0.349223221159309
Derivatives and antiderivatives of any order can be computed::
>>> mp.dps = 25
>>> besseli(2, 7.5, 1)
195.8229038931399062565883
>>> diff(lambda x: besseli(2,x), 7.5)
195.8229038931399062565883
>>> besseli(2, 7.5, 10)
153.3296508971734525525176
>>> diff(lambda x: besseli(2,x), 7.5, 10)
153.3296508971734525525176
>>> besseli(2,7.5,-1) - besseli(2,3.5,-1)
202.5043900051930141956876
>>> quad(lambda x: besseli(2,x), [3.5, 7.5])
202.5043900051930141956876
"""
bessely = r"""
``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind,
.. math ::
Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
For `n` an integer, this formula should be understood as a
limit. With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} Y_n(x)
is computed.
**Plots**
.. literalinclude :: /plots/bessely.py
.. image :: /plots/bessely.png
.. literalinclude :: /plots/bessely_c.py
.. image :: /plots/bessely_c.png
**Examples**
Some values of `Y_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bessely(0,0), bessely(1,0), bessely(2,0)
(-inf, -inf, -inf)
>>> bessely(1, pi)
0.3588729167767189594679827
>>> bessely(0.5, 3+4j)
(9.242861436961450520325216 - 3.085042824915332562522402j)
Arguments may be large::
>>> bessely(0, 10000)
0.00364780555898660588668872
>>> bessely(2.5, 10**50)
-4.8952500412050989295774e-26
>>> bessely(2.5, -10**50)
(0.0 + 4.8952500412050989295774e-26j)
Derivatives and antiderivatives of any order can be computed::
>>> bessely(2, 3.5, 1)
0.3842618820422660066089231
>>> diff(lambda x: bessely(2, x), 3.5)
0.3842618820422660066089231
>>> bessely(0.5, 3.5, 1)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(0.5, x), 3.5)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(2, x), 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 100.5, 100)
0.02668487547301372334849043
>>> quad(lambda x: bessely(2,x), [1,3])
-1.377046859093181969213262
>>> bessely(2,3,-1) - bessely(2,1,-1)
-1.377046859093181969213262
"""
besselk = r"""
``besselk(n, x)`` gives the modified Bessel function of the
second kind,
.. math ::
K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
For `n` an integer, this formula should be understood as a
limit.
**Plots**
.. literalinclude :: /plots/besselk.py
.. image :: /plots/besselk.png
.. literalinclude :: /plots/besselk_c.py
.. image :: /plots/besselk_c.png
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besselk(0,1)
0.4210244382407083333356274
>>> besselk(0, -1)
(0.4210244382407083333356274 - 3.97746326050642263725661j)
>>> besselk(3.5, 2+3j)
(-0.02090732889633760668464128 + 0.2464022641351420167819697j)
>>> besselk(2+3j, 0.5)
(0.9615816021726349402626083 + 0.1918250181801757416908224j)
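For noninteger `n`, the defining combination of modified Bessel functions
can be checked directly::
>>> n, x = mpf(2.5), mpf(3)
>>> abs(besselk(n, x) - pi/2*(besseli(-n, x) - besseli(n, x))/sin(pi*n)) < mpf('1e-20')
True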
Arguments may be large::
>>> besselk(0, 100)
4.656628229175902018939005e-45
>>> besselk(1, 10**6)
4.131967049321725588398296e-434298
>>> besselk(1, 10**6*j)
(0.001140348428252385844876706 - 0.0005200017201681152909000961j)
>>> besselk(4.5, fmul(10**50, j, exact=True))
(1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j)
The point `x = 0` is a singularity (logarithmic if `n = 0`)::
>>> besselk(0,0)
+inf
>>> besselk(1,0)
+inf
>>> for n in range(-4, 5):
... print(besselk(n, '1e-1000'))
...
4.8e+4001
8.0e+3000
2.0e+2000
1.0e+1000
2302.701024509704096466802
1.0e+1000
2.0e+2000
8.0e+3000
4.8e+4001
"""
hankel1 = r"""
``hankel1(n,x)`` computes the Hankel function of the first kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(1)}(x) = J_n(x) + i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel1.py
.. image :: /plots/hankel1.png
.. literalinclude :: /plots/hankel1_c.py
.. image :: /plots/hankel1_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel1(2, pi)
(0.4854339326315091097054957 - 0.0999007139290278787734903j)
>>> hankel1(3.5, pi)
(0.2340002029630507922628888 - 0.6419643823412927142424049j)
"""
hankel2 = r"""
``hankel2(n,x)`` computes the Hankel function of the second kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(2)}(x) = J_n(x) - i Y_n(x).
**Plots**
.. literalinclude :: /plots/hankel2.py
.. image :: /plots/hankel2.png
.. literalinclude :: /plots/hankel2_c.py
.. image :: /plots/hankel2_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel2(2, pi)
(0.4854339326315091097054957 + 0.0999007139290278787734903j)
>>> hankel2(3.5, pi)
(0.2340002029630507922628888 + 0.6419643823412927142424049j)
"""
lambertw = r"""
The Lambert W function `W(z)` is defined as the inverse function
of `w \exp(w)`. In other words, the value of `W(z)` is such that
`z = W(z) \exp(W(z))` for any complex number `z`.
The Lambert W function is a multivalued function with infinitely
many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch
gives a different solution `w` of the equation `z = w \exp(w)`.
All branches are supported by :func:`~mpmath.lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
The definition, implementation and choice of branches
is based on [Corless]_.
**Plots**
.. literalinclude :: /plots/lambertw.py
.. image :: /plots/lambertw.png
.. literalinclude :: /plots/lambertw_c.py
.. image :: /plots/lambertw_c.png
**Basic examples**
The Lambert W function is the inverse of `w \exp(w)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> w = lambertw(1)
>>> w
0.5671432904097838729999687
>>> w*exp(w)
1.0
Any branch gives a valid inverse::
>>> w = lambertw(1, k=3)
>>> w
(-2.853581755409037807206819 + 17.11353553941214591260783j)
>>> w = lambertw(1, k=25)
>>> w
(-5.047020464221569709378686 + 155.4763860949415867162066j)
>>> chop(w*exp(w))
1.0
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower `z^{z^{z^{\ldots}}}`::
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(mpf(0.5), 100)
0.6411857445049859844862005
>>> -lambertw(-log(0.5))/log(0.5)
0.6411857445049859844862005
**Properties**
The Lambert W function grows roughly like the natural logarithm
for large arguments::
>>> lambertw(1000); log(1000)
5.249602852401596227126056
6.907755278982137052053974
>>> lambertw(10**100); log(10**100)
224.8431064451185015393731
230.2585092994045684017991
The principal branch of the Lambert W function has a rational
Taylor series expansion around `z = 0`::
>>> nprint(taylor(lambertw, 0, 6), 10)
[0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
Some special values and limits are::
>>> lambertw(0)
0.0
>>> lambertw(1)
0.5671432904097838729999687
>>> lambertw(e)
1.0
>>> lambertw(inf)
+inf
>>> lambertw(0, k=-1)
-inf
>>> lambertw(0, k=3)
-inf
>>> lambertw(inf, k=2)
(+inf + 12.56637061435917295385057j)
>>> lambertw(inf, k=3)
(+inf + 18.84955592153875943077586j)
>>> lambertw(-inf, k=3)
(+inf + 21.9911485751285526692385j)
The `k = 0` and `k = -1` branches join at `z = -1/e` where
`W(z) = -1` for both branches. Since `-1/e` can only be represented
approximately with binary floating-point numbers, evaluating the
Lambert W function at this point only gives `-1` approximately::
>>> lambertw(-1/e, 0)
-0.9999999999998371330228251
>>> lambertw(-1/e, -1)
-1.000000000000162866977175
If `-1/e` happens to round in the negative direction, there might be
a small imaginary part::
>>> mp.dps = 15
>>> lambertw(-1/e)
(-1.0 + 8.22007971483662e-9j)
>>> lambertw(-1/e+eps)
-0.999999966242188
**References**
1. [Corless]_
"""
barnesg = r"""
Evaluates the Barnes G-function, which generalizes the
superfactorial (:func:`~mpmath.superfac`) and by extension also the
hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
in an analogous way to how the gamma function generalizes
the ordinary factorial.
The Barnes G-function may be defined in terms of a Weierstrass
product:
.. math ::
G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
\prod_{n=1}^\infty
\left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
For positive integers `n`, we have the relation to superfactorials
`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
**Examples**
Some elementary values and limits of the Barnes G-function::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> barnesg(1), barnesg(2), barnesg(3)
(1.0, 1.0, 1.0)
>>> barnesg(4)
2.0
>>> barnesg(5)
12.0
>>> barnesg(6)
288.0
>>> barnesg(7)
34560.0
>>> barnesg(8)
24883200.0
>>> barnesg(inf)
+inf
>>> barnesg(0), barnesg(-1), barnesg(-2)
(0.0, 0.0, 0.0)
Closed-form values are known for some rational arguments::
>>> barnesg('1/2')
0.603244281209446
>>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
0.603244281209446
>>> barnesg('1/4')
0.29375596533861
>>> nthroot(exp('3/8')/exp(catalan/pi)/
... gamma(0.25)**3/sqrt(glaisher)**9, 4)
0.29375596533861
The Barnes G-function satisfies the functional equation
`G(z+1) = \Gamma(z) G(z)`::
>>> z = pi
>>> barnesg(z+1)
2.39292119327948
>>> gamma(z)*barnesg(z)
2.39292119327948
The asymptotic growth rate of the Barnes G-function is related to
the Glaisher-Kinkelin constant::
>>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
... (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
0.847536694177301
>>> exp('1/12')/glaisher
0.847536694177301
The Barnes G-function can be differentiated in closed form::
>>> z = 3
>>> diff(barnesg, z)
0.264507203401607
>>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
0.264507203401607
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> barnesg(6.5)
2548.7457695685
>>> barnesg(-pi)
0.00535976768353037
>>> barnesg(3+4j)
(-0.000676375932234244 - 4.42236140124728e-5j)
>>> mp.dps = 50
>>> barnesg(1/sqrt(2))
0.81305501090451340843586085064413533788206204124732
>>> q = barnesg(10j)
>>> q.real
0.000000000021852360840356557241543036724799812371995850552234
>>> q.imag
-0.00000000000070035335320062304849020654215545839053210041457588
>>> mp.dps = 15
>>> barnesg(100)
3.10361006263698e+6626
>>> barnesg(-101)
0.0
>>> barnesg(-10.5)
5.94463017605008e+25
>>> barnesg(-10000.5)
-6.14322868174828e+167480422
>>> barnesg(1000j)
(5.21133054865546e-1173597 + 4.27461836811016e-1173597j)
>>> barnesg(-1000+1000j)
(2.43114569750291e+1026623 + 2.24851410674842e+1026623j)
**References**
1. Whittaker & Watson, *A Course of Modern Analysis*,
Cambridge University Press, 4th edition (1927), p.264
2. http://en.wikipedia.org/wiki/Barnes_G-function
3. http://mathworld.wolfram.com/BarnesG-Function.html
"""
superfac = r"""
Computes the superfactorial, defined as the product of
consecutive factorials
.. math ::
\mathrm{sf}(n) = \prod_{k=1}^n k!
For general complex `z`, `\mathrm{sf}(z)` is defined
in terms of the Barnes G-function (see :func:`~mpmath.barnesg`).
**Examples**
The first few superfactorials are (OEIS A000178)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, superfac(n)))
...
0 1.0
1 1.0
2 2.0
3 12.0
4 288.0
5 34560.0
6 24883200.0
7 125411328000.0
8 5.05658474496e+15
9 1.83493347225108e+21
Superfactorials grow very rapidly::
>>> superfac(1000)
3.24570818422368e+1177245
>>> superfac(10**10)
2.61398543581249e+467427913956904067453
Evaluation is supported for arbitrary arguments::
>>> mp.dps = 25
>>> superfac(pi)
17.20051550121297985285333
>>> superfac(2+3j)
(-0.005915485633199789627466468 + 0.008156449464604044948738263j)
>>> diff(superfac, 1)
0.2645072034016070205673056
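Assuming the natural extension `\mathrm{sf}(z) = G(z+2)` of the integer
relation given under :func:`~mpmath.barnesg`, the connection to the
Barnes G-function can be checked numerically::
>>> abs(superfac(3) - barnesg(5)) < mpf('1e-20')
True
>>> abs(superfac(2+3j) - barnesg(4+3j)) < mpf('1e-20')
True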
**References**
1. http://oeis.org/A000178
"""
hyperfac = r"""
Computes the hyperfactorial, defined for integers as the product
.. math ::
H(n) = \prod_{k=1}^n k^k.
The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
It can be defined more generally in terms of the Barnes G-function (see
:func:`~mpmath.barnesg`) and the gamma function by the formula
.. math ::
H(z) = \frac{\Gamma(z+1)^z}{G(z+1)}.
The extension to complex numbers can also be done via
the integral representation
.. math ::
H(z) = (2\pi)^{-z/2} \exp \left[
{z+1 \choose 2} + \int_0^z \log(t!)\,dt
\right].
**Examples**
The rapidly-growing sequence of hyperfactorials begins
(OEIS A002109)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, hyperfac(n)))
...
0 1.0
1 1.0
2 4.0
3 108.0
4 27648.0
5 86400000.0
6 4031078400000.0
7 3.3197663987712e+18
8 5.56964379417266e+25
9 2.15779412229419e+34
Some even larger hyperfactorials are::
>>> hyperfac(1000)
5.46458120882585e+1392926
>>> hyperfac(10**10)
4.60408207642219e+489142638002418704309
The hyperfactorial can be evaluated for arbitrary arguments::
>>> hyperfac(0.5)
0.880449235173423
>>> diff(hyperfac, 1)
0.581061466795327
>>> hyperfac(pi)
205.211134637462
>>> hyperfac(-10+1j)
(3.01144471378225e+46 - 2.45285242480185e+46j)
The recurrence property of the hyperfactorial holds
generally::
>>> z = 3-4*j
>>> hyperfac(z)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z**z * hyperfac(z-1)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z = mpf(-0.6)
>>> chop(z**z * hyperfac(z-1))
1.28170142849352
>>> hyperfac(z)
1.28170142849352
The hyperfactorial may also be computed using the integral
definition::
>>> z = 2.5
>>> hyperfac(z)
15.9842119922237
>>> (2*pi)**(-z/2)*exp(binomial(z+1,2) +
... quad(lambda t: loggamma(t+1), [0, z]))
15.9842119922237
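The Barnes G-function formula can be checked in the same way::
>>> z = mpf(0.5)
>>> abs(hyperfac(z) - gamma(z+1)**z / barnesg(z+1)) < mpf('1e-12')
True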
:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> hyperfac(10)
215779412229418562091680268288000000000000000.0
>>> hyperfac(1/sqrt(2))
0.89404818005227001975423476035729076375705084390942
**References**
1. http://oeis.org/A002109
2. http://mathworld.wolfram.com/Hyperfactorial.html
"""
rgamma = r"""
Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This
function evaluates to zero at the poles
of the gamma function, `z = 0, -1, -2, \ldots`.
**Examples**
Basic examples::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> rgamma(1)
1.0
>>> rgamma(4)
0.1666666666666666666666667
>>> rgamma(0); rgamma(-1)
0.0
0.0
>>> rgamma(1000)
2.485168143266784862783596e-2565
>>> rgamma(inf)
0.0
The integral of `1/\Gamma(x)` over the positive real axis (the
Fransén-Robinson constant) can be rewritten as `e` plus a well-behaved
integral::
>>> quad(rgamma, [0,inf])
2.807770242028519365221501
>>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf])
2.807770242028519365221501
"""
loggamma = r"""
Computes the principal branch of the log-gamma function,
`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many
complex branch cuts, the principal log-gamma function only has a single
branch cut along the negative half-axis. The principal branch
continuously matches the asymptotic Stirling expansion
.. math ::
\ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} +
\left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}).
The real parts of both functions agree, but their imaginary
parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`.
They coincide for `z \in \mathbb{R}, z > 0`.
Computationally, it is advantageous to use :func:`~mpmath.loggamma`
instead of :func:`~mpmath.gamma` for extremely large arguments.
**Examples**
Comparing with `\ln(\Gamma(z))`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> loggamma('13.2'); log(gamma('13.2'))
20.49400419456603678498394
20.49400419456603678498394
>>> loggamma(3+4j)
(-1.756626784603784110530604 + 4.742664438034657928194889j)
>>> log(gamma(3+4j))
(-1.756626784603784110530604 - 1.540520869144928548730397j)
>>> log(gamma(3+4j)) + 2*pi*j
(-1.756626784603784110530604 + 4.742664438034657928194889j)
Note the imaginary parts for negative arguments::
>>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5)
(1.265512123484645396488946 - 3.141592653589793238462643j)
(0.8600470153764810145109327 - 6.283185307179586476925287j)
(-0.05624371649767405067259453 - 9.42477796076937971538793j)
Some special values::
>>> loggamma(1); loggamma(2)
0.0
0.0
>>> loggamma(3); +ln2
0.6931471805599453094172321
0.6931471805599453094172321
>>> loggamma(3.5); log(15*sqrt(pi)/8)
1.200973602347074224816022
1.200973602347074224816022
>>> loggamma(inf)
+inf
Huge arguments are permitted::
>>> loggamma('1e30')
6.807755278982137052053974e+31
>>> loggamma('1e300')
6.897755278982137052053974e+302
>>> loggamma('1e3000')
6.906755278982137052053974e+3003
>>> loggamma('1e100000000000000000000')
2.302585092994045684007991e+100000000000000000020
>>> loggamma('1e30j')
(-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j)
>>> loggamma('1e300j')
(-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j)
>>> loggamma('1e3000j')
(-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j)
The log-gamma function can be integrated analytically
on any interval of unit length::
>>> z = 0
>>> quad(loggamma, [z,z+1]); log(2*pi)/2
0.9189385332046727417803297
0.9189385332046727417803297
>>> z = 3+4j
>>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
The derivatives of the log-gamma function are given by the
polygamma function (:func:`~mpmath.psi`)::
>>> diff(loggamma, -4+3j); psi(0, -4+3j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
>>> diff(loggamma, -4+3j, 2); psi(1, -4+3j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
The log-gamma function satisfies an additive form of the
recurrence relation for the ordinary gamma function::
>>> z = 2+3j
>>> loggamma(z); loggamma(z+1) - log(z)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
"""
siegeltheta = r"""
Computes the Riemann-Siegel theta function,
.. math ::
\theta(t) = \frac{
\log\Gamma\left(\frac{1+2it}{4}\right) -
\log\Gamma\left(\frac{1-2it}{4}\right)
}{2i} - \frac{\log \pi}{2} t.
The Riemann-Siegel theta function is important in
providing the phase factor for the Z-function
(see :func:`~mpmath.siegelz`). Evaluation is supported for real and
complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegeltheta(0)
0.0
>>> siegeltheta(inf)
+inf
>>> siegeltheta(-inf)
-inf
>>> siegeltheta(1)
-1.767547952812290388302216
>>> siegeltheta(10+0.25j)
(-3.068638039426838572528867 + 0.05804937947429712998395177j)
Arbitrary derivatives may be computed with the optional *derivative* keyword argument::
>>> siegeltheta(1234, derivative=2)
0.0004051864079114053109473741
>>> diff(siegeltheta, 1234, n=2)
0.0004051864079114053109473741
The Riemann-Siegel theta function has odd symmetry around `t = 0`,
two local extreme points and three real roots including 0 (located
symmetrically)::
>>> nprint(chop(taylor(siegeltheta, 0, 5)))
[0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
>>> findroot(diffun(siegeltheta), 7)
6.28983598883690277966509
>>> findroot(siegeltheta, 20)
17.84559954041086081682634
For large `t`, there is a famous asymptotic formula
for `\theta(t)`, given to first order by::
>>> t = mpf(10**6)
>>> siegeltheta(t)
5488816.353078403444882823
>>> -t*log(2*pi/t)/2-t/2
5488816.745777464310273645
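The constant term of the full expansion is `-\pi/8` (a standard fact not shown above); a minimal sketch checking that the first-order formula is off by approximately this amount::

    from mpmath import mp, siegeltheta, log, pi, mpf
    mp.dps = 25
    t = mpf(10)**6
    first_order = t/2*log(t/(2*pi)) - t/2
    # the residual should be -pi/8 up to O(1/t) corrections
    assert abs(siegeltheta(t) - first_order + pi/8) < mpf('1e-6')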
"""
grampoint = r"""
Gives the `n`-th Gram point `g_n`, defined as the solution
to the equation `\theta(g_n) = \pi n` where `\theta(t)`
is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
The first few Gram points are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> grampoint(0)
17.84559954041086081682634
>>> grampoint(1)
23.17028270124630927899664
>>> grampoint(2)
27.67018221781633796093849
>>> grampoint(3)
31.71797995476405317955149
Checking the definition::
>>> siegeltheta(grampoint(3))
9.42477796076937971538793
>>> 3*pi
9.42477796076937971538793
A large Gram point::
>>> grampoint(10**10)
3293531632.728335454561153
Gram points are useful when studying the Z-function
(:func:`~mpmath.siegelz`). See the documentation of that function
for additional examples.
:func:`~mpmath.grampoint` can solve the defining equation for
nonintegral `n`. There is a fixed point where `g(x) = x`::
>>> findroot(lambda x: grampoint(x) - x, 10000)
9146.698193171459265866198
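A sketch checking the defining equation `\theta(g_x) = \pi x` for a nonintegral index::

    from mpmath import mp, grampoint, siegeltheta, pi, mpf
    mp.dps = 25
    x = mpf('2.5')
    # grampoint solves theta(g) = pi*x, also for fractional x
    assert abs(siegeltheta(grampoint(x)) - pi*x) < mpf('1e-15')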
**References**
1. http://mathworld.wolfram.com/GramPoint.html
"""
siegelz = r"""
Computes the Z-function, also known as the Riemann-Siegel Z function,
.. math ::
Z(t) = e^{i \theta(t)} \zeta(1/2+it)
where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
and where `\theta(t)` denotes the Riemann-Siegel theta function
(see :func:`~mpmath.siegeltheta`).
Evaluation is supported for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegelz(1)
-0.7363054628673177346778998
>>> siegelz(3+4j)
(-0.1852895764366314976003936 - 0.2773099198055652246992479j)
The first four derivatives are supported, using the
optional *derivative* keyword argument::
>>> siegelz(1234567, derivative=3)
56.89689348495089294249178
>>> diff(siegelz, 1234567, n=3)
56.89689348495089294249178
The Z-function has a Maclaurin expansion::
>>> nprint(chop(taylor(siegelz, 0, 4)))
[-1.46035, 0.0, 2.73588, 0.0, -8.39357]
The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
critical line `s = 1/2+it` (i.e. for real arguments `t`
to `Z`). Its zeros coincide with those of the Riemann zeta
function::
>>> findroot(siegelz, 14)
14.13472514173469379045725
>>> findroot(siegelz, 20)
21.02203963877155499262848
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+20j)
(0.5 + 21.02203963877155499262848j)
Since the Z-function is real-valued on the critical line
(and, unlike `|\zeta(s)|`, analytic), it is useful for
investigating the zeros of the Riemann zeta function.
For example, one can use a root-finding algorithm based
on sign changes::
>>> findroot(siegelz, [100, 200], solver='bisect')
176.4414342977104188888926
To locate roots, Gram points `g_n` which can be computed
by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is
positive for two consecutive `n`, then `Z(t)` must have
a zero between those points::
>>> g10 = grampoint(10)
>>> g11 = grampoint(11)
>>> (-1)**10 * siegelz(g10) > 0
True
>>> (-1)**11 * siegelz(g11) > 0
True
>>> findroot(siegelz, [g10, g11], solver='bisect')
56.44624769706339480436776
>>> g10, g11
(54.67523744685325626632663, 57.54516517954725443703014)
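A sketch scanning several consecutive Gram points for violations of Gram's law `(-1)^n Z(g_n) > 0` (the law is known to hold for all small indices; the first exception occurs near `n = 126`, a standard fact not stated above)::

    from mpmath import mp, grampoint, siegelz
    mp.dps = 15
    # Gram's law holds at every index checked in this small range
    violations = [n for n in range(0, 20) if (-1)**n * siegelz(grampoint(n)) <= 0]
    assert violations == []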
"""
riemannr = r"""
Evaluates the Riemann R function, a smooth approximation of the
prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann
R function gives a fast numerical approximation useful e.g. to
roughly estimate the number of primes in a given interval.
The Riemann R function is computed using the rapidly convergent Gram
series,
.. math ::
R(x) = 1 + \sum_{k=1}^{\infty}
\frac{\log^k x}{k k! \zeta(k+1)}.
From the Gram series, one sees that the Riemann R function is a
well-defined analytic function (except for a branch cut along
the negative real half-axis); it can be evaluated for arbitrary
real or complex arguments.
The Riemann R function gives a very accurate approximation
of the prime counting function. For example, it is wrong by at
most 2 for `x < 1000`, and for `x = 10^9` differs from the exact
value of `\pi(x)` by 79, or less than two parts in a million.
It is about 10 times more accurate than the logarithmic integral
estimate (see :func:`~mpmath.li`), which, however, is even faster to evaluate.
It is orders of magnitude more accurate than the extremely
fast `x/\log x` estimate.
**Examples**
For small arguments, the Riemann R function almost exactly
gives the prime counting function if rounded to the nearest
integer::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> primepi(50), riemannr(50)
(15, 14.9757023241462)
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100))
1
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300))
2
The Riemann R function can be evaluated for arguments far too large
for exact determination of `\pi(x)` to be computationally
feasible with any presently known algorithm::
>>> riemannr(10**30)
1.46923988977204e+28
>>> riemannr(10**100)
4.3619719871407e+97
>>> riemannr(10**1000)
4.3448325764012e+996
A comparison of the Riemann R function and logarithmic integral estimates
for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`.
The fractional error is shown in parentheses::
>>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534]
>>> for n, p in enumerate(exact):
... n += 1
... r, l = riemannr(10**n), li(10**n)
... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3)
... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr))
...
1 4 4.56458314100509(0.141) 6.1655995047873(0.541)
2 25 25.6616332669242(0.0265) 30.1261415840796(0.205)
3 168 168.359446281167(0.00214) 177.609657990152(0.0572)
4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139)
5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394)
6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165)
7 664579 664667.447564748(0.000133) 664918.405048569(0.000511)
8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131)
9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5)
The derivative of the Riemann R function gives the approximate
probability for a number of magnitude `x` to be prime::
>>> diff(riemannr, 1000)
0.141903028110784
>>> mpf(primepi(1050) - primepi(950)) / 100
0.15
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> mp.dps = 30
>>> riemannr(7.5)
3.72934743264966261918857135136
>>> riemannr(-4+2j)
(-0.551002208155486427591793957644 + 2.16966398138119450043195899746j)
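A minimal sketch evaluating the Gram series directly with :func:`~mpmath.nsum` and comparing against :func:`~mpmath.riemannr`::

    from mpmath import mp, riemannr, nsum, log, fac, zeta, inf, mpf
    mp.dps = 15
    x = 1000
    # Gram series: R(x) = 1 + sum_{k>=1} log(x)^k / (k * k! * zeta(k+1))
    gram = 1 + nsum(lambda k: log(x)**k / (k * fac(k) * zeta(k+1)), [1, inf])
    assert abs(gram - riemannr(x)) < mpf('1e-8')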
"""
primepi = r"""
Evaluates the prime counting function, `\pi(x)`, which gives
the number of primes less than or equal to `x`. The argument
`x` may be fractional.
The prime counting function is very expensive to evaluate
precisely for large `x`, and the present implementation is
not optimized in any way. For numerical approximation of the
prime counting function, it is better to use :func:`~mpmath.primepi2`
or :func:`~mpmath.riemannr`.
Some values of the prime counting function::
>>> from mpmath import *
>>> [primepi(k) for k in range(20)]
[0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8]
>>> primepi(3.5)
2
>>> primepi(100000)
9592
"""
primepi2 = r"""
Returns an interval (as an ``mpi`` instance) providing bounds
for the value of the prime counting function `\pi(x)`. For small
`x`, :func:`~mpmath.primepi2` returns an exact interval based on
the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval
based on Schoenfeld's inequality
.. math ::
|\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi}
is returned. This estimate is rigorous assuming the truth of
the Riemann hypothesis, and can be computed very quickly.
**Examples**
Exact values of the prime counting function for small `x`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> iv.dps = 15; iv.pretty = True
>>> primepi2(10)
[4.0, 4.0]
>>> primepi2(100)
[25.0, 25.0]
>>> primepi2(1000)
[168.0, 168.0]
Loose intervals are generated for moderately large `x`::
>>> primepi2(10000), primepi(10000)
([1209.0, 1283.0], 1229)
>>> primepi2(50000), primepi(50000)
([5070.0, 5263.0], 5133)
As `x` increases, the absolute error gets worse while the relative
error improves. The exact value of `\pi(10^{23})` is
1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant
digits::
>>> p = primepi2(10**23)
>>> p
[1.9253203909477020467e+21, 1.925320392280406229e+21]
>>> mpf(p.delta) / mpf(p.a)
6.9219865355293e-10
A more precise, nonrigorous estimate for `\pi(x)` can be
obtained using the Riemann R function (:func:`~mpmath.riemannr`).
For large enough `x`, the value returned by :func:`~mpmath.primepi2`
essentially amounts to a small perturbation of the value returned by
:func:`~mpmath.riemannr`::
>>> primepi2(10**100)
[4.3619719871407024816e+97, 4.3619719871407032404e+97]
>>> riemannr(10**100)
4.3619719871407e+97
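A sketch confirming that the returned interval really brackets the exact count for a moderate argument (where :func:`~mpmath.primepi` is still cheap to evaluate)::

    from mpmath import mp, primepi, primepi2, mpf
    mp.dps = 15
    x = 10**4
    p = primepi2(x)
    # the exact prime count must lie inside [p.a, p.b]
    assert mpf(p.a) <= primepi(x) <= mpf(p.b)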
"""
primezeta = r"""
Computes the prime zeta function, which is defined
in analogy with the Riemann zeta function (:func:`~mpmath.zeta`)
as
.. math ::
P(s) = \sum_p \frac{1}{p^s}
where the sum is taken over all prime numbers `p`. Although
this sum only converges for `\mathrm{Re}(s) > 1`, the
function is defined by analytic continuation in the
half-plane `\mathrm{Re}(s) > 0`.
**Examples**
Arbitrary-precision evaluation for real and complex arguments is
supported::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> primezeta(2)
0.452247420041065498506543364832
>>> primezeta(pi)
0.15483752698840284272036497397
>>> mp.dps = 50
>>> primezeta(3)
0.17476263929944353642311331466570670097541212192615
>>> mp.dps = 20
>>> primezeta(3+4j)
(-0.12085382601645763295 - 0.013370403397787023602j)
The prime zeta function has a logarithmic pole at `s = 1`,
with residue equal to the difference of the Mertens and
Euler constants::
>>> primezeta(1)
+inf
>>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps)
-0.31571845205389007685
>>> mertens-euler
-0.31571845205389007685
The analytic continuation to `0 < \mathrm{Re}(s) \le 1`
is implemented. In this strip the function exhibits
very complex behavior; on the unit interval, it has poles at
`1/n` for every squarefree integer `n`::
>>> primezeta(0.5) # Pole at s = 1/2
(-inf + 3.1415926535897932385j)
>>> primezeta(0.25)
(-1.0416106801757269036 + 0.52359877559829887308j)
>>> primezeta(0.5+10j)
(0.54892423556409790529 + 0.45626803423487934264j)
Although evaluation works in principle for any `\mathrm{Re}(s) > 0`,
it should be noted that the evaluation time increases exponentially
as `s` approaches the imaginary axis.
For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`::
>>> primezeta(inf)
0.0
>>> primezeta(10), mpf(2)**-10
(0.00099360357443698021786, 0.0009765625)
>>> primezeta(1000)
9.3326361850321887899e-302
>>> primezeta(1000+1000j)
(-3.8565440833654995949e-302 - 8.4985390447553234305e-302j)
**References**
Carl-Erik Froberg, "On the prime zeta function",
BIT 8 (1968), pp. 187-202.
"""
bernpoly = r"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-0.0333333, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`~mpmath.bernoulli`)::
>>> bernpoly(12, 0), bernoulli(12)
(-0.253113553113553, -0.253113553113553)
>>> bernpoly(13, 0), bernoulli(13)
(0.0, 0.0)
Evaluation is accurate for large `n` and small `z`::
>>> mp.dps = 25
>>> bernpoly(100, 0.5)
2.838224957069370695926416e+78
>>> bernpoly(1000, 10.5)
5.318704469415522036482914e+1769
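The Bernoulli polynomials satisfy the forward-difference identity `B_n(x+1) - B_n(x) = n x^{n-1}` (a standard property not used above); a minimal numerical check::

    from mpmath import mp, bernpoly, mpf
    mp.dps = 25
    n, x = 7, mpf('0.375')
    # forward difference of B_n equals n*x**(n-1)
    assert abs(bernpoly(n, x+1) - bernpoly(n, x) - n*x**(n-1)) < mpf('1e-20')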
"""
polylog = r"""
Computes the polylogarithm, defined by the sum
.. math ::
\mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
This series is convergent only for `|z| < 1`, so elsewhere
the analytic continuation is implied.
The polylogarithm should not be confused with the logarithmic
integral (also denoted by Li or li), which is implemented
as :func:`~mpmath.li`.
**Examples**
The polylogarithm satisfies a huge number of functional identities.
A sample of polylogarithm evaluations is shown below::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polylog(1,0.5), log(2)
(0.693147180559945, 0.693147180559945)
>>> polylog(2,0.5), (pi**2-6*log(2)**2)/12
(0.582240526465012, 0.582240526465012)
>>> polylog(2,-phi), -log(phi)**2-pi**2/10
(-1.21852526068613, -1.21852526068613)
>>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
(0.53721319360804, 0.53721319360804)
:func:`~mpmath.polylog` can evaluate the analytic continuation of the
polylogarithm when `s` is an integer::
>>> polylog(2, 10)
(0.536301287357863 - 7.23378441241546j)
>>> polylog(2, -10)
-4.1982778868581
>>> polylog(2, 10j)
(-3.05968879432873 + 3.71678149306807j)
>>> polylog(-2, 10)
-0.150891632373114
>>> polylog(-2, -10)
0.067618332081142
>>> polylog(-2, 10j)
(0.0384353698579347 + 0.0912451798066779j)
Some more examples, with arguments on the unit circle (note that
the series definition cannot be used for computation here)::
>>> polylog(2,j)
(-0.205616758356028 + 0.915965594177219j)
>>> j*catalan-pi**2/48
(-0.205616758356028 + 0.915965594177219j)
>>> polylog(3,exp(2*pi*j/3))
(-0.534247512515375 + 0.765587078525922j)
>>> -4*zeta(3)/9 + 2*j*pi**3/81
(-0.534247512515375 + 0.765587078525921j)
Polylogarithms of different order are related by integration
and differentiation::
>>> s, z = 3, 0.5
>>> polylog(s+1, z)
0.517479061673899
>>> quad(lambda t: polylog(s,t)/t, [0, z])
0.517479061673899
>>> z*diff(lambda t: polylog(s+2,t), z)
0.517479061673899
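They also satisfy the duplication (square) formula `\mathrm{Li}_s(z) + \mathrm{Li}_s(-z) = 2^{1-s} \mathrm{Li}_s(z^2)` (a standard identity), which can be checked numerically for a nonintegral order::

    from mpmath import mp, polylog, mpf
    mp.dps = 25
    s, z = mpf('2.5'), mpf('0.25')
    # duplication formula for the polylogarithm
    assert abs(polylog(s, z) + polylog(s, -z) - 2**(1-s)*polylog(s, z**2)) < mpf('1e-20')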
Taylor series expansions around `z = 0` are::
>>> for n in range(-3, 4):
... nprint(taylor(lambda x: polylog(n,x), 0, 5))
...
[0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
[0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
[0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04]
[0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008]
The series defining the polylogarithm is simultaneously
a Taylor series and an L-series. For certain values of `z`, the
polylogarithm reduces to a pure zeta function::
>>> polylog(pi, 1), zeta(pi)
(1.17624173838258, 1.17624173838258)
>>> polylog(pi, -1), -altzeta(pi)
(-0.909670702980385, -0.909670702980385)
Evaluation for arbitrary, nonintegral `s` is supported
for `z` within the unit circle::
>>> polylog(3+4j, 0.25)
(0.24258605789446 - 0.00222938275488344j)
>>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
(0.24258605789446 - 0.00222938275488344j)
It is also currently supported outside of the unit circle for `z`
not too large in magnitude::
>>> polylog(1+j, 20+40j)
(-7.1421172179728 - 3.92726697721369j)
>>> polylog(1+j, 200+400j)
Traceback (most recent call last):
...
NotImplementedError: polylog for arbitrary s and z
**References**
1. Richard Crandall, "Note on fast polylogarithm computation"
http://people.reed.edu/~crandall/papers/Polylog.pdf
2. http://en.wikipedia.org/wiki/Polylogarithm
3. http://mathworld.wolfram.com/Polylogarithm.html
"""
bell = r"""
For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell
polynomial `B_n(x)`, the first few of which are
.. math ::
B_0(x) = 1
B_1(x) = x
B_2(x) = x^2+x
B_3(x) = x^3+3x^2+x
If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it
gives the `n`-th Bell number `B_n`, which is the number of
partitions of a set with `n` elements. By setting the precision to
at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
calculation of exact Bell numbers.
In general, :func:`~mpmath.bell` computes
.. math ::
B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
where `E_n(x)` is the generalized exponential function implemented
by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
where the modification is the sinc term ensuring that `B_n(x)` is
continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
differentiated, etc. for arbitrary complex arguments.
**Examples**
Simple evaluations::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bell(0, 2.5)
1.0
>>> bell(1, 2.5)
2.5
>>> bell(2, 2.5)
8.75
Evaluation for arbitrary complex arguments::
>>> bell(5.75+1j, 2-3j)
(-10767.71345136587098445143 - 15449.55065599872579097221j)
The first few Bell polynomials::
>>> for k in range(7):
... nprint(taylor(lambda x: bell(k,x), 0, k))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 1.0, 3.0, 1.0]
[0.0, 1.0, 7.0, 6.0, 1.0]
[0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
[0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
The first few Bell numbers and complementary Bell numbers::
>>> [int(bell(k)) for k in range(10)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
>>> [int(bell(k,-1)) for k in range(10)]
[1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
Large Bell numbers::
>>> mp.dps = 50
>>> bell(50)
185724268771078270438257767181908917499221852770.0
>>> bell(50,-1)
-29113173035759403920216141265491160286912.0
Some even larger values::
>>> mp.dps = 25
>>> bell(1000,-1)
-1.237132026969293954162816e+1869
>>> bell(1000)
2.989901335682408421480422e+1927
>>> bell(1000,2)
6.591553486811969380442171e+1987
>>> bell(1000,100.5)
9.101014101401543575679639e+2529
A determinant identity satisfied by Bell numbers::
>>> mp.dps = 15
>>> N = 8
>>> det([[bell(k+j) for j in range(N)] for k in range(N)])
125411328000.0
>>> superfac(N-1)
125411328000.0
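The Bell numbers also satisfy the binomial recurrence `B_{n+1} = \sum_k \binom{n}{k} B_k` (a standard fact); a minimal sketch::

    from mpmath import mp, bell, binomial
    mp.dps = 15
    n = 7
    # B_{n+1} = sum_{k=0}^{n} C(n,k) * B_k
    rhs = sum(binomial(n, k) * bell(k) for k in range(n + 1))
    assert abs(bell(n + 1) - rhs) < 1e-6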
**References**
1. http://mathworld.wolfram.com/DobinskisFormula.html
"""
polyexp = r"""
Evaluates the polyexponential function, defined for arbitrary
complex `s`, `z` by the series
.. math ::
E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
`E_s(z)` is constructed from the exponential function analogously
to how the polylogarithm is constructed from the ordinary
logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
It is an entire function of both `s` and `z`.
The polyexponential function provides a generalization of the
Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
In terms of the Bell polynomials,
.. math ::
E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
is a nonzero integer, but not otherwise. In particular, they differ
at `n = 0`.
**Examples**
Evaluating a series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
2.101755547733791780315904
>>> polyexp(0.5,1)
2.101755547733791780315904
Evaluation for arbitrary arguments::
>>> polyexp(-3-4j, 2.5+2j)
(2.351660261190434618268706 + 1.202966666673054671364215j)
Evaluation is accurate for tiny function values::
>>> polyexp(4, -100)
3.499471750566824369520223e-36
If `n` is a nonpositive integer, `E_n` reduces to a special
instance of the hypergeometric function `\,_pF_q`::
>>> n = 3
>>> x = pi
>>> polyexp(-n,x)
4.042192318847986561771779
>>> x*hyper([1]*(n+1), [2]*(n+1), x)
4.042192318847986561771779
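A sketch checking the stated relation to the Bell polynomials at a noninteger order (using :func:`~mpmath.sinc` for the sinc term)::

    from mpmath import mp, polyexp, bell, exp, sinc, pi, mpf
    mp.dps = 25
    s, z = mpf('0.5'), mpf('2.5')
    # E_s(z) = exp(z)*B_s(z) - sinc(pi*s)
    assert abs(polyexp(s, z) - (exp(z)*bell(s, z) - sinc(pi*s))) < mpf('1e-15')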
"""
cyclotomic = r"""
Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
.. math ::
\Phi_n(x) = \prod_{\zeta} (x - \zeta)
where `\zeta` ranges over all primitive `n`-th roots of unity
(see :func:`~mpmath.unitroots`). An equivalent representation, used
for computation, is
.. math ::
\Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
where `\mu(m)` denotes the Moebius function. The cyclotomic
polynomials are integer polynomials, the first of which can be
written explicitly as
.. math ::
\Phi_0(x) = 1
\Phi_1(x) = x - 1
\Phi_2(x) = x + 1
\Phi_3(x) = x^2 + x + 1
\Phi_4(x) = x^2 + 1
\Phi_5(x) = x^4 + x^3 + x^2 + x + 1
\Phi_6(x) = x^2 - x + 1
**Examples**
The coefficients of low-order cyclotomic polynomials can be recovered
using Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(9):
... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
...
0 [1.0]
1 [-1.0, 1.0]
2 [1.0, 1.0]
3 [1.0, 1.0, 1.0]
4 [1.0, 0.0, 1.0]
5 [1.0, 1.0, 1.0, 1.0, 1.0]
6 [1.0, -1.0, 1.0]
7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
8 [1.0, 0.0, 0.0, 0.0, 1.0]
The definition as a product over primitive roots may be checked
by computing the product explicitly (for a real argument, this
method will generally introduce numerical noise in the imaginary
part)::
>>> mp.dps = 25
>>> z = 3+4j
>>> cyclotomic(10, z)
(-419.0 - 360.0j)
>>> fprod(z-r for r in unitroots(10, primitive=True))
(-419.0 - 360.0j)
>>> z = 3
>>> cyclotomic(10, z)
61.0
>>> fprod(z-r for r in unitroots(10, primitive=True))
(61.0 - 3.146045605088568607055454e-25j)
Up to permutation, the roots of a given cyclotomic polynomial
can be checked to agree with the list of primitive roots::
>>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
>>> for r in polyroots(p[::-1]):
... print(r)
...
(0.5 - 0.8660254037844386467637232j)
(0.5 + 0.8660254037844386467637232j)
>>>
>>> for r in unitroots(6, primitive=True):
... print(r)
...
(0.5 + 0.8660254037844386467637232j)
(0.5 - 0.8660254037844386467637232j)
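The factorization `x^n - 1 = \prod_{d \mid n} \Phi_d(x)` (a standard identity) can also be checked numerically::

    from mpmath import mp, cyclotomic, fprod, mpf
    mp.dps = 25
    n, x = 12, mpf('1.25')
    divisors = [d for d in range(1, n + 1) if n % d == 0]
    # the product of cyclotomic polynomials over the divisors of n gives x**n - 1
    assert abs(fprod(cyclotomic(d, x) for d in divisors) - (x**n - 1)) < mpf('1e-15')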
"""
meijerg = r"""
Evaluates the Meijer G-function, defined as
.. math ::
G^{m,n}_{p,q} \left( \left. \begin{matrix}
a_1, \dots, a_n ; a_{n+1} \dots a_p \\
b_1, \dots, b_m ; b_{m+1} \dots b_q
\end{matrix}\; \right| \; z ; r \right) =
\frac{1}{2 \pi i} \int_L
\frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
{\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
z^{-s/r} ds
for an appropriate choice of the contour `L` (see references).
There are `p` elements `a_j`.
The argument *a_s* should be a pair of lists, the first containing the
`n` elements `a_1, \ldots, a_n` and the second containing
the `p-n` elements `a_{n+1}, \ldots a_p`.
There are `q` elements `b_j`.
The argument *b_s* should be a pair of lists, the first containing the
`m` elements `b_1, \ldots, b_m` and the second containing
the `q-m` elements `b_{m+1}, \ldots b_q`.
The implicit tuple `(m, n, p, q)` constitutes the order or degree of the
Meijer G-function, and is determined by the lengths of the coefficient
vectors. Confusingly, the indices in this tuple appear in a different order
from the coefficients, but this notation is standard. The many examples
given below should hopefully clear up any potential confusion.
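For instance, the order implied by a particular choice of coefficient lists can be read off as follows (the parameter values here are arbitrary placeholders)::

    # a_s = [[a_1..a_n], [a_{n+1}..a_p]],  b_s = [[b_1..b_m], [b_{m+1}..b_q]]
    a_s = [[0.5], [1.25, 2.0]]     # n = 1, p = 3
    b_s = [[0.75, 1.5], [3.0]]     # m = 2, q = 3
    m, n = len(b_s[0]), len(a_s[0])
    p, q = n + len(a_s[1]), m + len(b_s[1])
    print((m, n, p, q))            # prints (2, 1, 3, 3)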
**Algorithm**
The Meijer G-function is evaluated as a combination of hypergeometric series.
There are two versions of the function, which can be selected with
the optional *series* argument.
*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z`
*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z`
The default series is chosen based on the degree and `|z|` in order
to be consistent with Mathematica's convention. This definition of the Meijer G-function
has a discontinuity at `|z| = 1` for some orders, which can
be avoided by explicitly specifying a series.
Keyword arguments are forwarded to :func:`~mpmath.hypercomb`.
**Examples**
Many standard functions are special cases of the Meijer G-function
(possibly rescaled and/or with branch cut corrections). We define
some test parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a = mpf(0.75)
>>> b = mpf(1.5)
>>> z = mpf(2.25)
The exponential function:
`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \;
\right| \; -z \right)`
>>> meijerg([[],[]], [[0],[]], -z)
9.487735836358525720550369
>>> exp(z)
9.487735836358525720550369
The natural logarithm:
`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0
\end{matrix} \; \right| \; -z \right)`
>>> meijerg([[1,1],[]], [[1],[0]], z)
1.178654996341646117219023
>>> log(1+z)
1.178654996341646117219023
A rational function:
`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1
\end{matrix} \; \right| \; z \right)`
>>> meijerg([[1,1],[]], [[1],[1]], z)
0.6923076923076923076923077
>>> z/(z+1)
0.6923076923076923076923077
The sine and cosine functions:
`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)`
`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)`
>>> meijerg([[],[]], [[0.5],[0]], (z/2)**2)
0.4389807929218676682296453
>>> sin(z)/sqrt(pi)
0.4389807929218676682296453
>>> meijerg([[],[]], [[0],[0.5]], (z/2)**2)
-0.3544090145996275423331762
>>> cos(z)/sqrt(pi)
-0.3544090145996275423331762
Bessel functions:
`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left.
\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2}
\end{matrix} \; \right| \; z \right)`
`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; -z \right)`
`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
As the example with the Bessel *I* function shows, a branch
factor is required for some arguments when inverting the square root.
>>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2)
0.5059425789597154858527264
>>> besselj(a,z)
0.5059425789597154858527264
>>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2)
0.1853868950066556941442559
>>> bessely(a, z)
0.1853868950066556941442559
>>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> (-z)**(a/2) / z**(a/2) * besseli(a, z)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2)
0.09334163695597828403796071
>>> besselk(a,z)
0.09334163695597828403796071
Error functions:
`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left.
\begin{matrix} a \\ a-1, a-\frac{1}{2}
\end{matrix} \; \right| \; z, \frac{1}{2} \right)`
>>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5)
0.00172839843123091957468712
>>> sqrt(pi) * z**(2*a-2) * erfc(z)
0.00172839843123091957468712
A Meijer G-function of higher degree, (1,1,2,3):
>>> meijerg([[a],[b]], [[a],[b,a-1]], z)
1.55984467443050210115617
>>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1)
1.55984467443050210115617
A Meijer G-function of still higher degree, (4,1,2,4), that can
be expanded as a messy combination of exponential integrals:
>>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z)
0.3323667133658557271898061
>>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\
... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z)))
0.3323667133658557271898061
In the following case, different series give different values::
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2))
-0.06417628097442437076207337
>>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1)
0.1428699426155117511873047
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2))
-0.06417628097442437076207337
**References**
1. http://en.wikipedia.org/wiki/Meijer_G-function
2. http://mathworld.wolfram.com/MeijerG-Function.html
3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/
4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/
"""
clsin = r"""
Computes the Clausen sine function, defined formally by the series
.. math ::
\mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}.
The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical
"Clausen function". More generally, the Clausen function is defined for
complex `s` and `z`, even when the series does not converge. The
Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as
.. math ::
\mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) -
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}),
and this representation can be taken to provide the analytic continuation of the
series. The complementary function :func:`~mpmath.clcos` gives the corresponding
cosine sum.
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf])
-0.6533010136329338746275795
-0.6533010136329338746275795
Using `z + \pi` instead of `z` gives an alternating series::
>>> clsin(s, z+pi)
0.8860032351260589402871624
>>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf])
0.8860032351260589402871624
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clsin(1, z)
0.2047709230104579724675985
>>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j))
0.2047709230104579724675985
>>> nsum(lambda k: sin(k*z)/k, [1,inf])
0.2047709230104579724675985
The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the
value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for
`0 < \theta < 2 \pi`::
>>> cl2 = lambda t: clsin(2, t)
>>> cl2(3.5)
-0.2465045302347694216534255
>>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5])
-0.2465045302347694216534255
This function is symmetric about `\theta = \pi` with zeros and extreme
points::
>>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi))
0.0
1.014941606409653625021203
0.0
-1.014941606409653625021203
0.0
Catalan's constant is a special value::
>>> cl2(pi/2)
0.9159655941772190150546035
>>> +catalan
0.9159655941772190150546035
The Clausen sine function can be expressed in closed form when
`s` is an odd integer (becoming zero when `s < 0`)::
>>> z = 1 + sqrt(2)
>>> clsin(1, z); (pi-z)/2
0.3636895456083490948304773
0.3636895456083490948304773
>>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12
0.5661751584451144991707161
0.5661751584451144991707161
>>> clsin(-1, z)
0.0
>>> clsin(-3, z)
0.0
It can also be expressed in closed form for even integer `s \le 0`,
providing a finite sum for series such as
`\sin(z) + \sin(2z) + \sin(3z) + \ldots`::
>>> z = 1 + sqrt(2)
>>> clsin(0, z)
0.1903105029507513881275865
>>> cot(z/2)/2
0.1903105029507513881275865
>>> clsin(-2, z)
-0.1089406163841548817581392
>>> -cot(z/2)*csc(z/2)**2/4
-0.1089406163841548817581392
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clsin(3, 3*pi)
-8.892316224968072424732898e-26
>>> clsin(3, 3, pi=True)
0.0
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clsin(s, z)
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
>>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf])
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
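A sketch confirming the polylogarithm representation for real `s`, `z` (using an integer order, for which the continuation of :func:`~mpmath.polylog` on the unit circle is available)::

    from mpmath import mp, clsin, polylog, exp, im, j, mpf
    mp.dps = 25
    s, z = 3, mpf('0.75')
    # Cl_s(z) = Im Li_s(exp(i*z)) for real s, z
    assert abs(clsin(s, z) - im(polylog(s, exp(j*z)))) < mpf('1e-20')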
"""
clcos = r"""
Computes the Clausen cosine function, defined formally by the series
.. math ::
\mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}.
This function is complementary to the Clausen sine function
:func:`~mpmath.clsin`. In terms of the polylogarithm,
.. math ::
\mathrm{\widetilde{Cl}}_s(z) =
\frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) +
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}).
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf])
-0.6518926267198991308332759
-0.6518926267198991308332759
Using `z + \pi` instead of `z` gives an alternating series::
>>> s, z = 3, 0.5
>>> clcos(s, z+pi)
-0.8155530586502260817855618
>>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf])
-0.8155530586502260817855618
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clcos(1, z)
-0.6720334373369714849797918
>>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z))))
-0.6720334373369714849797918
>>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real
-0.6720334373369714849797918
>>> nsum(lambda k: cos(k*z)/k, [1,inf])
-0.6720334373369714849797918
It can also be expressed in closed form when `s` is an even integer.
For example,
>>> clcos(2,z)
-0.7805359025135583118863007
>>> pi**2/6 - pi*z/2 + z**2/4
-0.7805359025135583118863007
The case `s = 0` gives the renormalized sum of
`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for
any value of `z`)::
>>> clcos(0, z)
-0.5
>>> nsum(lambda k: cos(k*z), [1,inf])
-0.5
Also the sums
.. math ::
\cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots
and
.. math ::
\cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots
for higher integer powers `n = -s` can be done in closed form. They are zero
when `n` is positive and even (`s` negative and even)::
>>> clcos(-1, z); 1/(2*cos(z)-2)
-0.2607829375240542480694126
-0.2607829375240542480694126
>>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8
0.1472635054979944390848006
0.1472635054979944390848006
>>> clcos(-2, z); clcos(-4, z); clcos(-6, z)
0.0
0.0
0.0
With `z = \pi`, the series reduces to that of the Riemann zeta function
(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta
function values)::
>>> clcos(2.5, 0); zeta(2.5)
1.34148725725091717975677
1.34148725725091717975677
>>> clcos(2.5, pi); -altzeta(2.5)
-0.8671998890121841381913472
-0.8671998890121841381913472
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clcos(-3, 2*pi)
2.997921055881167659267063e+102
>>> clcos(-3, 2, pi=True)
0.008333333333333333333333333
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clcos(s, z)
(0.9407430121562251476136807 + 0.715826296033590204557054j)
>>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf])
(0.9407430121562251476136807 + 0.715826296033590204557054j)
"""
whitm = r"""
Evaluates the Whittaker function `M(k,m,z)`, which gives a solution
to the Whittaker differential equation
.. math ::
\frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+
\frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0.
A second solution is given by :func:`~mpmath.whitw`.
The Whittaker functions are defined in Abramowitz & Stegun, section 13.1.
They are alternate forms of the confluent hypergeometric functions
`\,_1F_1` and `U`:
.. math ::
M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
\,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z)
W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
U(\tfrac{1}{2}+m-k, 1+2m, z).
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitm(1, 1, 1)
0.7302596799460411820509668
>>> whitm(1, 1, -1)
(0.0 - 1.417977827655098025684246j)
>>> whitm(j, j/2, 2+3j)
(3.245477713363581112736478 - 0.822879187542699127327782j)
>>> whitm(2, 3, 100000)
4.303985255686378497193063e+21707
Evaluation at zero::
>>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0)
+inf
nan
0.0
We can verify that :func:`~mpmath.whitm` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitm(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`,
verifying evaluation along the real axis::
>>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf])
3.438869842576800225207341
>>> 128/(21*sqrt(pi))
3.438869842576800225207341
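A sketch checking the `\,_1F_1` representation quoted above::

    from mpmath import mp, whitm, hyp1f1, exp, power, mpf
    mp.dps = 25
    k, m, z = mpf('0.25'), mpf('1.5'), mpf('2.0')
    # M(k,m,z) = exp(-z/2) * z**(1/2+m) * 1F1(1/2+m-k, 1+2m, z)
    rhs = exp(-z/2) * power(z, mpf('0.5') + m) * hyp1f1(mpf('0.5') + m - k, 1 + 2*m, z)
    assert abs(whitm(k, m, z) - rhs) < mpf('1e-15')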
"""
whitw = r"""
Evaluates the Whittaker function `W(k,m,z)`, which gives a second
solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.)
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitw(1, 1, 1)
1.19532063107581155661012
>>> whitw(1, 1, -1)
(-0.9424875979222187313924639 - 0.2607738054097702293308689j)
>>> whitw(j, j/2, 2+3j)
(0.1782899315111033879430369 - 0.01609578360403649340169406j)
>>> whitw(2, 3, 100000)
1.887705114889527446891274e-21705
>>> whitw(-1, -1, 100)
1.905250692824046162462058e-24
Evaluation at zero::
>>> for m in [-1, -0.5, 0, 0.5, 1]:
... whitw(1, m, 0)
...
+inf
nan
0.0
nan
+inf
We can verify that :func:`~mpmath.whitw` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitw(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
"""
ber = r"""
Computes the Kelvin function ber, which for real arguments gives the real part
of the Bessel J function of a rotated argument
.. math ::
J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x).
The imaginary part is given by :func:`~mpmath.bei`.
**Plots**
.. literalinclude :: /plots/ber.py
.. image :: /plots/ber.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 3.5
>>> ber(n,x)
1.442338852571888752631129
>>> bei(n,x)
-0.948359035324558320217678
>>> besselj(n, x*root(1,8,3))
(1.442338852571888752631129 - 0.948359035324558320217678j)
The ber and bei functions are also defined by analytic continuation
for complex arguments::
>>> ber(1+j, 2+3j)
(4.675445984756614424069563 - 15.84901771719130765656316j)
>>> bei(1+j, 2+3j)
(15.83886679193707699364398 + 4.684053288183046528703611j)
"""
bei = r"""
Computes the Kelvin function bei, which for real arguments gives the
imaginary part of the Bessel J function of a rotated argument.
See :func:`~mpmath.ber`.
"""
ker = r"""
Computes the Kelvin function ker, which for real arguments gives the real part
of the (rescaled) Bessel K function of a rotated argument
.. math ::
e^{-\pi i/2} K_n\left(x e^{3\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
The imaginary part is given by :func:`~mpmath.kei`.
**Plots**
.. literalinclude :: /plots/ker.py
.. image :: /plots/ker.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 4.5
>>> ker(n,x)
0.02542895201906369640249801
>>> kei(n,x)
-0.02074960467222823237055351
>>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
(0.02542895201906369640249801 - 0.02074960467222823237055351j)
The ker and kei functions are also defined by analytic continuation
for complex arguments::
>>> ker(1+j, 3+4j)
(1.586084268115490421090533 - 2.939717517906339193598719j)
>>> kei(1+j, 3+4j)
(-2.940403256319453402690132 - 1.585621643835618941044855j)
"""
kei = r"""
Computes the Kelvin function kei, which for real arguments gives the
imaginary part of the (rescaled) Bessel K function of a rotated argument.
See :func:`~mpmath.ker`.
"""
struveh = r"""
Gives the Struve function
.. math ::
\,\mathbf{H}_n(z) =
\sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
\Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
which is a solution to the Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struveh(0, 3.5)
0.3608207733778295024977797
>>> struveh(-1, 10)
-0.255212719726956768034732
>>> struveh(1, -100.5)
0.5819566816797362287502246
>>> struveh(2.5, 10000000000000)
3153915652525200060.308937
>>> struveh(2.5, -10000000000000)
(0.0 - 3153915652525200060.308937j)
>>> struveh(1+j, 1000000+4000000j)
(-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
A Struve function of half-integer order is elementary; for example:
>>> z = 3
>>> struveh(0.5, 3)
0.9167076867564138178671595
>>> sqrt(2/(pi*z))*(1-cos(z))
0.9167076867564138178671595
Numerically verifying the differential equation::
>>> z = mpf(4.5)
>>> n = 3
>>> f = lambda z: struveh(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
17.40359302709875496632744
>>> rhs
17.40359302709875496632744
"""
struvel = r"""
Gives the modified Struve function
.. math ::
\,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
which solves the modified Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struvel(0, 3.5)
7.180846515103737996249972
>>> struvel(-1, 10)
2670.994904980850550721511
>>> struvel(1, -100.5)
1.757089288053346261497686e+42
>>> struvel(2.5, 10000000000000)
4.160893281017115450519948e+4342944819025
>>> struvel(2.5, -10000000000000)
(0.0 - 4.160893281017115450519948e+4342944819025j)
>>> struvel(1+j, 700j)
(-0.1721150049480079451246076 + 0.1240770953126831093464055j)
>>> struvel(1+j, 1000000+4000000j)
(-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
Numerically verifying the differential equation::
>>> z = mpf(3.5)
>>> n = 3
>>> f = lambda z: struvel(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
6.368850306060678353018165
>>> rhs
6.368850306060678353018165
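A sketch checking the defining relation to :func:`~mpmath.struveh` with a rotated argument::

    from mpmath import mp, struvel, struveh, exp, pi, j, mpf
    mp.dps = 25
    n, z = mpf('0.75'), mpf('1.5')
    # L_n(z) = -i * exp(-n*pi*i/2) * H_n(i*z)
    assert abs(struvel(n, z) - (-j*exp(-n*pi*j/2)*struveh(n, j*z))) < mpf('1e-15')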
"""
appellf1 = r"""
Gives the Appell F1 hypergeometric function of two variables,
.. math ::
F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
This series is only generally convergent when `|x| < 1` and `|y| < 1`,
although :func:`~mpmath.appellf1` can evaluate an analytic continuation
with respect to either variable, and sometimes both.
**Examples**
Evaluation is supported for real and complex parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf1(1,0,0.5,1,0.5,0.25)
1.154700538379251529018298
>>> appellf1(1,1+j,0.5,1,0.5,0.5j)
(1.138403860350148085179415 + 1.510544741058517621110615j)
For some integer parameters, the F1 series reduces to a polynomial::
>>> appellf1(2,-4,-3,1,2,5)
-816.0
>>> appellf1(-5,1,2,1,4,5)
-20528.0
The analytic continuation with respect to either `x` or `y`,
and sometimes with respect to both, can be evaluated::
>>> appellf1(2,3,4,5,100,0.5)
(0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
>>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
(-0.1782604566893954897128702 + 0.002472407104546216117161499j)
>>> appellf1(1,2,3,4,10,12)
-0.07122993830066776374929313
For certain arguments, F1 reduces to an ordinary hypergeometric function::
>>> appellf1(1,2,3,5,0.5,0.25)
1.547902270302684019335555
>>> 4*hyp2f1(1,2,5,'1/3')/3
1.547902270302684019335555
>>> appellf1(1,2,3,4,0,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
>>> hyp2f1(1,3,4,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
The F1 function satisfies a system of partial differential equations::
>>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
>>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*(1-x)*diff(F,(x,y),(1,1)) +
... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>>
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*(1-y)*diff(F,(x,y),(1,1)) +
... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
The Appell F1 function allows for closed-form evaluation of various
integrals, such as any integral of the form
`\int x^r (x+a)^p (x+b)^q dx`::
>>> def integral(a,b,p,q,r,x1,x2):
... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
... f = lambda x: x**r * (x+a)**p * (x+b)**q
... def F(x):
... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
... v *= (1+x/a)**(-p)
... v *= (1+x/b)**(-q)
... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
... return v
... print("Num. quad: %s" % quad(f, [x1,x2]))
... print("Appell F1: %s" % (F(x2)-F(x1)))
...
>>> integral('1/5','4/3','-2','3','1/2',0,1)
Num. quad: 9.073335358785776206576981
Appell F1: 9.073335358785776206576981
>>> integral('3/2','4/3','-2','3','1/2',0,1)
Num. quad: 1.092829171999626454344678
Appell F1: 1.092829171999626454344678
>>> integral('3/2','4/3','-2','3','1/2',12,25)
Num. quad: 1106.323225040235116498927
Appell F1: 1106.323225040235116498927
Also incomplete elliptic integrals fall into this category [1]::
>>> def E(z, m):
... if (pi/2).ae(z):
... return ellipe(m)
... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
...
>>> z, m = 1, 0.5
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
0.9273298836244400669659042
0.9273298836244400669659042
>>> z, m = 3, 2
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
(1.057495752337234229715836 + 1.198140234735592207439922j)
(1.057495752337234229715836 + 1.198140234735592207439922j)
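For small `|x|` and `|y|`, the defining double series converges quickly and can be summed directly (here truncated; :func:`~mpmath.rf` is the rising factorial)::

    from mpmath import mp, appellf1, rf, fac, mpf
    mp.dps = 15
    a, b1, b2, c = mpf(1), mpf('0.5'), mpf('0.25'), mpf('1.125')
    x, y = mpf('0.1'), mpf('-0.05')
    # truncated double sum of the defining series
    s = sum(rf(a, m+n)*rf(b1, m)*rf(b2, n)/rf(c, m+n) * x**m * y**n / (fac(m)*fac(n))
            for m in range(20) for n in range(20))
    assert abs(s - appellf1(a, b1, b2, c, x, y)) < mpf('1e-10')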
**References**
1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
2. [SrivastavaKarlsson]_
3. [CabralRosetti]_
4. [Vidunas]_
5. [Slater]_
"""
angerj = r"""
Gives the Anger function
.. math ::
\mathbf{J}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \cos(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> angerj(2,3)
0.4860912605858910769078311
>>> angerj(-3+4j, 2+5j)
(-5033.358320403384472395612 + 585.8011892476145118551756j)
>>> angerj(3.25, 1e6j)
(4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
>>> angerj(-1.5, 1e6)
0.0002795719747073879393087011
The Anger function coincides with the Bessel J-function when `\nu`
is an integer::
>>> angerj(1,3); besselj(1,3)
0.3390589585259364589255146
0.3390589585259364589255146
>>> angerj(1.5,3); besselj(1.5,3)
0.4088969848691080859328847
0.4777182150870917715515015
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: angerj(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-0.6002108774380707130367995
>>> (z-v)/(pi*z**2) * sinpi(v)
-0.6002108774380707130367995
Verifying the integral representation::
>>> angerj(v,z)
0.1145380759919333180900501
>>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
0.1145380759919333180900501
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
webere = r"""
Gives the Weber function
.. math ::
\mathbf{E}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \sin(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> webere(2,3)
-0.1057668973099018425662646
>>> webere(-3+4j, 2+5j)
(-585.8081418209852019290498 - 5033.314488899926921597203j)
>>> webere(3.25, 1e6j)
(-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j)
>>> webere(3.25, 1e6)
-0.00002812518265894315604914453
Up to addition of a rational function of `z`, the Weber function coincides
with the Struve H-function when `\nu` is an integer::
>>> webere(1,3); 2/pi-struveh(1,3)
-0.3834897968188690177372881
-0.3834897968188690177372881
>>> webere(5,3); 26/(35*pi)-struveh(5,3)
0.2009680659308154011878075
0.2009680659308154011878075
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: webere(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-1.097441848875479535164627
>>> -(z+v+(z-v)*cospi(v))/(pi*z**2)
-1.097441848875479535164627
Verifying the integral representation::
>>> webere(v,z)
0.1486507351534283744485421
>>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi])
0.1486507351534283744485421
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
lommels1 = r"""
Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}`
.. math ::
s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)}
\,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2};
-\frac{z^2}{4} \right)
which solves the inhomogeneous Bessel equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}.
A second solution is given by :func:`~mpmath.lommels2`.
**Plots**
.. literalinclude :: /plots/lommels1.py
.. image :: /plots/lommels1.png
**Examples**
An integral representation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> u,v,z = 0.25, 0.125, mpf(0.75)
>>> lommels1(u,v,z)
0.4276243877565150372999126
>>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \
... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2)
0.4276243877565150372999126
A special value::
>>> lommels1(v,v,z)
0.5461221367746048054932553
>>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z)
0.5461221367746048054932553
Verifying the differential equation::
>>> f = lambda z: lommels1(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6979536443265746992059141
>>> z**(u+1)
0.6979536443265746992059141
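A sketch checking the `\,_1F_2` representation quoted above (via :func:`~mpmath.hyp1f2`)::

    from mpmath import mp, lommels1, hyp1f2, mpf
    mp.dps = 25
    u, v, z = mpf('0.25'), mpf('0.125'), mpf('0.75')
    # s_{u,v}(z) = z**(u+1)/((u-v+1)*(u+v+1)) * 1F2(1; (u-v+3)/2, (u+v+3)/2; -z**2/4)
    rhs = z**(u+1)/((u-v+1)*(u+v+1)) * hyp1f2(1, (u-v+3)/2, (u+v+3)/2, -z**2/4)
    assert abs(lommels1(u, v, z) - rhs) < mpf('1e-15')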
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
lommels2 = r"""
Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}`
.. math ::
S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1}
\Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right)
\Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times
\left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) -
\cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z)
\right]
which solves the same differential equation as
:func:`~mpmath.lommels1`.
**Plots**
.. literalinclude :: /plots/lommels2.py
.. image :: /plots/lommels2.png
**Examples**
For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> lommels2(10,2,30000)
1.968299831601008419949804e+40
>>> power(30000,9)
1.9683e+40
A special value::
>>> u,v,z = 0.5, 0.125, mpf(0.75)
>>> lommels2(v,v,z)
0.9589683199624672099969765
>>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5)
0.9589683199624672099969765
Verifying the differential equation::
>>> f = lambda z: lommels2(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6495190528383289850727924
>>> z**(u+1)
0.6495190528383289850727924
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
appellf2 = r"""
Gives the Appell F2 hypergeometric function of two variables
.. math ::
F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| + |y| < 1`.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf2(1,2,3,4,5,0.25,0.125)
1.257417193533135344785602
>>> appellf2(1,-3,-4,2,3,2,3)
-42.8
>>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25)
(0.9880539519421899867041719 + 0.01497616165031102661476978j)
>>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25))
1.201311219287411337955192
>>> appellf2(1,1,1,4,6,0.125,16)
(-0.09455532250274744282125152 - 0.7647282253046207836769297j)
A transformation formula::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125])
>>> appellf2(a,b1,b2,c1,c2,x,y)
0.2299211717841180783309688
>>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x))
0.2299211717841180783309688
A system of partial differential equations satisfied by F2::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625])
>>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf3 = r"""
Gives the Appell F3 hypergeometric function of two variables
.. math ::
F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| < 1, |y| < 1`.
**Examples**
Evaluation for various parameters and variables::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf3(1,2,3,4,5,0.5,0.25)
2.221557778107438938158705
>>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
>>> appellf3(1,-2,-3,1,1,4,6)
-17.4
>>> appellf3(1,2,-3,1,1,4,6)
(17.7876136773677356641825 + 19.54768762233649126154534j)
>>> appellf3(1,2,-3,1,1,6,4)
(85.02054175067929402953645 + 148.4402528821177305173599j)
>>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25))
1.719992169545200286696007
Many transformations and evaluations for special combinations
of the parameters are possible, e.g.:
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf3(a,c-a,b,c-b,c,x,y)
1.093432340896087107444363
>>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y)
1.093432340896087107444363
>>> x**2*appellf3(1,1,1,1,3,x,-x)
0.01568646277445385390945083
>>> polylog(2,x**2)
0.01568646277445385390945083
>>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125])
>>> appellf3(a1,a2,b1,b2,c,x,1)
1.03947361709111140096947
>>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x)
1.03947361709111140096947
The Appell F3 function satisfies a pair of partial
differential equations::
>>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625])
>>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*diff(F,(x,y),(1,1)) +
... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) -
... a1*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*diff(F,(x,y),(1,1)) +
... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) -
... a2*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf4 = r"""
Gives the Appell F4 hypergeometric function of two variables
.. math ::
F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for
`\sqrt{|x|} + \sqrt{|y|} < 1`.
**Examples**
Evaluation for various parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf4(1,1,2,2,0.25,0.125)
1.286182069079718313546608
>>> appellf4(-2,-3,4,5,4,5)
34.8
>>> appellf4(5,4,2,3,0.25j,-0.125j)
(-0.2585967215437846642163352 + 2.436102233553582711818743j)
Reduction to `\,_2F_1` in a special case::
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x))
1.129143488466850868248364
>>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y)
1.129143488466850868248364
A system of partial differential equations satisfied by F4::
>>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625])
>>> F = lambda x,y: appellf4(a,b,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... y**2*diff(F,(x,y),(0,2)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) -
... ((a+b+1)*y)*diff(F,(x,y),(0,1)) -
... a*b*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x**2*diff(F,(x,y),(2,0)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) -
... ((a+b+1)*x)*diff(F,(x,y),(1,0)) -
... a*b*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
zeta = r"""
Computes the Riemann zeta function
.. math ::
\zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
or, with `a \ne 1`, the more general Hurwitz zeta function
.. math ::
\zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}.
Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with
respect to `s`,
.. math ::
\zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}.
Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz
zeta functions are defined through analytic continuation for arbitrary
complex `s \ne 1` (`s = 1` is a pole).
The implementation uses three algorithms: the Borwein algorithm for
the Riemann zeta function when `s` is close to the real line;
the Riemann-Siegel formula for the Riemann zeta function when `s` is
large imaginary, and Euler-Maclaurin summation in all other cases.
The reflection formula for `\Re(s) < 0` is implemented in some cases.
The algorithm can be chosen with ``method = 'borwein'``,
``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``.
The parameter `a` is usually a rational number `a = p/q`, and may be specified
as such by passing an integer tuple `(p, q)`. Evaluation is supported for
arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for
nonrational `a` or when computing derivatives.
**Examples**
Some values of the Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> zeta(2); pi**2 / 6
1.644934066848226436472415
1.644934066848226436472415
>>> zeta(0)
-0.5
>>> zeta(-1)
-0.08333333333333333333333333
>>> zeta(-2)
0.0
For large positive `s`, `\zeta(s)` rapidly approaches 1::
>>> zeta(50)
1.000000000000000888178421
>>> zeta(100)
1.0
>>> zeta(inf)
1.0
>>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler
0.5772156649015328606065121
0.5772156649015328606065121
>>> nsum(lambda k: zeta(k)-1, [2, inf])
1.0
Evaluation is supported for complex `s` and `a`::
>>> zeta(-3+4j)
(-0.03373057338827757067584698 + 0.2774499251557093745297677j)
>>> zeta(2+3j, -1+j)
(389.6841230140842816370741 + 295.2674610150305334025962j)
The Riemann zeta function has so-called nontrivial zeros on
the critical line `s = 1/2 + it`::
>>> findroot(zeta, 0.5+14j); zetazero(1)
(0.5 + 14.13472514173469379045725j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+21j); zetazero(2)
(0.5 + 21.02203963877155499262848j)
(0.5 + 21.02203963877155499262848j)
>>> findroot(zeta, 0.5+25j); zetazero(3)
(0.5 + 25.01085758014568876321379j)
(0.5 + 25.01085758014568876321379j)
>>> chop(zeta(zetazero(10)))
0.0
Evaluation on and near the critical line is supported for large
heights `t` by means of the Riemann-Siegel formula (currently
for `a = 1`, `n \le 4`)::
>>> zeta(0.5+100000j)
(1.073032014857753132114076 + 5.780848544363503984261041j)
>>> zeta(0.75+1000000j)
(0.9535316058375145020351559 + 0.9525945894834273060175651j)
>>> zeta(0.5+10000000j)
(11.45804061057709254500227 - 8.643437226836021723818215j)
>>> zeta(0.5+100000000j, derivative=1)
(51.12433106710194942681869 + 43.87221167872304520599418j)
>>> zeta(0.5+100000000j, derivative=2)
(-444.2760822795430400549229 - 896.3789978119185981665403j)
>>> zeta(0.5+100000000j, derivative=3)
(3230.72682687670422215339 + 14374.36950073615897616781j)
>>> zeta(0.5+100000000j, derivative=4)
(-11967.35573095046402130602 - 218945.7817789262839266148j)
>>> zeta(1+10000000j) # off the line
(2.859846483332530337008882 + 0.491808047480981808903986j)
>>> zeta(1+10000000j, derivative=1)
(-4.333835494679647915673205 - 0.08405337962602933636096103j)
>>> zeta(1+10000000j, derivative=4)
(453.2764822702057701894278 - 581.963625832768189140995j)
For investigation of the zeta function zeros, the Riemann-Siegel
Z-function is often more convenient than working with the Riemann
zeta function directly (see :func:`~mpmath.siegelz`).
Some values of the Hurwitz zeta function::
>>> zeta(2, 3); -5./4 + pi**2/6
0.3949340668482264364724152
0.3949340668482264364724152
>>> zeta(2, (3,4)); pi**2 - 8*catalan
2.541879647671606498397663
2.541879647671606498397663
For positive integer values of `s`, the Hurwitz zeta function is
equivalent to a polygamma function (except for a normalizing factor)::
>>> zeta(4, (1,5)); psi(3, '1/5')/6
625.5408324774542966919938
625.5408324774542966919938
Evaluation of derivatives::
>>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2
(-2.675565317808456852310934 + 4.742664438034657928194889j)
(-2.675565317808456852310934 + 4.742664438034657928194889j)
>>> zeta(2, 1, 20)
2432902008176640000.000242
>>> zeta(3+4j, 5.5+2j, 4)
(-0.140075548947797130681075 - 0.3109263360275413251313634j)
>>> zeta(0.5+100000j, 1, 4)
(-10407.16081931495861539236 + 13777.78669862804508537384j)
>>> zeta(-100+0.5j, (1,3), derivative=4)
(4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j)
Generating a Taylor series at `s = 2` using derivatives::
>>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k))
...
1.644934066848226436472415 * (s-2)^0
-0.9375482543158437537025741 * (s-2)^1
0.9946401171494505117104293 * (s-2)^2
-1.000024300473840810940657 * (s-2)^3
1.000061933072352565457512 * (s-2)^4
-1.000006869443931806408941 * (s-2)^5
1.000000173233769531820592 * (s-2)^6
-0.9999999569989868493432399 * (s-2)^7
0.9999999937218844508684206 * (s-2)^8
-0.9999999996355013916608284 * (s-2)^9
1.000000000004610645020747 * (s-2)^10
Evaluation at zero and for negative integer `s`::
>>> zeta(0, 10)
-9.5
>>> zeta(-2, (2,3)); mpf(1)/81
0.01234567901234567901234568
0.01234567901234567901234568
>>> zeta(-3+4j, (5,4))
(0.2899236037682695182085988 + 0.06561206166091757973112783j)
>>> zeta(-3.25, 1/pi)
-0.0005117269627574430494396877
>>> zeta(-3.5, pi, 1)
11.156360390440003294709
>>> zeta(-100.5, (8,3))
-4.68162300487989766727122e+77
>>> zeta(-10.5, (-8,3))
(-0.01521913704446246609237979 + 29907.72510874248161608216j)
>>> zeta(-1000.5, (-8,3))
(1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j)
>>> zeta(-1+j, 3+4j)
(-16.32988355630802510888631 - 22.17706465801374033261383j)
>>> zeta(-1+j, 3+4j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
>>> diff(lambda s: zeta(s, 3+4j), -1+j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
**References**
1. http://mathworld.wolfram.com/RiemannZetaFunction.html
2. http://mathworld.wolfram.com/HurwitzZetaFunction.html
3. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf
"""
dirichlet = r"""
Evaluates the Dirichlet L-function
.. math ::
L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}.
where `\chi` is a periodic sequence of length `q` which should be supplied
in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`.
Strictly, `\chi` should be a Dirichlet character, but any periodic
sequence will work.
For example, ``dirichlet(s, [1])`` gives the ordinary
Riemann zeta function and ``dirichlet(s, [-1,1])`` gives
the alternating zeta function (Dirichlet eta function).
Also the derivative with respect to `s` (currently only a first
derivative) can be evaluated.
**Examples**
The ordinary Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> dirichlet(3, [1]); zeta(3)
1.202056903159594285399738
1.202056903159594285399738
>>> dirichlet(1, [1])
+inf
The alternating zeta function::
>>> dirichlet(1, [-1,1]); ln(2)
0.6931471805599453094172321
0.6931471805599453094172321
The following defines the Dirichlet beta function
`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies
several values of this function::
>>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d)
>>> B(0); 1./2
0.5
0.5
>>> B(1); pi/4
0.7853981633974483096156609
0.7853981633974483096156609
>>> B(2); +catalan
0.9159655941772190150546035
0.9159655941772190150546035
>>> B(2,1); diff(B, 2)
0.08158073611659279510291217
0.08158073611659279510291217
>>> B(-1,1); 2*catalan/pi
0.5831218080616375602767689
0.5831218080616375602767689
>>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2)))
0.3915943927068367764719453
0.3915943927068367764719454
>>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25)))
0.1929013167969124293631898
0.1929013167969124293631898
A custom L-series of period 3::
>>> dirichlet(2, [2,0,1])
0.7059715047839078092146831
>>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \
... nsum(lambda k: (3*k+2)**-2, [0,inf])
0.7059715047839078092146831
"""
coulombf = r"""
Calculates the regular Coulomb wave function
.. math ::
F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz)
where the normalization constant `C_l(\eta)` is as calculated by
:func:`~mpmath.coulombc`. This function solves the differential equation
.. math ::
f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0.
A second linearly independent solution is given by the irregular
Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`)
and thus the general solution is
`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary
constants `C_1`, `C_2`.
Physically, the Coulomb wave functions give the radial solution
to the Schrodinger equation for a point particle in a `1/z` potential; `z` is
then the radius and `l`, `\eta` are quantum numbers.
The Coulomb wave functions with real parameters are defined
in Abramowitz & Stegun, section 14. However, all parameters are permitted
to be complex in this implementation (see references).
**Plots**
.. literalinclude :: /plots/coulombf.py
.. image :: /plots/coulombf.png
.. literalinclude :: /plots/coulombf_c.py
.. image :: /plots/coulombf_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombf(2, 1.5, 3.5)
0.4080998961088761187426445
>>> coulombf(-2, 1.5, 3.5)
0.7103040849492536747533465
>>> coulombf(2, 1.5, '1e-10')
4.143324917492256448770769e-33
>>> coulombf(2, 1.5, 1000)
0.4482623140325567050716179
>>> coulombf(2, 1.5, 10**10)
-0.066804196437694360046619
Verifying the differential equation::
>>> l, eta, z = 2, 3, mpf(2.75)
>>> A, B = 1, 2
>>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
>>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
0.0
A Wronskian relation satisfied by the Coulomb wave functions::
>>> l = 2
>>> eta = 1.5
>>> F = lambda z: coulombf(l,eta,z)
>>> G = lambda z: coulombg(l,eta,z)
>>> for z in [3.5, -1, 2+3j]:
... chop(diff(F,z)*G(z) - F(z)*diff(G,z))
...
1.0
1.0
1.0
Another Wronskian relation::
>>> F = coulombf
>>> G = coulombg
>>> for z in [3.5, -1, 2+3j]:
... chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
...
0.0
0.0
0.0
An integral identity connecting the regular and irregular wave functions::
>>> l, eta, z = 4+j, 2-j, 5+2j
>>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
>>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
>>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
Some test cases with complex parameters, taken from Michel [2]::
>>> mp.dps = 15
>>> coulombf(1+0.1j, 50+50j, 100.156)
(-1.02107292320897e+15 - 2.83675545731519e+15j)
>>> coulombg(1+0.1j, 50+50j, 100.156)
(2.83675545731519e+15 - 1.02107292320897e+15j)
>>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
(4.30566371247811e-14 - 9.03347835361657e-19j)
>>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
(778709182061.134 + 18418936.2660553j)
The following reproduces a table in Abramowitz & Stegun, at twice
the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [5, 4, 3, 2, 1, 0]:
... print("%s %s %s" % (l, coulombf(l,eta,z),
... diff(lambda z: coulombf(l,eta,z), z)))
...
5 0.09079533488 0.1042553261
4 0.2148205331 0.2029591779
3 0.4313159311 0.320534053
2 0.7212774133 0.3952408216
1 0.9935056752 0.3708676452
0 1.143337392 0.2937960375
**References**
1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
2. N. Michel, "Precise Coulomb wave functions for a wide range of
complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
"""
coulombg = r"""
Calculates the irregular Coulomb wave function
.. math ::
G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
See :func:`~mpmath.coulombf` for additional information.
**Plots**
.. literalinclude :: /plots/coulombg.py
.. image :: /plots/coulombg.png
.. literalinclude :: /plots/coulombg_c.py
.. image :: /plots/coulombg_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombg(-2, 1.5, 3.5)
1.380011900612186346255524
>>> coulombg(2, 1.5, 3.5)
1.919153700722748795245926
>>> coulombg(-2, 1.5, '1e-10')
201126715824.7329115106793
>>> coulombg(-2, 1.5, 1000)
0.1802071520691149410425512
>>> coulombg(-2, 1.5, 10**10)
0.652103020061678070929794
The following reproduces a table in Abramowitz & Stegun,
at twice the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [1, 2, 3, 4, 5]:
... print("%s %s %s" % (l, coulombg(l,eta,z),
... -diff(lambda z: coulombg(l,eta,z), z)))
...
1 1.08148276 0.6028279961
2 1.496877075 0.5661803178
3 2.048694714 0.7959909551
4 3.09408669 1.731802374
5 5.629840456 4.549343289
Evaluation close to the singularity at `z = 0`::
>>> mp.dps = 15
>>> coulombg(0,10,1)
3088184933.67358
>>> coulombg(0,10,'1e-10')
5554866000719.8
>>> coulombg(0,10,'1e-100')
5554866221524.1
Evaluation with a half-integer value for `l`::
>>> coulombg(1.5, 1, 10)
0.852320038297334
"""
coulombc = r"""
Gives the normalizing Gamow constant for Coulomb wave functions,
.. math ::
C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
\ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
where the log gamma function with continuous imaginary part
away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
This function is used internally for the calculation of
Coulomb wave functions, and automatically cached to make multiple
evaluations with fixed `l`, `\eta` fast.
"""
ellipfun = r"""
Computes any of the Jacobi elliptic functions, defined
in terms of Jacobi theta functions as
.. math ::
\mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
\mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
\mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
\frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
or more generally computes a ratio of two such functions. Here
`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
:func:`~mpmath.nome`). Optionally, you can specify the nome directly
instead of `m` by passing ``q=<value>``, or you can directly
specify the elliptic modulus `k` with ``k=<value>``.
The first argument should be a two-character string specifying the
function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
letters respectively denote the basic functions
`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
The identifier specifies the ratio of two such functions.
For example, ``'ns'`` identifies the function
.. math ::
\mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
and ``'cd'`` identifies the function
.. math ::
\mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
If called with only the first argument, a function object
evaluating the chosen function for given arguments is returned.
**Examples**
Basic evaluation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipfun('cd', 3.5, 0.5)
-0.9891101840595543931308394
>>> ellipfun('cd', 3.5, q=0.25)
0.07111979240214668158441418
The sn-function is doubly periodic in the complex plane with periods
`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`)::
>>> sn = ellipfun('sn')
>>> sn(2, 0.25)
0.9628981775982774425751399
>>> sn(2+4*ellipk(0.25), 0.25)
0.9628981775982774425751399
>>> chop(sn(2+2*j*ellipk(1-0.25), 0.25))
0.9628981775982774425751399
The cn-function is doubly periodic with periods `4 K(m)` and `4 i K(1-m)`::
>>> cn = ellipfun('cn')
>>> cn(2, 0.25)
-0.2698649654510865792581416
>>> cn(2+4*ellipk(0.25), 0.25)
-0.2698649654510865792581416
>>> chop(cn(2+4*j*ellipk(1-0.25), 0.25))
-0.2698649654510865792581416
The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`::
>>> dn = ellipfun('dn')
>>> dn(2, 0.25)
0.8764740583123262286931578
>>> dn(2+2*ellipk(0.25), 0.25)
0.8764740583123262286931578
>>> chop(dn(2+4*j*ellipk(1-0.25), 0.25))
0.8764740583123262286931578
"""
jtheta = r"""
Computes the Jacobi theta function `\vartheta_n(z, q)`, where
`n = 1, 2, 3, 4`, defined by the infinite series:
.. math ::
\vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
(-1)^n q^{n^2+n\,} \sin((2n+1)z)
\vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
q^{n^{2\,} + n} \cos((2n+1)z)
\vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
q^{n^2\,} \cos(2 n z)
\vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
(-q)^{n^2\,} \cos(2 n z)
The theta functions are functions of two variables:
* `z` is the *argument*, an arbitrary real or complex number
* `q` is the *nome*, which must be a real or complex number
in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the
series converge very quickly, so the Jacobi theta functions
can efficiently be evaluated to high precision.
The compact notations `\vartheta_n(q) = \vartheta_n(0,q)`
and `\vartheta_n = \vartheta_n(0,q)` are also frequently
encountered. Finally, Jacobi theta functions are frequently
considered as functions of the half-period ratio `\tau`
and then usually denoted by `\vartheta_n(z|\tau)`.
Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes
a `d`-th derivative with respect to `z`.
**Examples and basic properties**
Considered as functions of `z`, the Jacobi theta functions may be
viewed as generalizations of the ordinary trigonometric functions
cos and sin. They are periodic functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> jtheta(1, 0.25, '0.2')
0.2945120798627300045053104
>>> jtheta(1, 0.25 + 2*pi, '0.2')
0.2945120798627300045053104
Indeed, the series defining the theta functions are essentially
trigonometric Fourier series. The coefficients can be retrieved
using :func:`~mpmath.fourier`::
>>> mp.dps = 10
>>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])
The Jacobi theta functions are also so-called quasiperiodic
functions of `z` and `\tau`, meaning that for fixed `\tau`,
`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
except for an exponential factor::
>>> mp.dps = 25
>>> tau = 3*j/10
>>> q = exp(pi*j*tau)
>>> z = 10
>>> jtheta(4, z+tau*pi, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
>>> -exp(-2*j*z)/q * jtheta(4, z, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
The Jacobi theta functions satisfy a huge number of other
functional equations, such as the following identity (valid for
any `q`)::
>>> q = mpf(3)/10
>>> jtheta(3,0,q)**4
6.823744089352763305137427
>>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4
6.823744089352763305137427
Extensive listings of identities satisfied by the Jacobi theta
functions can be found in standard reference works.
The Jacobi theta functions are related to the gamma function
for special arguments::
>>> jtheta(3, 0, exp(-pi))
1.086434811213308014575316
>>> pi**(1/4.) / gamma(3/4.)
1.086434811213308014575316
:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex
arguments::
>>> mp.dps = 50
>>> jtheta(4, sqrt(2), 0.5)
2.0549510717571539127004115835148878097035750653737
>>> mp.dps = 25
>>> jtheta(4, 1+2j, (1+j)/5)
(7.180331760146805926356634 - 1.634292858119162417301683j)
Evaluation of derivatives::
>>> mp.dps = 25
>>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7)
1.209857192844475388637236
1.209857192844475388637236
>>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2)
-0.2598718791650217206533052
-0.2598718791650217206533052
>>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7)
-1.150231437070259644461474
-1.150231437070259644461474
>>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2)
-0.6226636990043777445898114
-0.6226636990043777445898114
>>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7)
-0.9990312046096634316587882
-0.9990312046096634316587882
>>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2)
-0.1530388693066334936151174
-0.1530388693066334936151174
>>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7)
0.9820995967262793943571139
0.9820995967262793943571139
>>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2)
0.3936902850291437081667755
0.3936902850291437081667755
**Possible issues**
For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises
``ValueError``. This exception is also raised for `|q|` extremely
close to 1 (or equivalently `\tau` very close to 0), since the
series would converge too slowly::
>>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
Traceback (most recent call last):
...
ValueError: abs(q) > THETA_Q_LIM = 1.000000
"""
eulernum = r"""
Gives the `n`-th Euler number, defined as the `n`-th derivative of
`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the
Euler numbers give the coefficients of the Taylor series
.. math ::
\mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n.
The Euler numbers are closely related to Bernoulli numbers
and Bernoulli polynomials. They can also be evaluated in terms of
Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`.
**Examples**
Computing the first few Euler numbers and verifying that they
agree with the Taylor series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [eulernum(n) for n in range(11)]
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
>>> chop(diffs(sech, 0, 10))
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently
computes numerical approximations for large indices::
>>> eulernum(50)
-6.053285248188621896314384e+54
>>> eulernum(1000)
3.887561841253070615257336e+2371
>>> eulernum(10**20)
4.346791453661149089338186e+1936958564106659551331
Comparing with an asymptotic formula for the Euler numbers::
>>> n = 10**5
>>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n
3.69919063017432362805663e+436961
>>> eulernum(n)
3.699193712834466537941283e+436961
Pass ``exact=True`` to obtain exact values of Euler numbers as integers::
>>> print(eulernum(50, exact=True))
-6053285248188621896314383785111649088103498225146815121
>>> print(eulernum(200, exact=True) % 10**10)
1925859625
>>> eulernum(1001, exact=True)
0
"""
eulerpoly = r"""
Evaluates the Euler polynomial `E_n(z)`, defined by the generating function
representation
.. math ::
\frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}.
The Euler polynomials may also be represented in terms of
Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for
example
.. math ::
E_n(z) = \frac{2}{n+1} \left(
B_{n+1}(z)-2^{n+1}B_{n+1}\left(\frac{z}{2}\right)
\right).
Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
:func:`~mpmath.eulernum`).
**Examples**
Computing the coefficients of the first few Euler polynomials::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> for n in range(6):
... chop(taylor(lambda z: eulerpoly(n,z), 0, n))
...
[1.0]
[-0.5, 1.0]
[0.0, -1.0, 1.0]
[0.25, 0.0, -1.5, 1.0]
[0.0, 1.0, 0.0, -2.0, 1.0]
[-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
Evaluation for arbitrary `z`::
>>> eulerpoly(2,3)
6.0
>>> eulerpoly(5,4)
423.5
>>> eulerpoly(35, 11111111112)
3.994957561486776072734601e+351
>>> eulerpoly(4, 10+20j)
(-47990.0 - 235980.0j)
>>> eulerpoly(2, '-3.5e-5')
0.000035001225
>>> eulerpoly(3, 0.5)
0.0
>>> eulerpoly(55, -10**80)
-1.0e+4400
>>> eulerpoly(5, -inf)
-inf
>>> eulerpoly(6, -inf)
+inf
Computing Euler numbers::
>>> 2**26 * eulerpoly(26,0.5)
-4087072509293123892361.0
>>> eulernum(26)
-4087072509293123892361.0
Evaluation is accurate for large `n` and small `z`::
>>> eulerpoly(100, 0.5)
2.29047999988194114177943e+108
>>> eulerpoly(1000, 10.5)
3.628120031122876847764566e+2070
>>> eulerpoly(10000, 10.5)
1.149364285543783412210773e+30688
"""
spherharm = r"""
Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
.. math ::
Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
P_l^m(\cos \theta) e^{i m \phi}
where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
azimuthal coordinate on a sphere. Care should be used since many different
conventions for spherical coordinate variables are used.
Usually spherical harmonics are considered for `l \in \mathbb{N}`,
`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi`
are permitted to be complex numbers.
.. note ::
:func:`~mpmath.spherharm` returns a complex number, even if the value is
purely real.
**Plots**
.. literalinclude :: /plots/spherharm40.py
`Y_{4,0}`:
.. image :: /plots/spherharm40.png
`Y_{4,1}`:
.. image :: /plots/spherharm41.png
`Y_{4,2}`:
.. image :: /plots/spherharm42.png
`Y_{4,3}`:
.. image :: /plots/spherharm43.png
`Y_{4,4}`:
.. image :: /plots/spherharm44.png
**Examples**
Some low-order spherical harmonics with reference values::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> theta = pi/4
>>> phi = pi/3
>>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0)
(0.2820947917738781434740397 + 0.0j)
(0.2820947917738781434740397 + 0.0j)
>>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
>>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0)
(0.3454941494713354792652446 + 0.0j)
(0.3454941494713354792652446 + 0.0j)
>>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
With the normalization convention used, the spherical harmonics are orthonormal
on the unit sphere::
>>> sphere = [0,pi], [0,2*pi]
>>> dS = lambda t,p: fp.sin(t) # differential element
>>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p)
>>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p))
>>> l1 = l2 = 3; m1 = m2 = 2
>>> print(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))
(1+0j)
>>> m2 = 1 # m1 != m2
>>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)))
0.0
Evaluation is accurate for large orders::
>>> spherharm(1000,750,0.5,0.25)
(3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j)
Evaluation works with complex parameter values::
>>> spherharm(1+j, 2j, 2+3j, -0.5j)
(64.44922331113759992154992 + 1981.693919841408089681743j)
"""
scorergi = r"""
Evaluates the Scorer function
.. math ::
\operatorname{Gi}(z) =
\operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt +
\operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. Another
particular solution is given by the Scorer Hi-function
(:func:`~mpmath.scorerhi`). The two functions are related as
`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`.
**Plots**
.. literalinclude :: /plots/gi.py
.. image :: /plots/gi.png
.. literalinclude :: /plots/gi_c.py
.. image :: /plots/gi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3'))
0.2049755424820002450503075
0.2049755424820002450503075
>>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3'))
0.1494294524512754526382746
0.1494294524512754526382746
>>> scorergi(+inf); scorergi(-inf)
0.0
0.0
>>> scorergi(1)
0.2352184398104379375986902
>>> scorergi(-1)
-0.1166722172960152826494198
Evaluation for large arguments::
>>> scorergi(10)
0.03189600510067958798062034
>>> scorergi(100)
0.003183105228162961476590531
>>> scorergi(1000000)
0.0000003183098861837906721743873
>>> 1/(pi*1000000)
0.0000003183098861837906715377675
>>> scorergi(-1000)
-0.08358288400262780392338014
>>> scorergi(-100000)
0.02886866118619660226809581
>>> scorergi(50+10j)
(0.0061214102799778578790984 - 0.001224335676457532180747917j)
>>> scorergi(-50-10j)
(5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j)
>>> scorergi(100000j)
(-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j)
Verifying the connection between Gi and Hi::
>>> z = 0.25
>>> scorergi(z) + scorerhi(z)
0.7287469039362150078694543
>>> airybi(z)
0.7287469039362150078694543
Verifying the differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(diff(scorergi,z,2) - z*scorergi(z))
...
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorergi(z)
0.2447210432765581976910539
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1))
0.2447210432765581976910539
**References**
1. [DLMF]_ section 9.12: Scorer Functions
"""
scorerhi = r"""
Evaluates the second Scorer function
.. math ::
\operatorname{Hi}(z) =
\operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt -
\operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. See also
:func:`~mpmath.scorergi`.
**Plots**
.. literalinclude :: /plots/hi.py
.. image :: /plots/hi.png
.. literalinclude :: /plots/hi_c.py
.. image :: /plots/hi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3'))
0.4099510849640004901006149
0.4099510849640004901006149
>>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3'))
0.2988589049025509052765491
0.2988589049025509052765491
>>> scorerhi(+inf); scorerhi(-inf)
+inf
0.0
>>> scorerhi(1)
0.9722051551424333218376886
>>> scorerhi(-1)
0.2206696067929598945381098
Evaluation for large arguments::
>>> scorerhi(10)
455641153.5163291358991077
>>> scorerhi(100)
6.041223996670201399005265e+288
>>> scorerhi(1000000)
7.138269638197858094311122e+289529652
>>> scorerhi(-10)
0.0317685352825022727415011
>>> scorerhi(-100)
0.003183092495767499864680483
>>> scorerhi(100j)
(-6.366197716545672122983857e-9 + 0.003183098861710582761688475j)
>>> scorerhi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> scorerhi(-1000-1000j)
(0.0001591549432510502796565538 - 0.000159154943091895334973109j)
Verifying the differential equation::
>>> for z in [-3.4, 0, 2, 1+2j]:
... chop(diff(scorerhi,z,2) - z*scorerhi(z))
...
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorerhi(z)
0.6095559998265972956089949
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1))
0.6095559998265972956089949
"""
|
NikNitro/Python-iBeacon-Scan
|
mpmath/function_docs.py
|
Python
|
gpl-3.0
| 275,627
|
[
"Gaussian"
] |
4c7973a9a764ba4b83df9715be93c73ad447015241d8b93a0f930643f6c805c9
|
__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findallgeoms(filename):
"""A function that takes a file name and returns a list of
geometries. Works with Gaussian output, haven't checked with
Q-Chem."""
relevantelem = [1,3,4,5]
xyzformat = '{:>2} {: f} {: f} {: f}'
geomregex = re.compile(
r'(?:Standard orientation)' # non-capturing (nc) start of geometry
r'(?:.+?)' # nc geometry header
r'((?:(?:\s+\d+\s+)' # nc atom number
r'(\d+\s+)' # (capturing) atomic number
r'(?:\d+\s+)' # nc atomic type
r'(-?\d+\.\d+\s*){3,3}' # 3 cartesian coordinates (x,y,z)
r')+)' # repeat for at least one atom
r'(?:-)' # nc end at line of dashes
, re.DOTALL)
with open(filename, 'r') as file:
allxyz = []
geoms = geomregex.finditer(file.read())
for geom in geoms:
thisxyz = []
mlgeom = geom.group(1)
for line in mlgeom.split('\n'):
# Ignore blank lines:
if len(line) < 2:
continue
xyzelemstring = [line.split()[i] for i in relevantelem]
xyzelemnum = [float(i) for i in xyzelemstring]
xyzelemnum[0] = int(xyzelemstring[0])
thisxyz.append(xyzformat.format(*xyzelemnum))
allxyz.append(thisxyz)
return(allxyz)
# I don't know if I like this format. It would be reasonable for
# Mathematica, but somewhat odd for Python. I guess for outputting
# it though it won't be terrible because I can just double
# iterate over the nested list, writing lines from the strings.
# I'll need to pick a separator for between geometries maybe but that's
# not a problem. Also with this format, should be easy to count number
# of atoms.
# Still need to have way to just find stationary points
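# Rough sketch of the double iteration mentioned above: write every geometry
# returned by findallgeoms to one multi-frame .xyz-style file, separating
# frames with the atom count plus a comment line.  The output file name and
# comment-line format are arbitrary choices; note the first column holds the
# atomic number (not an element symbol), as produced above.
def writeallgeoms(geomlist, outname="allgeoms.xyz"):
    with open(outname, 'w') as outfile:
        for index, geom in enumerate(geomlist):
            outfile.write('{}\n'.format(len(geom)))
            outfile.write('geometry {}\n'.format(index + 1))
            for atomline in geom:
                outfile.write(atomline + '\n')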
if __name__ == "__main__":
print(findallgeoms(filename))
# Ugly because returned as list of list of strings
|
thompcinnamon/QM-calc-scripts
|
gautools/geomRegex.py
|
Python
|
apache-2.0
| 2,092
|
[
"Gaussian",
"Q-Chem"
] |
60445747983305ecdae6481e59863f93aa6a78b844c1cf3fcfc3aa369ab297db
|
# -*- coding: utf-8 -*-
"""
nsaba.py: (N)euro(s)ynth, (A)llen (B)rain (A)tlas
Methods to analyze genome-scale gene expression data
from the Allen Human Brain Atlas in conjunction with
fMRI activation maps from Neurosynth
Authors: Simon Haxby & Torben Noto
"""
import pickle
import os
import itertools
import collections
import warnings
import numpy as np
import pandas as pd
from scipy import spatial
from scipy import signal
from sklearn.neighbors import RadiusNeighborsRegressor
from nsabatools import not_operational, preprint
class NsabaBase(object):
"""
Contains essential base data structures and methods which derived
Nsaba classes all depend upon.
Fields
------
_aba : Contains pandas.DataFrame objects representing the
structure of MicroarrayExpression.csv, SampleAnnot.csv and Probes.csv
(default names), as well as a numpy.array representing the MNI coordinates
of each location sampled by ABA.
_ns : Contains pandas.DataFrame objects representing Neurosynth's
database.txt and features.txt CSV-style fields.
Methods
-------
aba_load()
ns_load()
ns_load_id_dict()
WARNING: NsabaBase is not meant to be instantiated explicitly; only Nsaba should
be publicly interfaced.
"""
_aba = {
'exp_df': None,
'probe_df': None,
'si_df': None,
'mni_coords': None
}
_ns = {
'database_df': None,
'features_df': None,
}
@classmethod
@preprint('This may take a minute or two ...')
def aba_load(cls, aba_path=".", csv_names=None):
"""
Initialization of aba dictionary
Parameters
----------
aba_path : string, optional
Root directory of ABA files, named by default MicroarrayExpression.csv,
SampleAnnot.csv and Probes.csv.
csv_names: tuple-like, optional
Tuple specifying alternative names for MicroarrayExpression.csv, SampleAnnot.csv
and Probes.csv. (NOTE: order affects aba instantiation).
Default = ('MicroarrayExpression.csv', 'SampleAnnot.csv', 'Probes.csv').
"""
if not csv_names:
csv_names = [
'MicroarrayExpression.csv',
'SampleAnnot.csv',
'Probes.csv']
if len(csv_names) != 3:
raise IndexError("'csv_names' must be a list of 3 'str' variables")
if not isinstance(aba_path, str):
raise ValueError("'aba_path' must be a str.")
csv_path = os.path.join(aba_path, csv_names[1])
cls._aba['si_df'] = pd.read_csv(csv_path)
print '%s loaded.' % csv_names[1]
csv_path = os.path.join(aba_path, csv_names[0])
cls._aba['exp_df'] = pd.read_csv(csv_path, header=None)
print '%s loaded.' % csv_names[0]
cls._aba['exp_df'].columns = list(
itertools.chain.from_iterable(
[['probe_id'], range(cls._aba['si_df'].shape[0])]))
csv_path = os.path.join(aba_path, csv_names[2])
cls._aba['probe_df'] = pd.read_csv(csv_path)
print '%s loaded.' % csv_names[2]
mni_coords = cls._aba['si_df'].loc[:, 'mni_x':'mni_z'].as_matrix().astype(float)
cls._aba['mni_coords'] = spatial.KDTree(mni_coords)
print "Nsaba.aba['mni_coords'] initialized.\n"
@classmethod
@preprint('This may take a minute or two ...')
def ns_load(cls, ns_path=".", ns_files=None):
"""
Initialization of ns dictionary
Parameters
----------
ns_path : string, optional
Root directory of Neurosynth files, named by default database.txt and features.txt.
ns_files : tuple-like, optional
Tuple specifying alternative names for database.txt and features.txt
(NOTE: order affects ns instantiation). Default = ('database.txt', 'features.txt').
"""
if not ns_files:
ns_files = ('database.txt', 'features.txt')
if len(ns_files) != 2:
raise IndexError("'ns_files' must be a list of 2 'str' variables")
if not isinstance(ns_path, str):
raise ValueError("'ns_path' must be a str.")
df = pd.read_table(os.path.join(ns_path, ns_files[0]))
cls._ns['database_df'] = df.loc[df.space == 'MNI', ['id', 'x', 'y', 'z']]
print "%s loaded." % ns_files[0]
cls._ns['features_df'] = pd.read_table(os.path.join(ns_path, ns_files[1]))
print "%s loaded." % ns_files[1]
mni_coords = cls._ns['database_df'].loc[:, 'x':'z'].as_matrix().astype(float)
cls._ns['mni_coords'] = spatial.KDTree(mni_coords)
print "Nsaba.ns['mni_coords'] initialized.\n"
@classmethod
@preprint('This may take a minute or two ...')
def ns_load_id_dict(cls):
"""ID dictionary thing needed for doing some NS analyses"""
cls._check_static_members()
cls._ns['id_dict'] = {}
c = 0
for i in cls._ns['database_df'].loc[:, 'id']:
if i not in cls._ns['id_dict']:
cls._ns['id_dict'][i] = [(np.floor(cls._ns['database_df']['x'].iloc[c]),
np.floor(cls._ns['database_df']['y'].iloc[c]),
np.floor(cls._ns['database_df']['z'].iloc[c]))]
c += 1
else:
cls._ns['id_dict'][i].append((np.floor(cls._ns['database_df']['x'].iloc[c]),
np.floor(cls._ns['database_df']['y'].iloc[c]),
np.floor(cls._ns['database_df']['z'].iloc[c])))
c += 1
@classmethod
def _check_static_members(self):
""" Ensures Nsaba class is not instantiated without initalizing NsabaBase.aba and NsabaBase.ns."""
for val in self._aba.itervalues():
if val is None:
raise AttributeError("Unassigned Nsaba 'aba' static variable: see Nsaba.aba_load(path)")
for val in self._ns.itervalues():
if val is None:
raise AttributeError("Unassigned Nsaba 'ns' static variable: see Nsaba.ns_load(path)")
class Nsaba(NsabaBase):
"""
Principal Nsaba class.
Contains methods for data fetching and estimation.
"""
def __init__(self):
"""Nsaba init method; terminates instantiation if Nsaba.ns or Nsaba.aba are not loaded."""
self._check_static_members()
self.ge = {}
self.term = {}
self.ns_weight_f = lambda r: 1. / np.power(r, 2)
self._gaussian_weight_radius = 5
def get_ns_struct(self, key=None):
"""
Returns _ns internal Dictionary or specified sub-structure.
See class documentation for more information.
Parameters
----------
key: string, optional
Name of specified sub-structure of _ns if provided;
else _ns dictionary is returned.
"""
if not key:
return self._ns
else:
try:
return self._ns[key]
except KeyError:
opts = " / ".join(self._ns.keys())
raise KeyError("'key' argument invalid; options are: %s" % opts)
def get_aba_struct(self, key=None):
"""
Returns _aba internal dictionary or specified sub-structure.
See class documentation for more information.
Parameters
----------
key: string, optional
Name of specified sub-structure of _aba if provided;
else _ns dictionary is returned.
"""
if not key:
return self._aba
else:
try:
return self._aba[key]
except KeyError:
opts = " / ".join(self._aba.keys())
raise KeyError("'key' argument invalid; options are: %s" % opts)
def _check_entrez_struct(self, entrez_ids):
"""
Checks if 'entrez_ids' parameter is a non-str iterable; type-checking method.
Raises errors if entrez_ids is not as specified above; ensures that methods and
data structures use 'entrez_ids' are well-behaved.
Parameters
----------
entrez_ids: List-like
list-like structure containing NIH Entrez IDs.
"""
try:
iter(entrez_ids)
except TypeError:
raise TypeError("Invalid parameter form; please contain entrez ids in iterable container")
else:
if isinstance(entrez_ids, str):
raise TypeError("Invalid parameter form; please contain entrez ids in iterable container")
def _check_coords_for_distance_weighting(self, coords, check_radius, check_weights, X, y_mean):
"""
Checks that coords won't break the distance weighting function
"""
valid_inds = []
for coord in xrange(len(coords)):
temp = RadiusNeighborsRegressor(radius=check_radius, weights=check_weights)
temp.fit(X, y_mean)
try:
temp.predict([coords[coord]])
valid_inds.append(coord)
except ZeroDivisionError:
continue
return valid_inds
def _gaussian_weight_function(self, estimation_distances):
"""custom function to weight distance by gaussian smoothing"""
radius = self._gaussian_weight_radius
radius_gaussian = signal.gaussian(radius*2+1, radius/2.0, sym=True)
rad_fit_to_gaussian = radius_gaussian[radius:radius+radius+1]
weights = []
for ele in estimation_distances:
weights.append([rad_fit_to_gaussian[int(rad_i)] for rad_i in ele])
return weights
def estimate_aba_ge(self, entrez_ids, coords=None, **kwargs):
"""
Retrieves, estimates and stores gene expression coefficients in ABA dictionary based on a
a passed list of NIH Entrez IDs.
Parameters
----------
entrez_ids: List-like
list-like structure containing NIH Entrez IDs.
kwargs : dict, optional
OPTIONS:
'rnn_args' : dict
SKLearn RadiusNeighborsRegressor() optional arguments.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
for default arguments.
"""
self._check_entrez_struct(entrez_ids)
for entrez_id in entrez_ids:
# Fetch probe IDs for Entrez ID
probe_ids = self._aba['probe_df'].loc[self._aba['probe_df']['entrez_id']
== entrez_id]['probe_id'].tolist()
if len(probe_ids) == 0:
print 'Entrez ID: %s not registered with ABA database' % entrez_id
continue
# Return gene expression on given probes across sampled locations.
ge_df = self._aba['exp_df'].loc[self._aba['exp_df']['probe_id'].isin(probe_ids)]
ge_mat = ge_df.as_matrix().astype(float)[:, 1:]
# Take average gene expression across probes at a given sampled location.
ge_vec = np.mean(ge_mat, axis=0)
self.ge[entrez_id] = {}
for probe in probe_ids:
self.ge[entrez_id][probe] = {}
self.ge[entrez_id]["mean"] = {}
# z scoring method
if 'z_score' in kwargs:
for row in xrange(ge_mat.shape[0]):
ge_mat[row] = (ge_mat[row]-ge_mat[row].mean())/ge_mat[row].std()
ge_vec = (ge_vec-ge_vec.mean())/ge_vec.std()
if coords is None:
for row, probe in enumerate(probe_ids):
self.ge[entrez_id][probe]['GE'] = ge_mat[row]
self.ge[entrez_id]["mean"]['GE'] = ge_vec
self.ge[entrez_id]['coord_type'] = 'ABA'
# Estimate gene expression at custom coordinates
else:
X = self._aba['mni_coords'].data
y_mean = ge_vec
valid_inds = self._check_coords_for_distance_weighting(coords=coords, check_radius=kwargs.get('rnn_args', {}).get('radius', 5), check_weights='distance', X=X, y_mean=y_mean)
if 'rnn_args' in kwargs:
if 'radius' not in kwargs['rnn_args']:
kwargs['rnn_args']['radius'] = 5
if 'radius' in kwargs['rnn_args']:
if kwargs['rnn_args']['radius'] == 1:
kwargs['weights'] = 'uniform'
if 'weights' not in kwargs['rnn_args']:
kwargs['weights'] = 'uniform'
if 'weights' != 'distance':
self._gaussian_weight_radius = kwargs['rnn_args']['radius']
for row, probe in enumerate(probe_ids):
self.ge[entrez_id][probe]['classifier'] = RadiusNeighborsRegressor(**kwargs['rnn_args'])
self.ge[entrez_id]["mean"]['classifier'] = RadiusNeighborsRegressor(**kwargs['rnn_args'])
else:
for row, probe in enumerate(probe_ids):
self.ge[entrez_id][probe]['classifier'] = RadiusNeighborsRegressor(radius=5, weights='uniform')
self.ge[entrez_id]["mean"]['classifier'] = RadiusNeighborsRegressor(radius=5, weights='uniform')
for row, probe in enumerate(probe_ids):
self.ge[entrez_id][probe]['classifier'].fit(X, ge_mat[row])
self.ge[entrez_id]["mean"]['classifier'].fit(X, y_mean)
if 'store_coords' in kwargs:
if kwargs['store_coords']:
self.ge[entrez_id]['coords'] = coords
if 'coord_type' in kwargs:
self.ge[entrez_id]['coord_type'] = kwargs['coord_type']
else:
self.ge[entrez_id]['coord_type'] = 'Custom'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nan_array = np.empty(len(coords))
nan_array[:] = np.nan
for row, probe in enumerate(probe_ids):
self.ge[entrez_id][probe]["GE"] = nan_array
if len(valid_inds) > 0:
estimations = self.ge[entrez_id][probe]['classifier'].predict([coords[i] for i in valid_inds])
for vi in xrange(len(valid_inds)):
self.ge[entrez_id][probe]["GE"][valid_inds[vi]] = estimations[vi]
self.ge[entrez_id]["mean"]["GE"] = nan_array
if len(valid_inds) > 0:
estimations = self.ge[entrez_id]["mean"]['classifier'].predict([coords[i] for i in valid_inds])
for vi in xrange(len(valid_inds)):
self.ge[entrez_id]["mean"]["GE"][vi] = estimations[vi]
def ge_ratio(self, entrez_ids, coords=None, **kwargs):
"""
Calculates the ratio of gene expression at each ABA sampled MNI coordinate
or custom coordinates.
NOTE: This methods overwrites previously stored gene expression coefficients.
Parameters
----------
entrez_ids: (tuple-like) 2
Entrez IDs of genes whose expression ratio is to be calculated.
kwargs : dict, optional
OPTIONS:
'rnn_args' : dict
SKLearn RadiusNeighborsRegressor() optional arguments.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
for default arguments.
Returns
-------
ratio: np.array() [1 x N]
Array of ratios of gene expression at each ABA sampled MNI coordinate,
or at custom MNI coordinates where N is the number of sampled locations
or custom coordinates provided.
"""
if len(entrez_ids) != 2:
raise ValueError("Invalid parameter form: entrez_ids should be in a 2-tuple")
self._check_entrez_struct(entrez_ids)
self.estimate_aba_ge(entrez_ids, coords=coords, **kwargs)
ei1, ei2 = entrez_ids
ratio = self.ge[ei1]["mean"]['GE']/self.ge[ei2]["mean"]['GE']
return ratio
def pickle_ge(self, pkl_file="Nsaba_ABA_ge.pkl", output_dir='.'):
"""
Stores Nsaba.ge as pickle named by 'pkl_file' in directory 'output_dir'.
Parameters
----------
pkl_file: string, optional
Name of pickle file.
output_dir: string, optional
Name of directory the pickle is to be written to;
'/' automatically added via os.path.join.
"""
pickle.dump(self.ge, open(os.path.join(output_dir, pkl_file), 'wb'))
print "%s successfully created" % pkl_file
@preprint('This may take a minute or two...')
def load_ge_pickle(self, pkl_file="Nsaba_ABA_ge.pkl", path='.'):
"""
Loads pickle named by 'pkl_file' in directory 'output_dir' into Nsaba.ge.
Parameters
----------
pkl_file: string, optional
Name of pickle file.
path: string, optional
Path to directory the pickle is written to;
'/' automatically added via os.path.join.
"""
self.ge = pickle.load(open(os.path.join(path, pkl_file), 'rb'))
print "'ge' dictionary successfully loaded"
def pickle_ns(self, pkl_file="Nsaba_NS_act.pkl", output_dir='.'):
"""
Stores Nsaba.term as pickle named by 'pkl_file' in directory 'output_dir'.
Parameters
----------
pkl_file: string, optional
Name of pickle file.
output_dir: string, optional
Name of directory the pickle is to be written to;
'/' automatically added via os.path.join.
"""
pickle.dump(self.term, open(os.path.join(output_dir, pkl_file), 'wb'))
print "%s successfully created" % pkl_file
@preprint('This may take a minute or two ...')
def load_ns_pickle(self, pkl_file="Nsaba_NS_act.pkl", path='.'):
"""
Loads pickle named by 'pkl_file' in directory 'output_dir' into Nsaba.term.
Parameters
----------
pkl_file: string, optional
Name of pickle file.
path: string, optional
Path to directory the pickle is written to;
'/' automatically added via os.path.join.
"""
self.term = pickle.load(open(os.path.join(path, pkl_file), 'rb'))
print "term dictionary successfully loaded"
def is_gene(self, gene):
"""
Parameters
----------
gene: int
Checks whether gene is a registered NIH Entrez ID within ABA.
"""
if isinstance(gene, str):
raise ValueError("%s is a string; please pass as a numeric." % gene)
if gene in self._aba['probe_df']['entrez_id'].values:
return True
else:
return False
def is_term(self, term):
"""
Parameters
----------
term: string
Checks if this term is in the NS term database.
"""
if term in self._ns['features_df'].columns:
return True
else:
return False
def is_id(self, study_id):
"""
Parameters
----------
study_id: int
Checks if study_id is a registered NS study ID.
"""
if any(self._ns['features_df'].pmid == study_id) or \
any(self._ns['database_df'].id == study_id):
return True
else:
return False
def is_coord(self, coord):
"""
Parameters
----------
term: tuple-like (3)
Checks if an (x,y,z) MNI coordinate matches an NS data point.
"""
if len(coord) == 3 and not isinstance(coord, str):
if self._ns['mni_coords'].query(coord, distance_upper_bound=1)[0] == 0:
return True
else:
return False
else:
raise ValueError("MNI coordinate in improper form; must be 3-tuple-like")
def coord_to_ids(self, coord):
"""
Uses the study dictionary above to find NS study IDs from x,y,z coordinates.
Parameters
----------
coordinate: tuple-like (3)
Checks if an MNI (x,y,z) coordinate matches an NS data point.
Returns
-------
ids: list (int)
NS study IDs that have a data point corresponding to coord.
"""
try:
self._ns['id_dict']
except KeyError:
raise NameError("Study ID dictionary not initialized; see/call NsabaBase.ns_load_id_dict()")
ids = []
if len(coord) == 3 and not isinstance(coord, str):
for i, coords in self._ns['id_dict'].items():
for this_coordinate in coords:
if this_coordinate == tuple(coord):
if i not in ids:
if self.is_id(i):
ids.append(i)
return ids
else:
raise ValueError("Argument form improper; check function documentation.")
def id_to_ns_act(self, study_id):
"""
Returns activations for all terms for a given study.
Parameters
----------
study_id: int
int representing a paper/study in the NS framework.
Return
------
term_vector_off_by_1[1:]: numpy.array [1 x 3406]
Vector of term activations for all terms for a specified NS study.
"""
if self.is_id(study_id):
term_vector_off_by_1 = np.squeeze(self._ns['features_df'].loc[
self._ns['features_df'].pmid == study_id].as_matrix())
# Shifting to remove ID index from vector
return term_vector_off_by_1[1:]
else:
raise ValueError("Invalid NS study ID; check 'study_id' parameter")
def coord_to_ns_act(self, coord, return_type='list'):
"""
-- LEGACY --
Used to support visualize_ns_old(); itself unsupported.
Returns list of terms activations for a MNI coordinate
for all NS terms.
Parameters
----------
coord: tuple-like (3)
Reference MNI coordinate.
return_type: str
OPTIONS:
'dict': See Returns.
'list': Returns list of activations for each term.
OTHER: Raises ValueError.
Returns
-------
terms: dict OR list
A dictionary with (term: activation) key pairs for
the specified MNI coordinate.
"""
ids = self.coord_to_ids(coord)
if len(ids) == 1:
terms = self.id_to_ns_act(ids)
elif len(ids) > 1:
temp = []
for multiple_id in ids:
temp.append(self.id_to_ns_act(multiple_id))
terms = np.mean(temp, 0)
else:
return []
# [1:] to remove 'PMID' column header
if return_type == 'dict':
return {term: act for term, act in zip(self._ns['features_df'].columns[1:], terms)}
elif return_type == 'list':
return terms
else:
raise ValueError("Invalid return_type argument; use 'list' or 'dict'.")
@not_operational
def term_to_id_coords(self, term, no_ids=3):
"""
Returns coordinates associated with studies that have the
greatest term activation.
Parameters
----------
term : string
NS term of interest
no_ids : numeric, optional
Number of studies to return coordinates for.
Returns
-------
coords : list [ PMID_coord_pair (int, list [tuple(1x3)] ) ]
Returns a list of len(no_ids) lists containing a namedtuple:
"PMID_coord_pair". PMID_coord_pair contains two arguments: PMID
and a list of coordinates (in tuple form) for that study.
"""
id_coord_pair = collections.namedtuple("PMID_coord_pair", "pmid coords")
if self.is_term(term):
try:
self._ns['id_dict'][24379394]
except KeyError:
self.ns_load_id_dict()
heat = self._ns['features_df'][term]
sorted_heat_vals = sorted(enumerate(heat), key=lambda x: x[1], reverse=True)[0:no_ids]
inds = zip(*sorted_heat_vals)[0]
pmids = [self._ns['features_df']['pmid'].ix[ind] for ind in inds]
coords = []
for pmid in pmids:
if self.is_id(pmid):
coords.append(id_coord_pair(pmid, self._ns['id_dict'][pmid]))
return coords
else:
raise ValueError("No previous estimation found for '%s'." % term)
def _term_to_coords(self, term, thresh=0):
"""
Finds coordinates associated with a given term above
a specified threshold.
Parameters
----------
term : string
NS term of interest
thresh : numeric
NS activation threshold.
(activations < threshold: are discarded)
Returns
-------
(spatial.KDTree, pandas.DataFrame)
"""
term_ids_act = self._ns['features_df'].loc[self._ns['features_df'][term] >= thresh, ['pmid', term]]
term_ids = term_ids_act['pmid'].tolist()
term_coords = self._ns['database_df'].loc[self._ns['database_df']['id'].isin(term_ids)]
try:
ns_coord_tree = spatial.KDTree(term_coords.loc[:, 'x':'z'].as_matrix().astype(float))
except ValueError:
raise ValueError("No studies with term: '%s' and threshold: %.2f found" % (term, thresh))
else:
term_ids_act.rename(columns={'pmid': 'id'}, inplace=True)
return ns_coord_tree, term_coords.merge(term_ids_act)
def estimate_ns_act(self, term, coords=None, **kwargs):
"""
Uses KNN to estimate Neurosynth term activation (tf-idf) at
specified coordinates. If no coordinates are passed, ABA sampled
locations in corresponding NsabaBase are used.
Parameters
----------
term : str
NS term whose activation is to be estimated
coords : np.array [int], optional
Coordinates where NS term activation is to be estimated.
kwargs : dict, optional
'rnn_args' : dict
SKLearn RadiusNeighborsRegressor() optional arguments.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
for default arguments.
"""
if not self.is_term(term):
raise ValueError("'%s' is not a registered term." % term)
self.term[term] = {}
if coords is None:
coords = self._aba['mni_coords'].data
self.term[term]['coord_type'] = 'ABA MNI'
else:
self.term[term]['coords'] = coords
if 'coord_type' in kwargs:
self.term[term]['coord_type'] = kwargs['coord_type']
ns_coord_tree, ns_coord_act_df = self._term_to_coords(term, 0)
if 'rnn_args' in kwargs:
if 'radius' not in kwargs['rnn_args']:
kwargs['rnn_args']['radius'] = 5
self.term[term]['classifier'] = RadiusNeighborsRegressor(**kwargs['rnn_args'])
else:
self.term[term]['classifier'] = RadiusNeighborsRegressor(radius=5)
X = ns_coord_tree.data
y = ns_coord_act_df[term].as_matrix()
self.term[term]['classifier'].fit(X, y)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.term[term]['act'] = self.term[term]['classifier'].predict(coords)
def matrix_builder(self, ns_terms=None, entrez_ids=None):
"""
Generates a np.array() matrix of pre-estimated term activations and gene expression
coefficients.
NOTE: All coefficient vectors must be same size; otherwise ValueError is raised.
Parameters
----------
ns_terms : list-like, optional
List of NS terms whose activations are inserted in the returned matrix.
entrez_ids : list-like, optional
List of Entrez IDs whose corresponding gene expression coefficients
are inserted in the returned matrix.
Returns
-------
np.array( [entrez_ids + ns_terms x vec_len]):
Matrix of term activations and/or gene expression coefficients; coefficient
vectors are stacked horizontally as column vectors.
"""
if entrez_ids is None:
entrez_ids = []
else:
self._check_entrez_struct(entrez_ids)
if not all([key in self.ge for key in entrez_ids]):
raise ValueError()
if ns_terms is not None:
if not all([term in self.term for term in ns_terms]):
raise ValueError()
else:
ns_terms = []
if not entrez_ids == []:
vec_len = len(self.ge[entrez_ids[0]]["mean"]['GE'])
elif not ns_terms == []:
vec_len = len(self.term[ns_terms[0]]['act'])
else:
raise ValueError("ns_terms and entrez_ids parameters both 'None'; "
"at least one be set explicitly.")
matrix = []
for entrez_id in entrez_ids:
if len(self.ge[entrez_id]["mean"]['GE']) == vec_len:
matrix.append(self.ge[entrez_id]["mean"]['GE'])
else:
raise ValueError("Gene expression vector for '%s' size mismatched "
"with base vector. Please ensure that all vectors "
"corresponding to passed Entrez IDs are the same size." % str(entrez_id))
for term in ns_terms:
if len(self.term[term]['act']) == vec_len:
matrix.append(self.term[term]['act'])
else:
raise ValueError("Term activation vector for '%s' size mismatched "
"with base vector. Please ensure that all vectors "
"corresponding to passed Entrez IDs are the same size." % term)
return np.array(matrix).T
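# Rough usage sketch for the classes above (defined as a function so nothing
# runs on import).  The data paths, the NS term and the Entrez ID below are
# placeholders and must point at real local copies of the ABA and Neurosynth
# files for this to work.
def _example_nsaba_workflow():
    Nsaba.aba_load(aba_path='path/to/allen_brain_atlas_csvs')
    Nsaba.ns_load(ns_path='path/to/neurosynth_txt_files')
    study = Nsaba()
    study.estimate_aba_ge([1813])      # 1813 is an illustrative Entrez gene ID
    study.estimate_ns_act('reward')    # estimated at the ABA MNI sample locations
    # One gene expression vector and one term activation vector, column-stacked
    return study.matrix_builder(ns_terms=['reward'], entrez_ids=[1813])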
|
voytekresearch/nsaba
|
nsaba/nsaba.py
|
Python
|
mit
| 30,776
|
[
"Gaussian"
] |
0d1794911638de37f570d9c8b1897cb938ec02053c379f8f404b63d74c5ca4a4
|
""" JobMonitoringHandler is the implementation of the JobMonitoring service
in the DISET framework
The following methods are available in the Service interface
"""
__RCSID__ = "$Id$"
from types import IntType, LongType, ListType, DictType, StringTypes, NoneType, BooleanType
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import JobPolicy, RIGHT_GET_INFO
import DIRAC.Core.Utilities.Time as Time
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# These are global instances of the DB classes
gJobDB = False
gJobLoggingDB = False
gTaskQueueDB = False
SUMMARY = ['JobType', 'Site', 'JobName', 'Owner', 'SubmissionTime',
'LastUpdateTime', 'Status', 'MinorStatus', 'ApplicationStatus']
SUMMARY = []
PRIMARY_SUMMARY = []
FINAL_STATES = ['Done', 'Completed', 'Stalled', 'Failed', 'Killed']
def initializeJobMonitoringHandler( serviceInfo ):
global gJobDB, gJobLoggingDB, gTaskQueueDB
gJobDB = JobDB()
gJobLoggingDB = JobLoggingDB()
gTaskQueueDB = TaskQueueDB()
return S_OK()
class JobMonitoringHandler( RequestHandler ):
def initialize( self ):
credDict = self.getRemoteCredentials()
self.ownerDN = credDict['DN']
self.ownerGroup = credDict['group']
operations = Operations( group = self.ownerGroup )
self.globalJobsInfo = operations.getValue( '/Services/JobMonitoring/GlobalJobsInfo', True )
self.jobPolicy = JobPolicy( self.ownerDN, self.ownerGroup, self.globalJobsInfo )
self.jobPolicy.setJobDB( gJobDB )
return S_OK()
##############################################################################
types_getApplicationStates = []
@staticmethod
def export_getApplicationStates ():
""" Return Distinct Values of ApplicationStatus job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'ApplicationStatus' )
##############################################################################
types_getJobTypes = []
@staticmethod
def export_getJobTypes ():
""" Return Distinct Values of JobType job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobType' )
##############################################################################
types_getOwners = []
@staticmethod
def export_getOwners ():
"""
Return Distinct Values of Owner job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Owner' )
##############################################################################
types_getProductionIds = []
@staticmethod
def export_getProductionIds ():
"""
Return Distinct Values of ProductionId job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobGroup' )
##############################################################################
types_getJobGroups = []
@staticmethod
def export_getJobGroups( condDict = None, cutDate = None ):
"""
    Return Distinct Values of JobGroup job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'JobGroup', condDict,
newer = cutDate )
##############################################################################
types_getSites = []
@staticmethod
def export_getSites ():
"""
Return Distinct Values of Site job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Site' )
##############################################################################
types_getStates = []
@staticmethod
def export_getStates ():
"""
Return Distinct Values of Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'Status' )
##############################################################################
types_getMinorStates = []
@staticmethod
def export_getMinorStates ():
"""
Return Distinct Values of Minor Status job Attribute in WMS
"""
return gJobDB.getDistinctJobAttributes( 'MinorStatus' )
##############################################################################
types_getJobs = []
@staticmethod
def export_getJobs ( attrDict = None, cutDate = None ):
"""
Return list of JobIds matching the condition given in attrDict
"""
# queryDict = {}
# if attrDict:
# if type ( attrDict ) != DictType:
# return S_ERROR( 'Argument must be of Dict Type' )
# for attribute in self.queryAttributes:
# # Only those Attribute in self.queryAttributes can be used
# if attrDict.has_key(attribute):
# queryDict[attribute] = attrDict[attribute]
print attrDict
return gJobDB.selectJobs( attrDict, newer = cutDate )
##############################################################################
types_getCounters = [ ListType ]
@staticmethod
def export_getCounters( attrList, attrDict = None, cutDate = '' ):
"""
Retrieve list of distinct attributes values from attrList
with attrDict as condition.
For each set of distinct values, count number of occurences.
Return a list. Each item is a list with 2 items, the list of distinct
attribute values and the counter
"""
# Check that Attributes in attrList and attrDict, they must be in
# self.queryAttributes.
# for attr in attrList:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Requested Attribute not Allowed: %s.' % attr )
#
# for attr in attrDict:
# try:
# self.queryAttributes.index(attr)
# except:
# return S_ERROR( 'Condition Attribute not Allowed: %s.' % attr )
cutDate = str( cutDate )
if not attrDict:
attrDict = {}
return gJobDB.getCounters( 'Jobs', attrList, attrDict, newer = cutDate, timeStamp = 'LastUpdateTime' )
##############################################################################
types_getCurrentJobCounters = [ ]
@staticmethod
def export_getCurrentJobCounters( attrDict = None ):
""" Get job counters per Status with attrDict selection. Final statuses are given for
the last day.
"""
if not attrDict:
attrDict = {}
result = gJobDB.getCounters( 'Jobs', ['Status'], attrDict, timeStamp = 'LastUpdateTime' )
if not result['OK']:
return result
last_update = Time.dateTime() - Time.day
resultDay = gJobDB.getCounters( 'Jobs', ['Status'], attrDict, newer = last_update,
timeStamp = 'LastUpdateTime' )
if not resultDay['OK']:
return resultDay
resultDict = {}
for statusDict, count in result['Value']:
status = statusDict['Status']
resultDict[status] = count
if status in FINAL_STATES:
resultDict[status] = 0
for statusDayDict, ccount in resultDay['Value']:
if status == statusDayDict['Status']:
resultDict[status] = ccount
break
return S_OK( resultDict )
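    # Illustrative return value (a sketch): with an empty selection this might yield
    #   S_OK( { 'Running': 120, 'Waiting': 45, 'Done': 12, 'Failed': 3 } )
    # where counts for the final states (Done, Completed, Stalled, Failed, Killed)
    # cover only the last 24 hours, as computed above.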
##############################################################################
types_getJobStatus = [ IntType ]
@staticmethod
def export_getJobStatus ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Status' )
##############################################################################
types_getJobOwner = [ IntType ]
@staticmethod
def export_getJobOwner ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Owner' )
##############################################################################
types_getJobSite = [ IntType ]
@staticmethod
def export_getJobSite ( jobID ):
return gJobDB.getJobAttribute( jobID, 'Site' )
##############################################################################
types_getJobJDL = [ IntType, BooleanType ]
@staticmethod
def export_getJobJDL( jobID, original ):
return gJobDB.getJobJDL( jobID, original = original )
##############################################################################
types_getJobLoggingInfo = [ IntType ]
@staticmethod
def export_getJobLoggingInfo( jobID ):
return gJobLoggingDB.getJobLoggingInfo( jobID )
##############################################################################
types_getJobsParameters = [ ListType, ListType ]
@staticmethod
def export_getJobsParameters ( jobIDs, parameters ):
if not ( jobIDs and parameters ) :
return S_OK( {} )
return gJobDB.getAttributesForJobList( jobIDs, parameters )
##############################################################################
types_getJobsStatus = [ ListType ]
@staticmethod
def export_getJobsStatus ( jobIDs ):
if not jobIDs:
return S_OK( {} )
return gJobDB.getAttributesForJobList( jobIDs, ['Status'] )
##############################################################################
types_getJobsMinorStatus = [ ListType ]
@staticmethod
def export_getJobsMinorStatus ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['MinorStatus'] )
##############################################################################
types_getJobsApplicationStatus = [ ListType ]
@staticmethod
def export_getJobsApplicationStatus ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['ApplicationStatus'] )
##############################################################################
types_getJobsSites = [ ListType ]
@staticmethod
def export_getJobsSites ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, ['Site'] )
##############################################################################
types_getJobSummary = [ IntType ]
@staticmethod
def export_getJobSummary( jobID ):
return gJobDB.getJobAttributes( jobID, SUMMARY )
##############################################################################
types_getJobPrimarySummary = [ IntType ]
@staticmethod
def export_getJobPrimarySummary( jobID ):
return gJobDB.getJobAttributes( jobID, PRIMARY_SUMMARY )
##############################################################################
types_getJobsSummary = [ ListType ]
@staticmethod
def export_getJobsSummary( jobIDs ):
if not jobIDs:
return S_ERROR( 'JobMonitoring.getJobsSummary: Received empty job list' )
result = gJobDB.getAttributesForJobList( jobIDs, SUMMARY )
# return result
restring = str( result['Value'] )
return S_OK( restring )
##############################################################################
types_getJobPageSummaryWeb = [DictType, ListType, IntType, IntType]
def export_getJobPageSummaryWeb( self, selectDict, sortList, startItem, maxItems, selectJobs = True ):
""" Get the summary of the job information for a given page in the
job monitor in a generic format
"""
resultDict = {}
startDate = selectDict.get( 'FromDate', None )
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get( 'LastUpdate', None )
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get( 'ToDate', None )
if endDate:
del selectDict['ToDate']
result = self.jobPolicy.getControlledUsers( RIGHT_GET_INFO )
if not result['OK']:
return S_ERROR( 'Failed to evaluate user rights' )
if result['Value'] != 'ALL':
selectDict[ ( 'Owner', 'OwnerGroup' ) ] = result['Value']
# Sorting instructions. Only one for the moment.
if sortList:
orderAttribute = sortList[0][0] + ":" + sortList[0][1]
else:
orderAttribute = None
statusDict = {}
result = gJobDB.getCounters( 'Jobs', ['Status'], selectDict,
newer = startDate,
older = endDate,
timeStamp = 'LastUpdateTime' )
nJobs = 0
if result['OK']:
for stDict, count in result['Value']:
nJobs += count
statusDict[stDict['Status']] = count
resultDict['TotalRecords'] = nJobs
if nJobs == 0:
return S_OK( resultDict )
resultDict['Extras'] = statusDict
if selectJobs:
iniJob = startItem
if iniJob >= nJobs:
return S_ERROR( 'Item number out of range' )
result = gJobDB.selectJobs( selectDict, orderAttribute = orderAttribute,
newer = startDate, older = endDate, limit = ( maxItems, iniJob ) )
if not result['OK']:
return S_ERROR( 'Failed to select jobs: ' + result['Message'] )
summaryJobList = result['Value']
if not self.globalJobsInfo:
validJobs, _invalidJobs, _nonauthJobs, _ownJobs = self.jobPolicy.evaluateJobRights( summaryJobList,
RIGHT_GET_INFO )
summaryJobList = validJobs
result = gJobDB.getAttributesForJobList( summaryJobList, SUMMARY )
if not result['OK']:
return S_ERROR( 'Failed to get job summary: ' + result['Message'] )
summaryDict = result['Value']
# Evaluate last sign of life time
for jobID, jobDict in summaryDict.items():
if jobDict['HeartBeatTime'] == 'None':
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
else:
lastTime = Time.fromString( jobDict['LastUpdateTime'] )
hbTime = Time.fromString( jobDict['HeartBeatTime'] )
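          # ( lastTime - lastTime ) is a zero timedelta, so this checks whether the
          # heartbeat is more recent than the last status update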
if ( hbTime - lastTime ) > ( lastTime - lastTime ) or jobDict['Status'] == "Stalled":
jobDict['LastSignOfLife'] = jobDict['HeartBeatTime']
else:
jobDict['LastSignOfLife'] = jobDict['LastUpdateTime']
tqDict = {}
result = gTaskQueueDB.getTaskQueueForJobs( summaryJobList )
if result['OK']:
tqDict = result['Value']
# If no jobs can be selected after the properties check
if not summaryDict.keys():
return S_OK( resultDict )
# prepare the standard structure now
key = summaryDict.keys()[0]
paramNames = summaryDict[key].keys()
records = []
for jobID, jobDict in summaryDict.items():
jParList = []
for pname in paramNames:
jParList.append( jobDict[pname] )
jParList.append( tqDict.get( jobID, 0 ) )
records.append( jParList )
resultDict['ParameterNames'] = paramNames + ['TaskQueueID']
resultDict['Records'] = records
return S_OK( resultDict )
##############################################################################
types_getJobStats = [ StringTypes, DictType ]
@staticmethod
def export_getJobStats ( attribute, selectDict ):
""" Get job statistics distribution per attribute value with a given selection
"""
startDate = selectDict.get( 'FromDate', None )
if startDate:
del selectDict['FromDate']
# For backward compatibility
if startDate is None:
startDate = selectDict.get( 'LastUpdate', None )
if startDate:
del selectDict['LastUpdate']
endDate = selectDict.get( 'ToDate', None )
if endDate:
del selectDict['ToDate']
result = gJobDB.getCounters( 'Jobs', [attribute], selectDict,
newer = startDate,
older = endDate,
timeStamp = 'LastUpdateTime' )
resultDict = {}
if result['OK']:
for cDict, count in result['Value']:
resultDict[cDict[attribute]] = count
return S_OK( resultDict )
##############################################################################
types_getJobsPrimarySummary = [ ListType ]
@staticmethod
def export_getJobsPrimarySummary ( jobIDs ):
return gJobDB.getAttributesForJobList( jobIDs, PRIMARY_SUMMARY )
##############################################################################
types_getJobParameter = [ list( StringTypes ) + [ IntType, LongType] , StringTypes ]
@staticmethod
def export_getJobParameter( jobID, parName ):
return gJobDB.getJobParameters( jobID, [parName] )
##############################################################################
types_getJobParameters = [ [IntType, LongType] ]
@staticmethod
def export_getJobParameters( jobID ):
return gJobDB.getJobParameters( jobID )
##############################################################################
types_traceJobParameter = [ StringTypes, list( StringTypes ) + [IntType, LongType, ListType],
StringTypes, list( StringTypes ) + [NoneType],
list( StringTypes ) + [ NoneType] ]
@staticmethod
def export_traceJobParameter( site, localID, parameter, date, until ):
return gJobDB.traceJobParameter( site, localID, parameter, date, until )
##############################################################################
types_traceJobParameters = [ StringTypes, list( StringTypes ) + [IntType, LongType, ListType],
[ListType, NoneType], [ListType, NoneType],
list( StringTypes ) + [ NoneType], list( StringTypes ) + [ NoneType] ]
@staticmethod
def export_traceJobParameters( site, localID, parameterList, attributeList, date, until ):
return gJobDB.traceJobParameters( site, localID, parameterList, attributeList, date, until )
##############################################################################
types_getAtticJobParameters = [ [IntType, LongType] ]
@staticmethod
def export_getAtticJobParameters( jobID, parameters = None, rescheduleCycle = -1 ):
if not parameters:
parameters = []
return gJobDB.getAtticJobParameters( jobID, parameters, rescheduleCycle )
##############################################################################
types_getJobAttributes = [ IntType ]
@staticmethod
def export_getJobAttributes( jobID ):
return gJobDB.getJobAttributes( jobID )
##############################################################################
types_getJobAttribute = [ IntType, StringTypes ]
@staticmethod
def export_getJobAttribute( jobID, attribute ):
return gJobDB.getJobAttribute( jobID, attribute )
##############################################################################
types_getSiteSummary = [ ]
@staticmethod
def export_getSiteSummary():
return gJobDB.getSiteSummary()
##############################################################################
types_getJobHeartBeatData = [ IntType ]
@staticmethod
def export_getJobHeartBeatData( jobID ):
return gJobDB.getHeartBeatData( jobID )
##############################################################################
types_getInputData = [ [IntType, LongType] ]
@staticmethod
def export_getInputData( jobID ):
""" Get input data for the specified jobs
"""
return gJobDB.getInputData( jobID )
##############################################################################
types_getOwnerGroup = []
@staticmethod
def export_getOwnerGroup ():
"""
Return Distinct Values of OwnerGroup from the JobsDB
"""
return gJobDB.getDistinctJobAttributes( 'OwnerGroup' )
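# Illustrative client-side call (a sketch; assumes the standard DIRAC RPC client and
# that this handler is exposed as WorkloadManagement/JobMonitoring):
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
#   result = monitoring.getJobsStatus( [ 1001, 1002 ] )
#   if result['OK']:
#     print result['Value']   # e.g. { 1001: { 'Status': 'Done' }, 1002: { 'Status': 'Waiting' } }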
|
Andrew-McNab-UK/DIRAC
|
WorkloadManagementSystem/Service/JobMonitoringHandler.py
|
Python
|
gpl-3.0
| 19,195
|
[
"DIRAC"
] |
a902e4dd469676701c5f049c7bb00a8c577139cfa4797691f4e4748bd2c80131
|
from datetime import datetime
from django.test import TestCase
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from edc_constants.constants import POS, NEG, UNK
from hiv_status.status import Status
from hiv_status.models import HivResult, Subject, Visit, HivStatusReview
class TestSimple(TestCase):
"""Test status does not change base class if results are passed as strings."""
def setUp(self):
self.subject = Subject.objects.create(subject_identifier='123456789')
def test_returns_status(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, tested=HivResult)
self.assertIn(status.result, [POS, NEG, UNK, None])
self.assertIn(str(status.result), [POS, NEG, UNK, ''])
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_returns_all(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, tested=POS, documented=NEG, indirect=NEG, verbal=POS)
self.assertEqual(status.tested, POS)
self.assertEqual(status.previous, '')
self.assertEqual(status.documented, NEG)
self.assertEqual(status.indirect, NEG)
self.assertEqual(status.verbal, POS)
self.assertEqual(status.result, POS)
self.assertTrue(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_returns_status1(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, tested=POS)
self.assertEqual(status.result, POS)
self.assertTrue(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_returns_status2(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, tested=POS, documented=NEG)
self.assertEqual(status.result, POS)
self.assertEqual(str(status.documented), NEG)
self.assertTrue(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_returns_status3(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, indirect=POS)
self.assertEqual(status.result, POS)
self.assertFalse(status.newly_positive)
self.assertTrue(status.subject_aware)
def test_returns_status4(self):
"""Asserts excepts string instead of model."""
status = Status(subject=self.subject, indirect=NEG)
self.assertEqual(status.result, '')
self.assertEqual(status.result, None)
self.assertIsNotNone(status.result)
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
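    # A minimal illustrative call (a sketch mirroring the assertions above; `subject`
    # is any Subject instance):
    #   status = Status(subject=subject, tested=POS, documented=NEG)
    #   status.result          # POS -- the tested result takes precedence
    #   status.newly_positive  # True -- POS test without a prior POS on record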
class TestStatus(TestCase):
def setUp(self):
self.subject = Subject.objects.create(subject_identifier='123456789')
def create_visits(self, count, visit_code=None, base_datetime=None):
visit_code = visit_code or '1000'
base_datetime = base_datetime or timezone.now()
for m in range(0, count):
Visit.objects.create(
subject=self.subject,
visit_code=visit_code,
encounter=m,
visit_datetime=base_datetime - relativedelta(months=m)
)
def test_previous(self):
self.create_visits(3)
for visit in Visit.objects.order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().latest()
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult)
self.assertEqual(status.previous, NEG)
self.assertEqual(status.documented, NEG)
def test_previous2(self):
self.create_visits(3)
for visit in Visit.objects.order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().latest()
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult, documented=POS)
self.assertEqual(status.previous, NEG)
self.assertEqual(status.documented, POS)
def test_previous3(self):
self.create_visits(3)
for visit in Visit.objects.order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().latest()
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult, documented=HivStatusReview)
self.assertEqual(status.previous, NEG)
self.assertEqual(status.documented, NEG)
def test_previous4(self):
self.create_visits(3)
for visit in Visit.objects.order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().latest()
hiv_result.result_value = POS
hiv_result.save()
visit = Visit.objects.all().order_by('visit_datetime')[1]
d = visit.visit_datetime
HivStatusReview.objects.create(
visit=visit,
documented_result=POS,
documented_result_date=datetime(d.year, d.month, d.day)
)
status = Status(subject=self.subject, tested=HivResult, documented=HivStatusReview)
self.assertEqual(status.previous, NEG)
self.assertEqual(status.documented, POS)
def test_prefers_pos_result(self):
"""Returns the most recent result -- which is POS."""
self.create_visits(3)
for visit in Visit.objects.order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().latest()
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult)
self.assertEqual(status, POS)
self.assertTrue(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_prefers_pos_result2(self):
"""Returns the second most recent result -- which is POS."""
self.create_visits(3)
for visit in Visit.objects.all().order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().order_by('result_datetime')[1]
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult)
self.assertEqual(status, POS)
self.assertTrue(status.newly_positive) # relative to this result
self.assertFalse(status.subject_aware)
def test_first_pos_result(self):
"""Returns the second most recent result -- which is POS."""
self.create_visits(6)
for visit in Visit.objects.all().order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.all().order_by('result_datetime')[1]
hiv_result.result_value = POS
hiv_result.save()
hiv_result = HivResult.objects.all().order_by('result_datetime')[5]
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult)
self.assertEqual(status, POS)
self.assertEqual(status.result.visit, hiv_result.visit)
self.assertEqual(status.previous, POS)
self.assertFalse(status.newly_positive)
self.assertTrue(status.subject_aware)
def test_status_as_of_visit_code(self):
self.create_visits(3, visit_code='1000', base_datetime=timezone.now() - relativedelta(years=2))
self.create_visits(3, visit_code='2000', base_datetime=timezone.now() - relativedelta(years=1))
for visit in Visit.objects.all().order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
status = Status(subject=self.subject, tested=HivResult, visit_code='2000', result_list=[POS, NEG])
self.assertEqual(status, None)
status = Status(subject=self.subject, tested=HivResult, visit_code='2000', result_list=[NEG])
self.assertEqual(status, NEG)
hiv_result = HivResult.objects.filter(visit__visit_code='2000').order_by('result_datetime')[1]
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult, visit_code='1000', result_list=[POS, NEG])
self.assertEqual(status, None)
status = Status(subject=self.subject, tested=HivResult, visit_code='1000', result_list=[NEG])
self.assertEqual(status, NEG)
status = Status(subject=self.subject, tested=HivResult, visit_code='2000', result_list=[POS, NEG])
self.assertEqual(status, POS)
self.assertEqual(status.result.visit, hiv_result.visit)
def test_result_as_of_visit_code_and_encounter(self):
self.create_visits(3, visit_code='1000', base_datetime=timezone.now() - relativedelta(years=2))
self.create_visits(3, visit_code='2000', base_datetime=timezone.now() - relativedelta(years=1))
for visit in Visit.objects.all().order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.get(visit__visit_code='2000', visit__encounter=1)
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult, visit_code='2000', encounter=1)
self.assertEqual(status, POS)
self.assertEqual(status.result.visit, hiv_result.visit)
def test_result_as_of_visit_code_and_encounter2(self):
self.create_visits(3, visit_code='1000', base_datetime=timezone.now() - relativedelta(years=2))
self.create_visits(3, visit_code='2000', base_datetime=timezone.now() - relativedelta(years=1))
for visit in Visit.objects.all().order_by('visit_datetime'):
HivResult.objects.create(
visit=visit,
result_value=NEG,
result_datetime=visit.visit_datetime)
hiv_result = HivResult.objects.get(visit__visit_code='1000', visit__encounter=1)
hiv_result.result_value = POS
hiv_result.save()
status = Status(subject=self.subject, tested=HivResult, visit_code='2000', encounter=1)
self.assertEqual(status, None)
def test_result_no_result(self):
self.create_visits(3, visit_code='1000', base_datetime=timezone.now() - relativedelta(years=2))
status = Status(subject=self.subject, tested=HivResult)
self.assertEquals(status.result.result_value, '')
self.assertEquals(status.result, None)
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_result_wrapper(self):
status = Status(subject=self.subject, tested=None)
self.assertEqual(status.tested.result_value, '')
self.assertEqual(status.documented.result_value, '')
self.assertEqual(status.indirect.result_value, '')
self.assertEqual(status.verbal.result_value, '')
def test_tested_neg_documented_pos(self):
status = Status(subject=self.subject, tested=NEG, documented=POS)
self.assertEqual(status, NEG)
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_tested_none_documented_pos(self):
status = Status(subject=self.subject, tested=None, documented=NEG)
self.assertEqual(status, None)
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_indirect_none(self):
status = Status(subject=self.subject, indirect=None)
self.assertEqual(status, None)
self.assertFalse(status.newly_positive)
self.assertFalse(status.subject_aware)
def test_indirect_pos(self):
status = Status(subject=self.subject, indirect=POS)
self.assertEqual(status, POS)
self.assertFalse(status.newly_positive)
self.assertTrue(status.subject_aware)
def test_indirect_neg(self):
status = Status(subject=self.subject, indirect=NEG)
self.assertEqual(status, None)
def test_subject_aware(self):
status = Status(subject=self.subject, tested=POS)
self.assertFalse(status.subject_aware)
status = Status(subject=self.subject, documented=POS)
self.assertTrue(status.subject_aware)
status = Status(subject=self.subject, tested=POS, documented=NEG)
self.assertFalse(status.subject_aware)
status = Status(subject=self.subject, tested=NEG, documented=NEG)
self.assertTrue(status.subject_aware)
status = Status(subject=self.subject, documented=NEG)
self.assertFalse(status.subject_aware)
status = Status(subject=self.subject, tested=POS, indirect=POS)
self.assertTrue(status.subject_aware)
def test_newly_positive(self):
status = Status(subject=self.subject, tested=POS)
self.assertTrue(status.newly_positive)
status = Status(subject=self.subject, documented=POS)
self.assertFalse(status.newly_positive)
status = Status(subject=self.subject, tested=POS, documented=NEG)
self.assertTrue(status.newly_positive)
# def test_longitudinal(self):
# self.create_visits(3)
# visit = Visit.objects.all()[0]
# status = LongitudinalStatus(subject=self.subject, visit=visit, tested=HivResult, documented=HivStatusReview)
|
botswana-harvard/hiv-status
|
hiv_status/tests/test_status.py
|
Python
|
gpl-2.0
| 14,447
|
[
"VisIt"
] |
6dbda0194c3e1c4bebe2c0b456cc182746f0960b0ebfb4b8e8004939d821fb8e
|